From c0cdd5e6c0800cc53bf2e745f90864b630c3d288 Mon Sep 17 00:00:00 2001
From: Jiri Mencak
Date: Thu, 24 Jul 2025 14:55:03 +0200
Subject: [PATCH] Bump vendor dependencies

Bump to k8s 1.33.3 and update other vendor dependencies. Also adjust the
project code to reflect the API changes in the updated vendor dependencies.

---
 cmd/cluster-node-tuning-operator/main.go | 3 +- go.mod | 165 +- go.sum | 235 +- hack/run-render-command-functests.sh | 2 +- hack/run-test.sh | 2 +- hack/update-k8s-eviction-defaults.sh | 2 +- .../v2/performanceprofile_validation.go | 34 +- .../v2/performanceprofile_webhook.go | 3 +- .../v2/config/shared/errors/errors.go | 2 +- .../emicklei/go-restful/v3/CHANGES.md | 5 +- .../emicklei/go-restful/v3/README.md | 2 +- .../emicklei/go-restful/v3/jsr311.go | 19 +- .../emicklei/go-restful/v3/route.go | 2 + .../github.com/evanphx/json-patch/.gitignore | 6 - vendor/github.com/evanphx/json-patch/LICENSE | 25 - .../github.com/evanphx/json-patch/README.md | 317 - .../github.com/evanphx/json-patch/errors.go | 38 - vendor/github.com/evanphx/json-patch/merge.go | 389 - vendor/github.com/evanphx/json-patch/patch.go | 851 - .../github.com/fsnotify/fsnotify/.cirrus.yml | 7 +- .../fsnotify/fsnotify/.editorconfig | 12 - .../fsnotify/fsnotify/.gitattributes | 1 - .../github.com/fsnotify/fsnotify/.gitignore | 3 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 34 +- .../fsnotify/fsnotify/CONTRIBUTING.md | 120 +- .../fsnotify/fsnotify/backend_fen.go | 324 +- .../fsnotify/fsnotify/backend_inotify.go | 594 +- .../fsnotify/fsnotify/backend_kqueue.go | 747 +- .../fsnotify/fsnotify/backend_other.go | 204 +- .../fsnotify/fsnotify/backend_windows.go | 305 +- .../github.com/fsnotify/fsnotify/fsnotify.go | 368 +- .../fsnotify/fsnotify/internal/darwin.go | 39 + .../fsnotify/internal/debug_darwin.go | 57 + .../fsnotify/internal/debug_dragonfly.go | 33 + .../fsnotify/internal/debug_freebsd.go | 42 + .../fsnotify/internal/debug_kqueue.go | 32 + .../fsnotify/fsnotify/internal/debug_linux.go | 56 + .../fsnotify/internal/debug_netbsd.go | 25 + .../fsnotify/internal/debug_openbsd.go | 28 + .../fsnotify/internal/debug_solaris.go | 45 + .../fsnotify/internal/debug_windows.go | 40 + .../fsnotify/fsnotify/internal/freebsd.go | 31 + .../fsnotify/fsnotify/internal/internal.go | 2 + .../fsnotify/fsnotify/internal/unix.go | 31 + .../fsnotify/fsnotify/internal/unix2.go | 7 + .../fsnotify/fsnotify/internal/windows.go | 41 + vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 259 - .../fsnotify/fsnotify/system_bsd.go | 1 - .../fsnotify/fsnotify/system_darwin.go | 1 - vendor/github.com/golang/protobuf/AUTHORS | 3 - .../github.com/golang/protobuf/CONTRIBUTORS | 3 - vendor/github.com/golang/protobuf/LICENSE | 28 - .../golang/protobuf/proto/buffer.go | 324 - .../golang/protobuf/proto/defaults.go | 63 - .../golang/protobuf/proto/deprecated.go | 113 - .../golang/protobuf/proto/discard.go | 58 - .../golang/protobuf/proto/extensions.go | 356 - .../golang/protobuf/proto/properties.go | 306 - .../github.com/golang/protobuf/proto/proto.go | 167 - .../golang/protobuf/proto/registry.go | 317 - .../golang/protobuf/proto/text_decode.go | 801 - .../golang/protobuf/proto/text_encode.go | 560 - .../github.com/golang/protobuf/proto/wire.go | 78 - .../golang/protobuf/proto/wrappers.go | 34 - .../github.com/golang/protobuf/ptypes/any.go | 180 - .../golang/protobuf/ptypes/any/any.pb.go | 62 - .../github.com/golang/protobuf/ptypes/doc.go | 10 - .../golang/protobuf/ptypes/duration.go | 76 - .../protobuf/ptypes/duration/duration.pb.go | 63 -
.../golang/protobuf/ptypes/timestamp.go | 112 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 - .../google/{gofuzz => btree}/LICENSE | 0 vendor/github.com/google/btree/README.md | 10 + vendor/github.com/google/btree/btree.go | 893 + .../github.com/google/btree/btree_generic.go | 1083 + .../gnostic-models/compiler/extensions.go | 8 +- .../gnostic-models/extensions/extension.pb.go | 96 +- .../gnostic-models/extensions/extensions.go | 6 +- .../gnostic-models/openapiv2/OpenAPIv2.pb.go | 1349 +- .../gnostic-models/openapiv3/OpenAPIv3.pb.go | 1763 +- .../openapiv3/annotations.pb.go | 182 + .../openapiv3/annotations.proto | 56 + vendor/github.com/google/gofuzz/.travis.yml | 10 - .../github.com/google/gofuzz/CONTRIBUTING.md | 67 - vendor/github.com/google/gofuzz/fuzz.go | 605 - .../gorilla/websocket/.editorconfig | 20 - .../github.com/gorilla/websocket/.gitignore | 26 +- .../gorilla/websocket/.golangci.yml | 3 - vendor/github.com/gorilla/websocket/AUTHORS | 9 + vendor/github.com/gorilla/websocket/LICENSE | 39 +- vendor/github.com/gorilla/websocket/Makefile | 34 - vendor/github.com/gorilla/websocket/README.md | 24 +- vendor/github.com/gorilla/websocket/client.go | 231 +- .../gorilla/websocket/compression.go | 11 +- vendor/github.com/gorilla/websocket/conn.go | 95 +- vendor/github.com/gorilla/websocket/mask.go | 4 - vendor/github.com/gorilla/websocket/proxy.go | 62 +- vendor/github.com/gorilla/websocket/server.go | 132 +- .../gorilla/websocket/tls_handshake.go | 18 - vendor/github.com/gorilla/websocket/util.go | 4 +- vendor/github.com/jaypipes/ghw/README.md | 4 +- .../jaypipes/ghw/pkg/block/block.go | 5 +- .../jaypipes/ghw/pkg/block/block_windows.go | 24 +- .../jaypipes/ghw/pkg/gpu/gpu_linux.go | 2 +- .../klauspost/compress/.gitattributes | 2 - .../klauspost/compress/.goreleaser.yml | 123 - .../github.com/klauspost/compress/README.md | 721 - .../github.com/klauspost/compress/SECURITY.md | 25 - .../klauspost/compress/compressible.go | 85 - .../klauspost/compress/fse/README.md | 79 - .../klauspost/compress/fse/bitreader.go | 122 - .../klauspost/compress/fse/bitwriter.go | 167 - .../klauspost/compress/fse/bytereader.go | 47 - .../klauspost/compress/fse/compress.go | 683 - .../klauspost/compress/fse/decompress.go | 376 - .../github.com/klauspost/compress/fse/fse.go | 144 - vendor/github.com/klauspost/compress/gen.sh | 4 - .../klauspost/compress/huff0/.gitignore | 1 - .../klauspost/compress/huff0/README.md | 89 - .../klauspost/compress/huff0/bitreader.go | 229 - .../klauspost/compress/huff0/bitwriter.go | 102 - .../klauspost/compress/huff0/compress.go | 742 - .../klauspost/compress/huff0/decompress.go | 1167 - .../compress/huff0/decompress_amd64.go | 226 - .../compress/huff0/decompress_amd64.s | 830 - .../compress/huff0/decompress_generic.go | 299 - .../klauspost/compress/huff0/huff0.go | 337 - .../compress/internal/cpuinfo/cpuinfo.go | 34 - .../internal/cpuinfo/cpuinfo_amd64.go | 11 - .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 - .../compress/internal/snapref/LICENSE | 27 - .../compress/internal/snapref/decode.go | 264 - .../compress/internal/snapref/decode_other.go | 113 - .../compress/internal/snapref/encode.go | 289 - .../compress/internal/snapref/encode_other.go | 250 - .../compress/internal/snapref/snappy.go | 98 - vendor/github.com/klauspost/compress/s2sx.mod | 4 - vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 - .../klauspost/compress/zstd/bitreader.go | 136 - .../klauspost/compress/zstd/bitwriter.go | 112 - 
.../klauspost/compress/zstd/blockdec.go | 731 - .../klauspost/compress/zstd/blockenc.go | 909 - .../compress/zstd/blocktype_string.go | 85 - .../klauspost/compress/zstd/bytebuf.go | 131 - .../klauspost/compress/zstd/bytereader.go | 82 - .../klauspost/compress/zstd/decodeheader.go | 261 - .../klauspost/compress/zstd/decoder.go | 948 - .../compress/zstd/decoder_options.go | 169 - .../klauspost/compress/zstd/dict.go | 565 - .../klauspost/compress/zstd/enc_base.go | 173 - .../klauspost/compress/zstd/enc_best.go | 560 - .../klauspost/compress/zstd/enc_better.go | 1252 - .../klauspost/compress/zstd/enc_dfast.go | 1123 - .../klauspost/compress/zstd/enc_fast.go | 891 - .../klauspost/compress/zstd/encoder.go | 642 - .../compress/zstd/encoder_options.go | 339 - .../klauspost/compress/zstd/framedec.go | 415 - .../klauspost/compress/zstd/frameenc.go | 137 - .../klauspost/compress/zstd/fse_decoder.go | 307 - .../compress/zstd/fse_decoder_amd64.go | 65 - .../compress/zstd/fse_decoder_amd64.s | 126 - .../compress/zstd/fse_decoder_generic.go | 73 - .../klauspost/compress/zstd/fse_encoder.go | 701 - .../klauspost/compress/zstd/fse_predefined.go | 158 - .../klauspost/compress/zstd/hash.go | 35 - .../klauspost/compress/zstd/history.go | 116 - .../compress/zstd/internal/xxhash/LICENSE.txt | 22 - .../compress/zstd/internal/xxhash/README.md | 71 - .../compress/zstd/internal/xxhash/xxhash.go | 230 - .../zstd/internal/xxhash/xxhash_amd64.s | 210 - .../zstd/internal/xxhash/xxhash_arm64.s | 184 - .../zstd/internal/xxhash/xxhash_asm.go | 16 - .../zstd/internal/xxhash/xxhash_other.go | 76 - .../zstd/internal/xxhash/xxhash_safe.go | 11 - .../klauspost/compress/zstd/matchlen_amd64.go | 16 - .../klauspost/compress/zstd/matchlen_amd64.s | 66 - .../compress/zstd/matchlen_generic.go | 33 - .../klauspost/compress/zstd/seqdec.go | 503 - .../klauspost/compress/zstd/seqdec_amd64.go | 394 - .../klauspost/compress/zstd/seqdec_amd64.s | 4151 -- .../klauspost/compress/zstd/seqdec_generic.go | 237 - .../klauspost/compress/zstd/seqenc.go | 114 - .../klauspost/compress/zstd/snappy.go | 434 - .../github.com/klauspost/compress/zstd/zip.go | 141 - .../klauspost/compress/zstd/zstd.go | 125 - vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 84 +- vendor/github.com/onsi/ginkgo/v2/README.md | 2 +- vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 58 +- .../onsi/ginkgo/v2/deprecated_dsl.go | 8 +- .../onsi/ginkgo/v2/formatter/formatter.go | 12 +- .../ginkgo/v2/ginkgo/build/build_command.go | 16 +- .../onsi/ginkgo/v2/ginkgo/command/abort.go | 6 +- .../onsi/ginkgo/v2/ginkgo/command/command.go | 6 +- .../onsi/ginkgo/v2/ginkgo/command/program.go | 2 - .../onsi/ginkgo/v2/ginkgo/internal/compile.go | 8 +- .../ginkgo/v2/ginkgo/internal/gocovmerge.go | 2 +- .../github.com/onsi/ginkgo/v2/ginkgo/main.go | 2 +- .../onsi/ginkgo/v2/ginkgo/run/run_command.go | 4 +- .../ginkgo/v2/ginkgo/watch/watch_command.go | 2 +- .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 9 + .../onsi/ginkgo/v2/internal/failer.go | 2 +- .../interrupt_handler/interrupt_handler.go | 22 +- .../onsi/ginkgo/v2/internal/node.go | 34 +- .../ginkgo/v2/internal/output_interceptor.go | 14 +- .../v2/internal/output_interceptor_unix.go | 2 +- .../parallel_support/client_server.go | 2 +- .../internal/parallel_support/http_client.go | 9 +- .../internal/parallel_support/http_server.go | 4 +- .../internal/parallel_support/rpc_client.go | 2 +- .../internal/parallel_support/rpc_server.go | 10 +- .../parallel_support/server_handler.go | 4 +- .../onsi/ginkgo/v2/internal/report_entry.go | 2 +- 
.../internal/testingtproxy/testing_t_proxy.go | 43 +- .../onsi/ginkgo/v2/internal/writer.go | 6 +- .../ginkgo/v2/reporters/default_reporter.go | 4 +- .../onsi/ginkgo/v2/reporting_dsl.go | 12 +- vendor/github.com/onsi/ginkgo/v2/table_dsl.go | 46 +- .../github.com/onsi/ginkgo/v2/types/config.go | 58 +- .../onsi/ginkgo/v2/types/deprecated_types.go | 2 +- .../github.com/onsi/ginkgo/v2/types/errors.go | 22 +- .../github.com/onsi/ginkgo/v2/types/flags.go | 10 +- .../onsi/ginkgo/v2/types/label_filter.go | 2 +- .../onsi/ginkgo/v2/types/report_entry.go | 8 +- .../onsi/ginkgo/v2/types/version.go | 2 +- vendor/github.com/onsi/gomega/CHANGELOG.md | 52 +- .../github.com/onsi/gomega/format/format.go | 10 +- .../github.com/onsi/gomega/gbytes/buffer.go | 17 +- .../onsi/gomega/gbytes/say_matcher.go | 14 +- .../onsi/gomega/gcustom/make_matcher.go | 4 +- .../onsi/gomega/gexec/exit_matcher.go | 8 +- .../github.com/onsi/gomega/gexec/session.go | 6 +- vendor/github.com/onsi/gomega/gomega_dsl.go | 26 +- .../onsi/gomega/gstruct/elements.go | 126 +- .../github.com/onsi/gomega/gstruct/fields.go | 106 +- .../github.com/onsi/gomega/gstruct/ignore.go | 22 +- vendor/github.com/onsi/gomega/gstruct/keys.go | 12 +- .../github.com/onsi/gomega/gstruct/pointer.go | 15 +- .../github.com/onsi/gomega/gstruct/types.go | 8 +- .../onsi/gomega/internal/assertion.go | 32 +- .../onsi/gomega/internal/async_assertion.go | 44 +- .../onsi/gomega/internal/duration_bundle.go | 2 +- .../github.com/onsi/gomega/internal/gomega.go | 20 +- .../gomega/internal/polling_signal_error.go | 2 +- .../onsi/gomega/internal/vetoptdesc.go | 2 +- vendor/github.com/onsi/gomega/matchers.go | 72 +- vendor/github.com/onsi/gomega/matchers/and.go | 8 +- .../matchers/assignable_to_type_of_matcher.go | 8 +- .../onsi/gomega/matchers/be_a_directory.go | 8 +- .../onsi/gomega/matchers/be_a_regular_file.go | 8 +- .../gomega/matchers/be_an_existing_file.go | 8 +- .../onsi/gomega/matchers/be_closed_matcher.go | 6 +- .../matchers/be_comparable_to_matcher.go | 8 +- .../gomega/matchers/be_element_of_matcher.go | 8 +- .../onsi/gomega/matchers/be_empty_matcher.go | 6 +- .../matchers/be_equivalent_to_matcher.go | 8 +- .../onsi/gomega/matchers/be_false_matcher.go | 6 +- .../onsi/gomega/matchers/be_identical_to.go | 8 +- .../onsi/gomega/matchers/be_key_of_matcher.go | 8 +- .../onsi/gomega/matchers/be_nil_matcher.go | 6 +- .../gomega/matchers/be_numerically_matcher.go | 10 +- .../onsi/gomega/matchers/be_sent_matcher.go | 10 +- .../gomega/matchers/be_temporally_matcher.go | 8 +- .../onsi/gomega/matchers/be_true_matcher.go | 6 +- .../onsi/gomega/matchers/be_zero_matcher.go | 6 +- .../onsi/gomega/matchers/consist_of.go | 34 +- .../matchers/contain_element_matcher.go | 18 +- .../matchers/contain_elements_matcher.go | 10 +- .../matchers/contain_substring_matcher.go | 8 +- .../onsi/gomega/matchers/equal_matcher.go | 8 +- .../onsi/gomega/matchers/have_cap_matcher.go | 6 +- .../onsi/gomega/matchers/have_each_matcher.go | 14 +- .../gomega/matchers/have_exact_elements.go | 8 +- .../matchers/have_existing_field_matcher.go | 6 +- .../onsi/gomega/matchers/have_field.go | 10 +- .../gomega/matchers/have_http_body_matcher.go | 12 +- .../have_http_header_with_value_matcher.go | 10 +- .../matchers/have_http_status_matcher.go | 10 +- .../onsi/gomega/matchers/have_key_matcher.go | 8 +- .../matchers/have_key_with_value_matcher.go | 12 +- .../onsi/gomega/matchers/have_len_matcher.go | 6 +- .../gomega/matchers/have_occurred_matcher.go | 6 +- .../gomega/matchers/have_prefix_matcher.go | 8 +- 
.../gomega/matchers/have_suffix_matcher.go | 8 +- .../onsi/gomega/matchers/have_value.go | 8 +- .../gomega/matchers/match_error_matcher.go | 4 +- .../gomega/matchers/match_json_matcher.go | 16 +- .../gomega/matchers/match_regexp_matcher.go | 8 +- .../onsi/gomega/matchers/match_xml_matcher.go | 10 +- .../gomega/matchers/match_yaml_matcher.go | 20 +- vendor/github.com/onsi/gomega/matchers/not.go | 8 +- vendor/github.com/onsi/gomega/matchers/or.go | 8 +- .../onsi/gomega/matchers/panic_matcher.go | 10 +- .../onsi/gomega/matchers/receive_matcher.go | 16 +- .../onsi/gomega/matchers/satisfy_matcher.go | 10 +- .../matchers/semi_structured_data_support.go | 32 +- .../onsi/gomega/matchers/succeed_matcher.go | 6 +- .../goraph/bipartitegraph/bipartitegraph.go | 4 +- .../matchers/support/goraph/node/node.go | 2 +- .../onsi/gomega/matchers/type_support.go | 40 +- .../onsi/gomega/matchers/with_transform.go | 14 +- vendor/github.com/onsi/gomega/types/types.go | 49 +- .../openshift-kni/k8sreporter/reporter.go | 43 +- .../openshift/api/.ci-operator.yaml | 2 +- .../github.com/openshift/api/.golangci.yaml | 80 +- .../github.com/openshift/api/Dockerfile.ocp | 6 +- vendor/github.com/openshift/api/Makefile | 2 +- vendor/github.com/openshift/api/README.md | 2 + .../openshift/api/apps/v1/generated.proto | 9 + .../github.com/openshift/api/apps/v1/types.go | 9 + .../api/authorization/v1/generated.proto | 2 + .../openshift/api/authorization/v1/types.go | 2 + .../openshift/api/build/v1/generated.proto | 15 + .../openshift/api/build/v1/types.go | 15 + .../openshift/api/config/v1/register.go | 4 + .../api/config/v1/types_apiserver.go | 4 +- .../api/config/v1/types_authentication.go | 443 +- .../config/v1/types_cluster_image_policy.go | 87 + .../api/config/v1/types_cluster_version.go | 5 +- .../openshift/api/config/v1/types_console.go | 1 + .../openshift/api/config/v1/types_feature.go | 1 + .../api/config/v1/types_image_policy.go | 322 + .../api/config/v1/types_infrastructure.go | 33 +- .../openshift/api/config/v1/types_ingress.go | 2 +- .../openshift/api/config/v1/types_network.go | 5 + .../api/config/v1/types_operatorhub.go | 1 + .../api/config/v1/zz_generated.deepcopy.go | 478 +- ..._generated.featuregated-crd-manifests.yaml | 53 +- .../v1/zz_generated.swagger_doc_generated.go | 278 +- .../api/config/v1alpha1/types_backup.go | 4 +- .../v1alpha1/types_cluster_monitoring.go | 233 +- .../api/config/v1alpha1/types_image_policy.go | 5 +- .../config/v1alpha1/zz_generated.deepcopy.go | 113 +- .../zz_generated.swagger_doc_generated.go | 47 +- .../api/console/v1/types_console_plugin.go | 4 +- .../openshift/api/envtest-releases.yaml | 13 + vendor/github.com/openshift/api/features.md | 76 +- .../openshift/api/features/features.go | 348 +- .../api/features/legacyfeaturegates.go | 8 - .../openshift/api/image/v1/generated.proto | 12 +- .../openshift/api/image/v1/types.go | 12 +- .../v1/zz_generated.swagger_doc_generated.go | 4 +- .../openshift/api/imageregistry/v1/types.go | 4 +- .../v1/types_controlplanemachineset.go | 3 +- .../api/machine/v1/types_nutanixprovider.go | 4 +- .../api/machine/v1beta1/types_awsprovider.go | 4 +- .../machine/v1beta1/types_azureprovider.go | 8 +- .../api/machine/v1beta1/types_machine.go | 1 + .../v1beta1/types_machinehealthcheck.go | 3 +- .../api/machine/v1beta1/types_machineset.go | 2 + .../api/machine/v1beta1/types_provider.go | 2 +- .../machine/v1beta1/zz_generated.deepcopy.go | 4 +- .../zz_generated.swagger_doc_generated.go | 2 +- .../api/machineconfiguration/v1/register.go | 2 + 
.../api/machineconfiguration/v1/types.go | 12 +- .../v1/types_machineconfignode.go | 245 + .../v1/types_machineosbuild.go | 4 +- .../v1/zz_generated.deepcopy.go | 177 + ..._generated.featuregated-crd-manifests.yaml | 80 +- .../v1/zz_generated.swagger_doc_generated.go | 87 +- .../v1alpha1/types_machineosbuild.go | 2 + .../v1alpha1/types_machineosconfig.go | 2 +- .../api/networkoperator/v1/generated.proto | 2 - .../networkoperator/v1/types_egressrouter.go | 4 +- .../api/openshiftcontrolplane/v1/types.go | 5 +- .../v1/zz_generated.deepcopy.go | 1 - .../openshift/api/operator/v1/types.go | 3 +- .../api/operator/v1/types_authentication.go | 2 +- .../api/operator/v1/types_console.go | 4 +- .../operator/v1/types_csi_cluster_driver.go | 4 - .../openshift/api/operator/v1/types_etcd.go | 4 +- .../api/operator/v1/types_ingress.go | 8 +- .../api/operator/v1/types_kubeapiserver.go | 1 - .../operator/v1/types_machineconfiguration.go | 2 - .../api/operator/v1/types_network.go | 26 +- ..._generated.featuregated-crd-manifests.yaml | 4 - .../v1/zz_generated.swagger_doc_generated.go | 16 +- .../api/operator/v1alpha1/types_etcdbackup.go | 1 - .../openshift/api/route/v1/generated.proto | 1 + .../openshift/api/route/v1/types.go | 1 + .../openshift/api/samples/v1/generated.proto | 7 + .../openshift/api/samples/v1/types_config.go | 7 + .../openshift/api/security/v1/generated.proto | 4 + .../openshift/api/security/v1/types.go | 4 + .../v1alpha1/types_shared_configmap.go | 2 +- .../v1alpha1/types_shared_secret.go | 2 +- .../openshift/api/template/v1/generated.proto | 2 + .../openshift/api/template/v1/types.go | 2 + .../build-machinery-go/.ci-operator.yaml | 2 +- .../Dockerfile.commitchecker | 4 +- .../make/targets/golang/verify-update.mk | 5 +- .../make/targets/openshift/yq.mk | 3 +- .../config/v1/clusterimagepolicy.go | 246 + .../config/v1/clusterimagepolicyspec.go | 38 + .../config/v1/clusterimagepolicystatus.go | 32 + .../config/v1/extramapping.go | 32 + .../config/v1/fulciocawithrekor.go | 45 + .../config/v1/imagepolicy.go | 248 + .../config/v1/imagepolicyspec.go | 38 + .../config/v1/imagepolicystatus.go | 32 + .../applyconfigurations/config/v1/pki.go | 45 + .../config/v1/pkicertificatesubject.go | 32 + .../applyconfigurations/config/v1/policy.go | 32 + .../config/v1/policyfulciosubject.go | 32 + .../config/v1/policyidentity.go | 45 + .../config/v1/policymatchexactrepository.go | 27 + .../config/v1/policymatchremapidentity.go | 36 + .../config/v1/policyrootoftrust.go | 54 + .../config/v1/publickey.go | 36 + .../config/v1/tokenclaimmappings.go | 27 +- .../v1/tokenclaimorexpressionmapping.go | 32 + .../config/v1/usernameclaimmapping.go | 8 +- .../config/v1alpha2/custom.go | 28 + .../config/v1alpha2/gatherconfig.go | 47 + .../config/v1alpha2/gathererconfig.go | 36 + .../config/v1alpha2/gatherers.go | 36 + .../config/v1alpha2/insightsdatagather.go | 246 + .../config/v1alpha2/insightsdatagatherspec.go | 23 + .../persistentvolumeclaimreference.go | 23 + .../config/v1alpha2/persistentvolumeconfig.go | 32 + .../config/v1alpha2/storage.go | 36 + .../applyconfigurations/internal/internal.go | 376 +- .../config/clientset/versioned/clientset.go | 13 + .../clientset/versioned/scheme/register.go | 2 + .../typed/config/v1/clusterimagepolicy.go | 58 + .../typed/config/v1/config_client.go | 22 +- .../typed/config/v1/generated_expansion.go | 4 + .../versioned/typed/config/v1/imagepolicy.go | 58 + .../typed/config/v1alpha1/config_client.go | 12 +- .../typed/config/v1alpha2/config_client.go | 85 + 
.../versioned/typed/config/v1alpha2/doc.go | 4 + .../config/v1alpha2/generated_expansion.go | 5 + .../config/v1alpha2/insightsdatagather.go | 58 + .../externalversions/config/interface.go | 8 + .../externalversions/config/v1/apiserver.go | 16 +- .../config/v1/authentication.go | 16 +- .../externalversions/config/v1/build.go | 16 +- .../config/v1/clusterimagepolicy.go | 85 + .../config/v1/clusteroperator.go | 16 +- .../config/v1/clusterversion.go | 16 +- .../externalversions/config/v1/console.go | 16 +- .../externalversions/config/v1/dns.go | 16 +- .../externalversions/config/v1/featuregate.go | 16 +- .../externalversions/config/v1/image.go | 16 +- .../config/v1/imagecontentpolicy.go | 16 +- .../config/v1/imagedigestmirrorset.go | 16 +- .../externalversions/config/v1/imagepolicy.go | 86 + .../config/v1/imagetagmirrorset.go | 16 +- .../config/v1/infrastructure.go | 16 +- .../externalversions/config/v1/ingress.go | 16 +- .../externalversions/config/v1/interface.go | 14 + .../externalversions/config/v1/network.go | 16 +- .../externalversions/config/v1/node.go | 16 +- .../externalversions/config/v1/oauth.go | 16 +- .../externalversions/config/v1/operatorhub.go | 16 +- .../externalversions/config/v1/project.go | 16 +- .../externalversions/config/v1/proxy.go | 16 +- .../externalversions/config/v1/scheduler.go | 16 +- .../config/v1alpha1/backup.go | 16 +- .../config/v1alpha1/clusterimagepolicy.go | 16 +- .../config/v1alpha1/clustermonitoring.go | 16 +- .../config/v1alpha1/imagepolicy.go | 16 +- .../config/v1alpha1/insightsdatagather.go | 16 +- .../config/v1alpha2/insightsdatagather.go | 85 + .../config/v1alpha2/interface.go | 29 + .../informers/externalversions/generic.go | 9 + .../listers/config/v1/clusterimagepolicy.go | 32 + .../listers/config/v1/expansion_generated.go | 12 + .../config/listers/config/v1/imagepolicy.go | 54 + .../config/v1alpha2/expansion_generated.go | 7 + .../config/v1alpha2/insightsdatagather.go | 32 + .../applyconfigurations/internal/internal.go | 11 +- .../v1/machineconfignode.go | 246 + .../v1/machineconfignodespec.go | 41 + ...chineconfignodespecmachineconfigversion.go | 23 + .../v1/machineconfignodestatus.go | 64 + ...ineconfignodestatusmachineconfigversion.go | 32 + .../machineconfignodestatuspinnedimageset.go | 59 + .../v1/mcoobjectreference.go | 23 + .../v1/generated_expansion.go | 2 + .../v1/machineconfignode.go | 60 + .../v1/machineconfiguration_client.go | 17 +- .../v1alpha1/machineconfiguration_client.go | 12 +- .../informers/externalversions/generic.go | 2 + .../v1/containerruntimeconfig.go | 16 +- .../v1/controllerconfig.go | 16 +- .../machineconfiguration/v1/interface.go | 7 + .../machineconfiguration/v1/kubeletconfig.go | 16 +- .../machineconfiguration/v1/machineconfig.go | 16 +- .../v1/machineconfignode.go | 85 + .../v1/machineconfigpool.go | 16 +- .../machineconfiguration/v1/machineosbuild.go | 16 +- .../v1/machineosconfig.go | 16 +- .../machineconfiguration/v1/pinnedimageset.go | 16 +- .../v1alpha1/machineconfignode.go | 16 +- .../v1alpha1/machineosbuild.go | 16 +- .../v1alpha1/machineosconfig.go | 16 +- .../v1alpha1/pinnedimageset.go | 16 +- .../v1/expansion_generated.go | 4 + .../v1/machineconfignode.go | 32 + .../applyconfigurations/internal/internal.go | 4473 ++ .../operator/v1/accesslogging.go | 68 + .../v1/additionalnetworkdefinition.go | 63 + .../v1/additionalroutingcapabilities.go | 29 + .../operator/v1/addpage.go | 25 + .../operator/v1/authentication.go | 246 + .../operator/v1/authenticationspec.go | 60 + .../operator/v1/authenticationstatus.go | 
82 + .../v1/awsclassicloadbalancerparameters.go | 36 + .../operator/v1/awscsidriverconfigspec.go | 32 + .../operator/v1/awsefsvolumemetrics.go | 36 + .../awsefsvolumemetricsrecursivewalkconfig.go | 32 + .../operator/v1/awsloadbalancerparameters.go | 45 + .../v1/awsnetworkloadbalancerparameters.go | 38 + .../operator/v1/awssubnets.go | 40 + .../operator/v1/azurecsidriverconfigspec.go | 23 + .../operator/v1/azurediskencryptionset.go | 41 + .../operator/v1/capability.go | 36 + .../operator/v1/capabilityvisibility.go | 27 + .../operator/v1/clienttls.go | 48 + .../operator/v1/cloudcredential.go | 246 + .../operator/v1/cloudcredentialspec.go | 69 + .../operator/v1/cloudcredentialstatus.go | 73 + .../operator/v1/clustercsidriver.go | 246 + .../operator/v1/clustercsidriverspec.go | 78 + .../operator/v1/clustercsidriverstatus.go | 73 + .../operator/v1/clusternetworkentry.go | 32 + .../applyconfigurations/operator/v1/config.go | 246 + .../operator/v1/configmapfilereference.go | 32 + .../operator/v1/configspec.go | 60 + .../operator/v1/configstatus.go | 73 + .../operator/v1/console.go | 246 + .../operator/v1/consoleconfigroute.go | 36 + .../operator/v1/consolecustomization.go | 133 + .../operator/v1/consoleproviders.go | 23 + .../operator/v1/consolespec.go | 107 + .../operator/v1/consolestatus.go | 73 + .../containerloggingdestinationparameters.go | 23 + .../operator/v1/csidriverconfigspec.go | 72 + .../operator/v1/csisnapshotcontroller.go | 246 + .../operator/v1/csisnapshotcontrollerspec.go | 60 + .../v1/csisnapshotcontrollerstatus.go | 73 + .../operator/v1/defaultnetworkdefinition.go | 45 + .../v1/developerconsolecatalogcategory.go | 55 + .../v1/developerconsolecatalogcategorymeta.go | 43 + .../developerconsolecatalogcustomization.go | 37 + .../v1/developerconsolecatalogtypes.go | 45 + .../applyconfigurations/operator/v1/dns.go | 246 + .../operator/v1/dnscache.go | 36 + .../operator/v1/dnsnodeplacement.go | 44 + .../operator/v1/dnsovertlsconfig.go | 36 + .../operator/v1/dnsspec.go | 86 + .../operator/v1/dnsstatus.go | 46 + .../operator/v1/dnstransportconfig.go | 36 + .../operator/v1/egressipconfig.go | 23 + .../operator/v1/endpointpublishingstrategy.go | 63 + .../applyconfigurations/operator/v1/etcd.go | 246 + .../operator/v1/etcdspec.go | 102 + .../operator/v1/etcdstatus.go | 107 + .../operator/v1/exportnetworkflows.go | 41 + .../operator/v1/featuresmigration.go | 41 + .../operator/v1/filereferencesource.go | 36 + .../operator/v1/forwardplugin.go | 56 + .../operator/v1/gatewayconfig.go | 54 + .../operator/v1/gathererstatus.go | 51 + .../operator/v1/gatherstatus.go | 50 + .../operator/v1/gcpcsidriverconfigspec.go | 23 + .../operator/v1/gcpkmskeyreference.go | 50 + .../operator/v1/gcploadbalancerparameters.go | 27 + .../operator/v1/generationstatus.go | 68 + .../operator/v1/healthcheck.go | 54 + .../operator/v1/hostnetworkstrategy.go | 54 + .../operator/v1/httpcompressionpolicy.go | 29 + .../operator/v1/hybridoverlayconfig.go | 37 + .../v1/ibmcloudcsidriverconfigspec.go | 23 + .../operator/v1/ibmloadbalancerparameters.go | 27 + .../operator/v1/ingress.go | 32 + .../operator/v1/ingresscontroller.go | 248 + .../v1/ingresscontrollercapturehttpcookie.go | 52 + ...ingresscontrollercapturehttpcookieunion.go | 45 + .../v1/ingresscontrollercapturehttpheader.go | 32 + .../v1/ingresscontrollercapturehttpheaders.go | 42 + .../v1/ingresscontrollerhttpheader.go | 32 + .../v1/ingresscontrollerhttpheaderactions.go | 42 + .../ingresscontrollerhttpheaderactionunion.go | 36 + .../v1/ingresscontrollerhttpheaders.go | 
56 + ...gresscontrollerhttpuniqueidheaderpolicy.go | 32 + .../operator/v1/ingresscontrollerlogging.go | 23 + .../v1/ingresscontrollersethttpheader.go | 23 + .../operator/v1/ingresscontrollerspec.go | 184 + .../operator/v1/ingresscontrollerstatus.go | 105 + .../v1/ingresscontrollertuningoptions.go | 135 + .../operator/v1/insightsoperator.go | 246 + .../operator/v1/insightsoperatorspec.go | 60 + .../operator/v1/insightsoperatorstatus.go | 91 + .../operator/v1/insightsreport.go | 41 + .../operator/v1/ipamconfig.go | 36 + .../operator/v1/ipfixconfig.go | 29 + .../operator/v1/ipsecconfig.go | 36 + .../operator/v1/ipsecfullmodeconfig.go | 27 + .../operator/v1/ipv4gatewayconfig.go | 23 + .../operator/v1/ipv4ovnkubernetesconfig.go | 32 + .../operator/v1/ipv6gatewayconfig.go | 23 + .../operator/v1/ipv6ovnkubernetesconfig.go | 32 + .../operator/v1/kubeapiserver.go | 246 + .../operator/v1/kubeapiserverspec.go | 84 + .../operator/v1/kubeapiserverstatus.go | 108 + .../operator/v1/kubecontrollermanager.go | 246 + .../operator/v1/kubecontrollermanagerspec.go | 93 + .../v1/kubecontrollermanagerstatus.go | 94 + .../operator/v1/kubescheduler.go | 246 + .../operator/v1/kubeschedulerspec.go | 84 + .../operator/v1/kubeschedulerstatus.go | 94 + .../operator/v1/kubestorageversionmigrator.go | 246 + .../v1/kubestorageversionmigratorspec.go | 60 + .../v1/kubestorageversionmigratorstatus.go | 73 + .../operator/v1/loadbalancerstrategy.go | 56 + .../operator/v1/loggingdestination.go | 45 + .../applyconfigurations/operator/v1/logo.go | 41 + .../operator/v1/machineconfiguration.go | 246 + .../operator/v1/machineconfigurationspec.go | 102 + .../operator/v1/machineconfigurationstatus.go | 59 + .../operator/v1/machinemanager.go | 45 + .../operator/v1/machinemanagerselector.go | 36 + .../operator/v1/managedbootimages.go | 28 + .../operator/v1/mtumigration.go | 32 + .../operator/v1/mtumigrationvalues.go | 32 + .../operator/v1/netflowconfig.go | 29 + .../operator/v1/network.go | 246 + .../operator/v1/networkmigration.go | 54 + .../operator/v1/networkspec.go | 180 + .../operator/v1/networkstatus.go | 73 + .../v1/nodedisruptionpolicyclusterstatus.go | 51 + .../operator/v1/nodedisruptionpolicyconfig.go | 51 + .../v1/nodedisruptionpolicyspecaction.go | 45 + .../v1/nodedisruptionpolicyspecfile.go | 37 + .../v1/nodedisruptionpolicyspecsshkey.go | 28 + .../v1/nodedisruptionpolicyspecunit.go | 41 + .../operator/v1/nodedisruptionpolicystatus.go | 23 + .../v1/nodedisruptionpolicystatusaction.go | 45 + .../v1/nodedisruptionpolicystatusfile.go | 37 + .../v1/nodedisruptionpolicystatussshkey.go | 28 + .../v1/nodedisruptionpolicystatusunit.go | 41 + .../operator/v1/nodeplacement.go | 39 + .../operator/v1/nodeportstrategy.go | 27 + .../operator/v1/nodestatus.go | 101 + .../operator/v1/oauthapiserverstatus.go | 23 + .../applyconfigurations/operator/v1/olm.go | 246 + .../operator/v1/olmspec.go | 60 + .../operator/v1/olmstatus.go | 73 + .../operator/v1/openshiftapiserver.go | 246 + .../operator/v1/openshiftapiserverspec.go | 60 + .../operator/v1/openshiftapiserverstatus.go | 73 + .../operator/v1/openshiftcontrollermanager.go | 246 + .../v1/openshiftcontrollermanagerspec.go | 60 + .../v1/openshiftcontrollermanagerstatus.go | 73 + .../operator/v1/openshiftsdnconfig.go | 63 + .../v1/openstackloadbalancerparameters.go | 23 + .../operator/v1/operatorcondition.go | 64 + .../operator/v1/operatorspec.go | 64 + .../operator/v1/operatorstatus.go | 78 + .../operator/v1/ovnkubernetesconfig.go | 126 + .../operator/v1/partialselector.go | 27 + 
.../operator/v1/perspective.go | 53 + .../operator/v1/perspectivevisibility.go | 36 + .../operator/v1/pinnedresourcereference.go | 41 + .../operator/v1/policyauditconfig.go | 59 + .../operator/v1/privatestrategy.go | 27 + .../operator/v1/projectaccess.go | 25 + .../v1/providerloadbalancerparameters.go | 63 + .../operator/v1/proxyconfig.go | 51 + .../operator/v1/quickstarts.go | 25 + .../operator/v1/reloadservice.go | 27 + .../v1/resourceattributesaccessreview.go | 40 + .../operator/v1/restartservice.go | 27 + .../operator/v1/routeadmissionpolicy.go | 36 + .../applyconfigurations/operator/v1/server.go | 43 + .../operator/v1/serviceaccountissuerstatus.go | 36 + .../operator/v1/serviceca.go | 246 + .../operator/v1/servicecaspec.go | 60 + .../operator/v1/servicecastatus.go | 73 + .../operator/v1/servicecatalogapiserver.go | 246 + .../v1/servicecatalogapiserverspec.go | 60 + .../v1/servicecatalogapiserverstatus.go | 73 + .../v1/servicecatalogcontrollermanager.go | 246 + .../v1/servicecatalogcontrollermanagerspec.go | 60 + .../servicecatalogcontrollermanagerstatus.go | 73 + .../operator/v1/sflowconfig.go | 29 + .../operator/v1/simplemacvlanconfig.go | 54 + .../operator/v1/staticipamaddresses.go | 32 + .../operator/v1/staticipamconfig.go | 51 + .../operator/v1/staticipamdns.go | 45 + .../operator/v1/staticipamroutes.go | 32 + .../operator/v1/staticpodoperatorspec.go | 87 + .../operator/v1/staticpodoperatorstatus.go | 96 + .../operator/v1/statuspageprovider.go | 23 + .../operator/v1/storage.go | 246 + .../operator/v1/storagespec.go | 69 + .../operator/v1/storagestatus.go | 73 + .../v1/syslogloggingdestinationparameters.go | 50 + .../applyconfigurations/operator/v1/theme.go | 36 + .../operator/v1/upstream.go | 45 + .../operator/v1/upstreamresolvers.go | 59 + .../operator/v1/vspherecsidriverconfigspec.go | 61 + .../hypershift/api/hypershift/register.go | 3 - .../api/hypershift/v1beta1/agent.go | 20 + .../hypershift/api/hypershift/v1beta1/aws.go | 961 + .../api/hypershift/v1beta1/azure.go | 737 + ...certificatesigningrequestapproval_types.go | 22 +- .../api/hypershift/v1beta1/clusterconfig.go | 35 + .../v1beta1/controlplanecomponent_types.go | 111 + .../v1beta1/endpointservice_types.go | 75 +- .../hypershift/v1beta1/groupversion_info.go | 3 +- .../hypershift/v1beta1/hosted_controlplane.go | 239 +- .../v1beta1/hostedcluster_conditions.go | 27 + .../hypershift/v1beta1/hostedcluster_types.go | 2125 +- .../api/hypershift/v1beta1/ibmcloud.go | 86 + .../api/hypershift/v1beta1/kubevirt.go | 403 + .../hypershift/v1beta1/nodepool_conditions.go | 45 +- .../api/hypershift/v1beta1/nodepool_types.go | 801 +- .../api/hypershift/v1beta1/openstack.go | 451 + .../api/hypershift/v1beta1/operator.go | 35 + .../api/hypershift/v1beta1/powervs.go | 324 + .../v1beta1/zz_generated.deepcopy.go | 1120 +- ..._generated.featuregated-crd-manifests.yaml | 236 + .../hypershift/api/util/ipnet/ipnet.go | 2 + .../pkg/apiserver/jsonpatch/jsonpatch.go | 87 + .../pkg/controller/factory/base_controller.go | 55 +- .../controller/factory/controller_context.go | 1 + .../pkg/controller/factory/factory.go | 78 +- .../pkg/controller/factory/interfaces.go | 9 + .../openshift/library-go/pkg/crypto/crypto.go | 129 +- .../pkg/operator/certrotation/annotations.go | 63 +- .../pkg/operator/certrotation/cabundle.go | 60 +- .../client_cert_rotation_controller.go | 21 +- .../pkg/operator/certrotation/metadata.go | 2 +- .../pkg/operator/certrotation/signer.go | 104 +- .../pkg/operator/certrotation/target.go | 117 +- .../pkg/operator/configobserver/OWNERS 
| 4 + .../config_observer_controller.go | 46 +- .../featuregates/featuregate.go | 22 +- .../pkg/operator/events/recorder.go | 38 +- .../pkg/operator/events/recorder_in_memory.go | 14 +- .../pkg/operator/events/recorder_logging.go | 13 +- .../pkg/operator/events/recorder_upstream.go | 9 +- .../resourceapply/admissionregistration.go | 147 +- .../resource/resourceapply/apiextensions.go | 7 +- .../resource/resourceapply/apiregistration.go | 5 +- .../operator/resource/resourceapply/apps.go | 33 +- .../operator/resource/resourceapply/core.go | 95 +- .../resource/resourceapply/event_helpers.go | 56 - .../resource/resourceapply/generic.go | 45 + .../resourceapply/json_patch_helpers.go | 2 +- .../resource/resourceapply/migration.go | 7 +- .../resource/resourceapply/monitoring.go | 228 +- .../resource/resourceapply/networking.go | 59 + .../operator/resource/resourceapply/policy.go | 7 +- .../operator/resource/resourceapply/rbac.go | 25 +- .../resource/resourceapply/storage.go | 21 +- .../resource/resourceapply/unstructured.go | 11 +- .../resourceapply/volumesnapshotclass.go | 3 +- .../resource/resourcehelper/event_helpers.go | 43 + .../resourcehelper/resource_helpers.go | 3 + .../resource/resourcemerge/object_merger.go | 46 + .../resource/resourceread/admission.go | 18 + .../resource/resourceread/apiregistration.go | 26 + .../resource/resourceread/networking.go | 26 + .../operator/resourcesynccontroller/core.go | 61 + .../resourcesync_controller.go | 50 +- .../pkg/operator/v1helpers/canonicalize.go | 109 + .../pkg/operator/v1helpers/helpers.go | 15 + .../pkg/operator/v1helpers/informers.go | 15 + .../pkg/operator/v1helpers/interfaces.go | 22 + .../pkg/operator/v1helpers/test_helpers.go | 185 +- .../client_golang/prometheus/collectorfunc.go | 30 + .../client_golang/prometheus/promhttp/http.go | 26 +- .../promhttp/internal/compression.go | 21 + vendor/github.com/spf13/pflag/README.md | 27 + vendor/github.com/spf13/pflag/bool_func.go | 40 + vendor/github.com/spf13/pflag/count.go | 2 +- vendor/github.com/spf13/pflag/errors.go | 149 + vendor/github.com/spf13/pflag/flag.go | 85 +- vendor/github.com/spf13/pflag/func.go | 37 + vendor/github.com/spf13/pflag/golangflag.go | 22 + vendor/github.com/spf13/pflag/ipnet_slice.go | 2 +- vendor/github.com/spf13/pflag/text.go | 81 + vendor/github.com/spf13/pflag/time.go | 118 + .../otel/attribute/filter.go | 4 +- .../internal}/attribute.go | 2 +- .../otel/attribute/rawhelpers.go | 37 + .../otel/attribute/value.go | 15 +- .../go.opentelemetry.io/otel/internal/gen.go | 18 - .../otel/internal/rawhelpers.go | 48 - .../otel/semconv/v1.26.0/README.md | 3 + .../otel/semconv/v1.26.0/attribute_group.go | 8996 ++++ .../otel/semconv/v1.26.0/doc.go | 9 + .../otel/semconv/v1.26.0/exception.go | 9 + .../otel/semconv/v1.26.0/metric.go | 1307 + .../otel/semconv/v1.26.0/schema.go | 9 + vendor/go.opentelemetry.io/otel/trace/auto.go | 662 + .../otel/trace/internal/telemetry/attr.go | 58 + .../otel/trace/internal/telemetry/doc.go | 8 + .../otel/trace/internal/telemetry/id.go | 103 + .../otel/trace/internal/telemetry/number.go | 67 + .../otel/trace/internal/telemetry/resource.go | 66 + .../otel/trace/internal/telemetry/scope.go | 67 + .../otel/trace/internal/telemetry/span.go | 472 + .../otel/trace/internal/telemetry/status.go | 42 + .../otel/trace/internal/telemetry/traces.go | 189 + .../otel/trace/internal/telemetry/value.go | 453 + vendor/go.opentelemetry.io/otel/trace/noop.go | 22 +- vendor/go.uber.org/automaxprocs/.codecov.yml | 14 + .../automaxprocs}/.gitignore | 15 +- 
vendor/go.uber.org/automaxprocs/CHANGELOG.md | 52 + .../automaxprocs/CODE_OF_CONDUCT.md | 75 + .../go.uber.org/automaxprocs/CONTRIBUTING.md | 81 + vendor/go.uber.org/automaxprocs/LICENSE | 19 + vendor/go.uber.org/automaxprocs/Makefile | 46 + vendor/go.uber.org/automaxprocs/README.md | 71 + .../go.uber.org/automaxprocs/automaxprocs.go | 33 + .../automaxprocs/internal/cgroups/cgroup.go | 79 + .../automaxprocs/internal/cgroups/cgroups.go | 118 + .../automaxprocs/internal/cgroups/cgroups2.go | 176 + .../automaxprocs/internal/cgroups/doc.go | 23 + .../automaxprocs/internal/cgroups/errors.go | 52 + .../internal/cgroups/mountpoint.go | 171 + .../automaxprocs/internal/cgroups/subsys.go | 103 + .../internal/runtime/cpu_quota_linux.go | 75 + .../internal/runtime/cpu_quota_unsupported.go | 31 + .../automaxprocs/internal/runtime/runtime.go | 40 + .../automaxprocs/maxprocs/maxprocs.go | 139 + .../automaxprocs/maxprocs/version.go | 24 + vendor/go.yaml.in/yaml/v2/.travis.yml | 17 + .../goyaml.v2 => go.yaml.in/yaml/v2}/LICENSE | 0 .../yaml/v2}/LICENSE.libyaml | 0 .../goyaml.v2 => go.yaml.in/yaml/v2}/NOTICE | 0 vendor/go.yaml.in/yaml/v2/README.md | 131 + .../goyaml.v2 => go.yaml.in/yaml/v2}/apic.go | 0 .../yaml/v2}/decode.go | 0 .../yaml/v2}/emitterc.go | 0 .../yaml/v2}/encode.go | 0 .../yaml/v2}/parserc.go | 0 .../yaml/v2}/readerc.go | 0 .../yaml/v2}/resolve.go | 0 .../yaml/v2}/scannerc.go | 0 .../yaml/v2}/sorter.go | 0 .../yaml/v2}/writerc.go | 0 .../goyaml.v2 => go.yaml.in/yaml/v2}/yaml.go | 2 +- .../goyaml.v2 => go.yaml.in/yaml/v2}/yamlh.go | 0 .../yaml/v2}/yamlprivateh.go | 0 vendor/golang.org/x/exp/LICENSE | 27 - vendor/golang.org/x/exp/PATENTS | 22 - vendor/golang.org/x/exp/maps/maps.go | 94 - vendor/golang.org/x/mod/module/module.go | 19 +- vendor/golang.org/x/mod/semver/semver.go | 30 +- vendor/golang.org/x/net/html/atom/table.go | 1256 +- vendor/golang.org/x/net/html/parse.go | 4 +- vendor/golang.org/x/net/html/token.go | 18 +- vendor/golang.org/x/net/http2/frame.go | 25 +- vendor/golang.org/x/net/http2/server.go | 5 +- vendor/golang.org/x/net/trace/events.go | 2 +- .../golang.org/x/net/websocket/websocket.go | 5 +- vendor/golang.org/x/oauth2/internal/doc.go | 2 +- vendor/golang.org/x/oauth2/internal/oauth2.go | 2 +- vendor/golang.org/x/oauth2/internal/token.go | 50 +- .../golang.org/x/oauth2/internal/transport.go | 4 +- vendor/golang.org/x/oauth2/oauth2.go | 55 +- vendor/golang.org/x/oauth2/pkce.go | 15 +- vendor/golang.org/x/oauth2/token.go | 17 +- vendor/golang.org/x/oauth2/transport.go | 24 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 111 +- .../golang.org/x/sys/unix/syscall_darwin.go | 149 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 42 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 84 + .../x/sys/unix/zsyscall_darwin_amd64.s | 20 + .../x/sys/unix/zsyscall_darwin_arm64.go | 84 + .../x/sys/unix/zsyscall_darwin_arm64.s | 20 + .../golang.org/x/sys/windows/registry/key.go | 13 +- .../x/sys/windows/registry/value.go | 6 +- .../x/sys/windows/security_windows.go | 49 +- .../x/sys/windows/syscall_windows.go | 6 +- .../golang.org/x/sys/windows/types_windows.go | 239 + .../x/sys/windows/zsyscall_windows.go | 9 + vendor/golang.org/x/term/terminal.go | 77 +- vendor/golang.org/x/time/rate/rate.go | 17 +- .../x/tools/go/ast/astutil/imports.go | 3 +- .../x/tools/go/ast/astutil/rewrite.go | 2 +- .../golang.org/x/tools/go/ast/astutil/util.go | 2 + .../x/tools/go/ast/inspector/inspector.go | 6 +- .../x/tools/go/ast/inspector/typeof.go | 2 +- .../x/tools/go/gcexportdata/gcexportdata.go | 5 +- 
.../x/tools/go/packages/external.go | 2 +- .../golang.org/x/tools/go/packages/golist.go | 2 - .../x/tools/go/packages/packages.go | 22 +- .../x/tools/go/types/typeutil/callee.go | 83 +- .../x/tools/go/types/typeutil/map.go | 9 +- .../x/tools/internal/event/keys/keys.go | 6 +- .../x/tools/internal/event/label/label.go | 13 +- .../x/tools/internal/gcimporter/bimport.go | 2 +- .../x/tools/internal/gcimporter/iexport.go | 40 +- .../x/tools/internal/gcimporter/iimport.go | 5 +- .../tools/internal/gcimporter/ureader_yes.go | 2 +- .../x/tools/internal/gocommand/invoke.go | 2 +- .../x/tools/internal/gopathwalk/walk.go | 11 +- .../x/tools/internal/imports/fix.go | 15 +- .../x/tools/internal/imports/mod.go | 5 +- .../x/tools/internal/imports/mod_cache.go | 4 +- .../x/tools/internal/imports/sortimports.go | 5 +- .../x/tools/internal/modindex/lookup.go | 4 +- .../internal/packagesinternal/packages.go | 3 - .../x/tools/internal/pkgbits/decoder.go | 2 +- .../x/tools/internal/stdlib/deps.go | 359 + .../x/tools/internal/stdlib/import.go | 89 + .../x/tools/internal/stdlib/manifest.go | 34624 ++++++++-------- .../x/tools/internal/stdlib/stdlib.go | 10 +- .../x/tools/internal/typeparams/free.go | 2 +- .../x/tools/internal/typeparams/normalize.go | 2 +- .../x/tools/internal/typeparams/termlist.go | 12 +- .../x/tools/internal/typeparams/typeterm.go | 3 + .../internal/typesinternal/classify_call.go | 135 + .../x/tools/internal/typesinternal/types.go | 21 +- .../gomodules.xyz/jsonpatch/v2/jsonpatch.go | 17 +- .../googleapis/rpc/status/status.pb.go | 2 +- .../grpc/balancer/balancer.go | 22 +- .../grpc/balancer/base/balancer.go | 12 +- .../endpointsharding/endpointsharding.go | 356 + .../pickfirst/pickfirstleaf/pickfirstleaf.go | 45 +- .../grpc/balancer/roundrobin/roundrobin.go | 70 +- .../grpc/balancer/subconn.go | 2 +- .../grpc/balancer_wrapper.go | 9 +- .../grpc_binarylog_v1/binarylog.pb.go | 118 +- vendor/google.golang.org/grpc/clientconn.go | 50 +- vendor/google.golang.org/grpc/dialoptions.go | 34 +- .../balancer/gracefulswitch/gracefulswitch.go | 10 +- .../grpc/internal/envconfig/envconfig.go | 20 +- .../grpc/internal/envconfig/xds.go | 10 +- .../grpc/internal/grpcsync/oncefunc.go | 32 - .../grpc/internal/internal.go | 36 + .../grpc/internal/metadata/metadata.go | 26 +- .../proxyattributes/proxyattributes.go | 54 + .../delegatingresolver/delegatingresolver.go | 427 + .../grpc/internal/transport/client_stream.go | 2 +- .../grpc/internal/transport/http2_client.go | 17 +- .../grpc/internal/transport/http2_server.go | 23 +- .../grpc/internal/transport/http_util.go | 4 +- .../grpc/internal/transport/proxy.go | 62 +- .../grpc/internal/transport/server_stream.go | 6 +- .../grpc/internal/transport/transport.go | 2 - .../google.golang.org/grpc/picker_wrapper.go | 2 +- vendor/google.golang.org/grpc/resolver/map.go | 174 +- .../grpc/resolver/resolver.go | 3 + .../grpc/resolver_wrapper.go | 36 +- vendor/google.golang.org/grpc/rpc_util.go | 83 +- vendor/google.golang.org/grpc/server.go | 23 +- vendor/google.golang.org/grpc/stats/stats.go | 35 +- vendor/google.golang.org/grpc/stream.go | 300 +- vendor/google.golang.org/grpc/version.go | 2 +- .../editiondefaults/editions_defaults.binpb | Bin 138 -> 146 bytes .../internal/editionssupport/editions.go | 18 - .../protobuf/internal/filedesc/editions.go | 3 + .../protobuf/internal/genid/descriptor_gen.go | 16 + ...ings_unsafe_go121.go => strings_unsafe.go} | 2 - .../internal/strs/strings_unsafe_go120.go | 94 - .../protobuf/internal/version/version.go | 2 +- 
.../google.golang.org/protobuf/proto/merge.go | 6 + .../protobuf/reflect/protodesc/desc.go | 286 - .../protobuf/reflect/protodesc/desc_init.go | 288 - .../reflect/protodesc/desc_resolve.go | 291 - .../reflect/protodesc/desc_validate.go | 359 - .../protobuf/reflect/protodesc/editions.go | 181 - .../protobuf/reflect/protodesc/proto.go | 271 - .../reflect/protoreflect/source_gen.go | 2 + ...{value_unsafe_go121.go => value_unsafe.go} | 2 - .../protoreflect/value_unsafe_go120.go | 98 - .../types/descriptorpb/descriptor.pb.go | 1439 +- .../types/gofeaturespb/go_features.pb.go | 345 - .../protobuf/types/known/anypb/any.pb.go | 24 +- .../types/known/durationpb/duration.pb.go | 25 +- .../types/known/timestamppb/timestamp.pb.go | 25 +- vendor/k8s.io/api/admission/v1/doc.go | 2 +- vendor/k8s.io/api/admission/v1beta1/doc.go | 2 +- .../api/admissionregistration/v1/doc.go | 2 +- .../api/admissionregistration/v1alpha1/doc.go | 2 +- .../v1alpha1/generated.proto | 13 +- .../admissionregistration/v1alpha1/types.go | 23 +- .../v1alpha1/types_swagger_doc_generated.go | 8 +- .../api/admissionregistration/v1beta1/doc.go | 2 +- vendor/k8s.io/api/apidiscovery/v2/doc.go | 2 +- vendor/k8s.io/api/apidiscovery/v2beta1/doc.go | 2 +- .../api/apiserverinternal/v1alpha1/doc.go | 2 +- vendor/k8s.io/api/apps/v1/doc.go | 2 +- vendor/k8s.io/api/apps/v1/generated.pb.go | 336 +- vendor/k8s.io/api/apps/v1/generated.proto | 41 +- vendor/k8s.io/api/apps/v1/types.go | 41 +- .../apps/v1/types_swagger_doc_generated.go | 24 +- .../api/apps/v1/zz_generated.deepcopy.go | 10 + vendor/k8s.io/api/apps/v1beta1/doc.go | 2 +- .../k8s.io/api/apps/v1beta1/generated.pb.go | 286 +- .../k8s.io/api/apps/v1beta1/generated.proto | 22 +- vendor/k8s.io/api/apps/v1beta1/types.go | 22 +- .../v1beta1/types_swagger_doc_generated.go | 15 +- .../api/apps/v1beta1/zz_generated.deepcopy.go | 5 + vendor/k8s.io/api/apps/v1beta2/doc.go | 2 +- .../k8s.io/api/apps/v1beta2/generated.pb.go | 352 +- .../k8s.io/api/apps/v1beta2/generated.proto | 41 +- vendor/k8s.io/api/apps/v1beta2/types.go | 41 +- .../v1beta2/types_swagger_doc_generated.go | 24 +- .../api/apps/v1beta2/zz_generated.deepcopy.go | 10 + vendor/k8s.io/api/authentication/v1/doc.go | 2 +- .../k8s.io/api/authentication/v1alpha1/doc.go | 2 +- .../k8s.io/api/authentication/v1beta1/doc.go | 2 +- vendor/k8s.io/api/authorization/v1/doc.go | 2 +- .../k8s.io/api/authorization/v1beta1/doc.go | 2 +- vendor/k8s.io/api/autoscaling/v1/doc.go | 2 +- vendor/k8s.io/api/autoscaling/v2/doc.go | 2 +- .../k8s.io/api/autoscaling/v2/generated.pb.go | 272 +- .../k8s.io/api/autoscaling/v2/generated.proto | 30 +- vendor/k8s.io/api/autoscaling/v2/types.go | 30 +- .../v2/types_swagger_doc_generated.go | 5 +- .../autoscaling/v2/zz_generated.deepcopy.go | 5 + vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 2 +- vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 2 +- vendor/k8s.io/api/batch/v1/doc.go | 2 +- vendor/k8s.io/api/batch/v1/generated.proto | 10 - vendor/k8s.io/api/batch/v1/types.go | 15 - .../batch/v1/types_swagger_doc_generated.go | 10 +- vendor/k8s.io/api/batch/v1beta1/doc.go | 2 +- vendor/k8s.io/api/certificates/v1/doc.go | 2 +- .../k8s.io/api/certificates/v1alpha1/doc.go | 2 +- vendor/k8s.io/api/certificates/v1beta1/doc.go | 2 +- .../api/certificates/v1beta1/generated.pb.go | 761 +- .../api/certificates/v1beta1/generated.proto | 73 + .../api/certificates/v1beta1/register.go | 2 + .../k8s.io/api/certificates/v1beta1/types.go | 85 + .../v1beta1/types_swagger_doc_generated.go | 30 + .../v1beta1/zz_generated.deepcopy.go | 76 + 
.../zz_generated.prerelease-lifecycle.go | 36 + vendor/k8s.io/api/coordination/v1/doc.go | 2 +- .../k8s.io/api/coordination/v1alpha2/doc.go | 2 +- .../api/coordination/v1alpha2/generated.proto | 2 - .../k8s.io/api/coordination/v1alpha2/types.go | 2 - .../v1alpha2/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/coordination/v1beta1/doc.go | 2 +- .../api/coordination/v1beta1/generated.pb.go | 915 +- .../api/coordination/v1beta1/generated.proto | 69 + .../api/coordination/v1beta1/register.go | 2 + .../k8s.io/api/coordination/v1beta1/types.go | 73 + .../v1beta1/types_swagger_doc_generated.go | 34 + .../v1beta1/zz_generated.deepcopy.go | 84 + .../zz_generated.prerelease-lifecycle.go | 36 + vendor/k8s.io/api/core/v1/doc.go | 2 +- vendor/k8s.io/api/core/v1/generated.pb.go | 2767 +- vendor/k8s.io/api/core/v1/generated.proto | 77 +- vendor/k8s.io/api/core/v1/lifecycle.go | 24 + vendor/k8s.io/api/core/v1/types.go | 201 +- .../core/v1/types_swagger_doc_generated.go | 50 +- .../api/core/v1/zz_generated.deepcopy.go | 38 +- vendor/k8s.io/api/discovery/v1/doc.go | 2 +- .../k8s.io/api/discovery/v1/generated.pb.go | 336 +- .../k8s.io/api/discovery/v1/generated.proto | 78 +- vendor/k8s.io/api/discovery/v1/types.go | 78 +- .../v1/types_swagger_doc_generated.go | 28 +- .../api/discovery/v1/zz_generated.deepcopy.go | 21 + vendor/k8s.io/api/discovery/v1beta1/doc.go | 2 +- .../api/discovery/v1beta1/generated.pb.go | 329 +- .../api/discovery/v1beta1/generated.proto | 13 + vendor/k8s.io/api/discovery/v1beta1/types.go | 13 + .../v1beta1/types_swagger_doc_generated.go | 10 + .../v1beta1/zz_generated.deepcopy.go | 21 + vendor/k8s.io/api/events/v1/doc.go | 2 +- vendor/k8s.io/api/events/v1beta1/doc.go | 2 +- vendor/k8s.io/api/extensions/v1beta1/doc.go | 2 +- .../api/extensions/v1beta1/generated.pb.go | 418 +- .../api/extensions/v1beta1/generated.proto | 40 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 40 +- .../v1beta1/types_swagger_doc_generated.go | 24 +- .../v1beta1/zz_generated.deepcopy.go | 10 + vendor/k8s.io/api/flowcontrol/v1/doc.go | 2 +- vendor/k8s.io/api/flowcontrol/v1beta1/doc.go | 2 +- vendor/k8s.io/api/flowcontrol/v1beta2/doc.go | 2 +- vendor/k8s.io/api/flowcontrol/v1beta3/doc.go | 2 +- vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go | 2 +- vendor/k8s.io/api/networking/v1/doc.go | 2 +- .../k8s.io/api/networking/v1/generated.pb.go | 3729 +- .../k8s.io/api/networking/v1/generated.proto | 109 + vendor/k8s.io/api/networking/v1/register.go | 4 + vendor/k8s.io/api/networking/v1/types.go | 130 + .../v1/types_swagger_doc_generated.go | 80 + .../api/networking/v1/well_known_labels.go | 33 + .../networking/v1/zz_generated.deepcopy.go | 202 + .../v1/zz_generated.prerelease-lifecycle.go | 24 + vendor/k8s.io/api/networking/v1alpha1/doc.go | 2 +- vendor/k8s.io/api/networking/v1beta1/doc.go | 2 +- vendor/k8s.io/api/node/v1/doc.go | 2 +- vendor/k8s.io/api/node/v1alpha1/doc.go | 2 +- vendor/k8s.io/api/node/v1beta1/doc.go | 2 +- vendor/k8s.io/api/policy/v1/doc.go | 2 +- vendor/k8s.io/api/policy/v1/generated.proto | 3 - vendor/k8s.io/api/policy/v1/types.go | 3 - .../policy/v1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/policy/v1beta1/doc.go | 2 +- .../k8s.io/api/policy/v1beta1/generated.proto | 3 - vendor/k8s.io/api/policy/v1beta1/types.go | 3 - .../v1beta1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/rbac/v1/doc.go | 2 +- vendor/k8s.io/api/rbac/v1alpha1/doc.go | 2 +- vendor/k8s.io/api/rbac/v1beta1/doc.go | 2 +- vendor/k8s.io/api/resource/v1alpha3/doc.go | 2 +- 
.../api/resource/v1alpha3/generated.pb.go | 6403 ++- .../api/resource/v1alpha3/generated.proto | 514 +- .../k8s.io/api/resource/v1alpha3/register.go | 2 + vendor/k8s.io/api/resource/v1alpha3/types.go | 609 +- .../v1alpha3/types_swagger_doc_generated.go | 165 +- .../v1alpha3/zz_generated.deepcopy.go | 327 +- .../zz_generated.prerelease-lifecycle.go | 36 + .../api/resource/v1beta1/devicetaint.go | 35 + vendor/k8s.io/api/resource/v1beta1/doc.go | 2 +- .../api/resource/v1beta1/generated.pb.go | 3890 +- .../api/resource/v1beta1/generated.proto | 428 +- vendor/k8s.io/api/resource/v1beta1/types.go | 506 +- .../v1beta1/types_swagger_doc_generated.go | 124 +- .../resource/v1beta1/zz_generated.deepcopy.go | 202 +- .../api/resource/v1beta2/devicetaint.go | 35 + vendor/k8s.io/api/resource/v1beta2/doc.go | 24 + .../api/resource/v1beta2/generated.pb.go | 11047 +++++ .../api/resource/v1beta2/generated.proto | 1278 + .../k8s.io/api/resource/v1beta2/register.go | 60 + vendor/k8s.io/api/resource/v1beta2/types.go | 1552 + .../v1beta2/types_swagger_doc_generated.go | 464 + .../resource/v1beta2/zz_generated.deepcopy.go | 1092 + .../zz_generated.prerelease-lifecycle.go | 166 + vendor/k8s.io/api/scheduling/v1/doc.go | 2 +- vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 2 +- vendor/k8s.io/api/scheduling/v1beta1/doc.go | 2 +- vendor/k8s.io/api/storage/v1/doc.go | 2 +- vendor/k8s.io/api/storage/v1/generated.pb.go | 271 +- vendor/k8s.io/api/storage/v1/generated.proto | 22 + vendor/k8s.io/api/storage/v1/types.go | 22 + .../storage/v1/types_swagger_doc_generated.go | 26 +- .../api/storage/v1/zz_generated.deepcopy.go | 10 + vendor/k8s.io/api/storage/v1alpha1/doc.go | 2 +- .../api/storage/v1alpha1/generated.pb.go | 160 +- .../api/storage/v1alpha1/generated.proto | 8 + vendor/k8s.io/api/storage/v1alpha1/types.go | 8 + .../v1alpha1/types_swagger_doc_generated.go | 7 +- .../storage/v1alpha1/zz_generated.deepcopy.go | 5 + vendor/k8s.io/api/storage/v1beta1/doc.go | 2 +- .../api/storage/v1beta1/generated.pb.go | 280 +- .../api/storage/v1beta1/generated.proto | 22 + vendor/k8s.io/api/storage/v1beta1/types.go | 22 + .../v1beta1/types_swagger_doc_generated.go | 26 +- .../storage/v1beta1/zz_generated.deepcopy.go | 10 + .../api/storagemigration/v1alpha1/doc.go | 2 +- .../pkg/apis/apiextensions/doc.go | 2 +- .../pkg/apis/apiextensions/v1/doc.go | 2 +- .../pkg/apis/apiextensions/v1beta1/doc.go | 2 +- .../apiextensions/v1/apiextensions_client.go | 12 +- .../v1beta1/apiextensions_client.go | 12 +- .../k8s.io/apimachinery/pkg/api/errors/doc.go | 2 +- .../k8s.io/apimachinery/pkg/api/meta/doc.go | 2 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 3 + .../pkg/api/operation/operation.go | 56 + .../apimachinery/pkg/api/validation/doc.go | 2 +- .../pkg/api/validation/generic.go | 2 +- .../pkg/apis/meta/internalversion/doc.go | 2 +- .../apis/meta/internalversion/scheme/doc.go | 2 +- .../pkg/apis/meta/internalversion/types.go | 2 - .../apimachinery/pkg/apis/meta/v1/doc.go | 2 +- .../pkg/apis/meta/v1/micro_time_fuzz.go | 13 +- .../pkg/apis/meta/v1/time_fuzz.go | 13 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 31 +- .../apis/meta/v1/unstructured/unstructured.go | 4 +- .../pkg/apis/meta/v1/validation/validation.go | 2 +- .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 2 +- .../k8s.io/apimachinery/pkg/conversion/doc.go | 2 +- .../pkg/conversion/queryparams/doc.go | 2 +- vendor/k8s.io/apimachinery/pkg/fields/doc.go | 2 +- vendor/k8s.io/apimachinery/pkg/labels/doc.go | 2 +- vendor/k8s.io/apimachinery/pkg/runtime/doc.go | 2 +- 
.../apimachinery/pkg/runtime/interfaces.go | 1 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 39 + .../serializer/cbor/internal/modes/custom.go | 4 +- .../pkg/runtime/serializer/codec_factory.go | 23 +- .../runtime/serializer/json/collections.go | 230 + .../pkg/runtime/serializer/json/json.go | 16 +- .../serializer/protobuf/collections.go | 174 + .../pkg/runtime/serializer/protobuf/doc.go | 2 +- .../runtime/serializer/protobuf/protobuf.go | 87 +- .../apimachinery/pkg/runtime/types_proto.go | 127 +- vendor/k8s.io/apimachinery/pkg/types/doc.go | 2 +- .../k8s.io/apimachinery/pkg/util/diff/diff.go | 2 +- .../apimachinery/pkg/util/errors/doc.go | 2 +- .../apimachinery/pkg/util/framer/framer.go | 6 +- .../apimachinery/pkg/util/httpstream/doc.go | 2 +- .../pkg/util/httpstream/wsstream/doc.go | 2 +- .../pkg/util/intstr/instr_fuzz.go | 14 +- .../k8s.io/apimachinery/pkg/util/proxy/doc.go | 2 +- .../apimachinery/pkg/util/runtime/runtime.go | 46 +- .../k8s.io/apimachinery/pkg/util/sets/doc.go | 2 +- .../util/validation/field/error_matcher.go | 212 + .../pkg/util/validation/field/errors.go | 132 +- .../apimachinery/pkg/util/validation/ip.go | 278 + .../pkg/util/validation/validation.go | 40 - .../apimachinery/pkg/util/version/doc.go | 2 +- .../apimachinery/pkg/util/version/version.go | 18 +- .../apimachinery/pkg/util/wait/backoff.go | 50 +- .../k8s.io/apimachinery/pkg/util/wait/doc.go | 2 +- .../k8s.io/apimachinery/pkg/util/wait/loop.go | 4 +- .../k8s.io/apimachinery/pkg/util/wait/wait.go | 9 +- .../apimachinery/pkg/util/yaml/decoder.go | 170 +- .../pkg/util/yaml/stream_reader.go | 130 + vendor/k8s.io/apimachinery/pkg/version/doc.go | 4 +- .../k8s.io/apimachinery/pkg/version/types.go | 28 +- vendor/k8s.io/apimachinery/pkg/watch/doc.go | 2 +- .../apimachinery/pkg/watch/streamwatcher.go | 15 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 35 +- .../apiserver/pkg/authentication/user/doc.go | 2 +- .../apps/v1/deploymentstatus.go | 9 + .../apps/v1/replicasetstatus.go | 9 + .../apps/v1beta1/deploymentstatus.go | 9 + .../apps/v1beta2/deploymentstatus.go | 9 + .../apps/v1beta2/replicasetstatus.go | 9 + .../autoscaling/v2/hpascalingrules.go | 10 + .../v1beta1/clustertrustbundle.go | 253 + .../v1beta1/clustertrustbundlespec.go | 48 + .../coordination/v1beta1/leasecandidate.go | 255 + .../v1beta1/leasecandidatespec.go | 89 + .../core/v1/containerstatus.go | 9 + .../applyconfigurations/core/v1/lifecycle.go | 17 +- .../core/v1/nodeswapstatus.go | 39 + .../core/v1/nodesysteminfo.go | 29 +- .../core/v1/podcondition.go | 9 + .../applyconfigurations/core/v1/podstatus.go | 9 + .../discovery/v1/endpointhints.go | 14 + .../discovery/v1/fornode.go | 39 + .../discovery/v1beta1/endpointhints.go | 14 + .../discovery/v1beta1/fornode.go | 39 + .../extensions/v1beta1/deploymentstatus.go | 9 + .../extensions/v1beta1/replicasetstatus.go | 9 + .../applyconfigurations/internal/internal.go | 1344 +- .../networking/v1/ipaddress.go | 253 + .../networking/v1/ipaddressspec.go | 39 + .../networking/v1/parentreference.go | 66 + .../networking/v1/servicecidr.go | 262 + .../networking/v1/servicecidrspec.go | 41 + .../networking/v1/servicecidrstatus.go | 48 + .../resource/v1alpha3/basicdevice.go | 60 +- .../resource/v1alpha3/counter.go | 43 + .../resource/v1alpha3/counterset.go | 54 + .../v1alpha3/devicecounterconsumption.go | 54 + .../resource/v1alpha3/devicerequest.go | 28 + .../v1alpha3/devicerequestallocationresult.go | 24 +- .../resource/v1alpha3/devicesubrequest.go | 98 + .../resource/v1alpha3/devicetaint.go | 71 + 
 .../resource/v1alpha3/devicetaintrule.go | 253 +
 .../resource/v1alpha3/devicetaintrulespec.go | 48 +
 .../resource/v1alpha3/devicetaintselector.go | 80 +
 .../resource/v1alpha3/devicetoleration.go | 79 +
 .../resource/v1alpha3/resourceslicespec.go | 35 +-
 .../resource/v1beta1/basicdevice.go | 60 +-
 .../resource/v1beta1/counter.go | 43 +
 .../resource/v1beta1/counterset.go | 54 +
 .../v1beta1/devicecounterconsumption.go | 54 +
 .../resource/v1beta1/devicerequest.go | 28 +
 .../v1beta1/devicerequestallocationresult.go | 24 +-
 .../resource/v1beta1/devicesubrequest.go | 98 +
 .../resource/v1beta1/devicetaint.go | 71 +
 .../resource/v1beta1/devicetoleration.go | 79 +
 .../resource/v1beta1/resourceslicespec.go | 35 +-
 .../resource/v1beta2/allocateddevicestatus.go | 94 +
 .../resource/v1beta2/allocationresult.go | 52 +
 .../resource/v1beta2/celdeviceselector.go | 39 +
 .../resource/v1beta2/counter.go | 43 +
 .../resource/v1beta2/counterset.go | 54 +
 .../resource/v1beta2/device.go | 129 +
 .../v1beta2/deviceallocationconfiguration.go | 63 +
 .../v1beta2/deviceallocationresult.go | 58 +
 .../resource/v1beta2/deviceattribute.go | 66 +
 .../resource/v1beta2/devicecapacity.go | 43 +
 .../resource/v1beta2/deviceclaim.go | 72 +
 .../v1beta2/deviceclaimconfiguration.go | 50 +
 .../resource/v1beta2/deviceclass.go | 253 +
 .../v1beta2/deviceclassconfiguration.go | 39 +
 .../resource/v1beta2/deviceclassspec.go | 58 +
 .../resource/v1beta2/deviceconfiguration.go | 39 +
 .../resource/v1beta2/deviceconstraint.go | 54 +
 .../v1beta2/devicecounterconsumption.go | 54 +
 .../resource/v1beta2/devicerequest.go | 62 +
 .../v1beta2/devicerequestallocationresult.go | 89 +
 .../resource/v1beta2/deviceselector.go | 39 +
 .../resource/v1beta2/devicesubrequest.go | 98 +
 .../resource/v1beta2/devicetaint.go | 71 +
 .../resource/v1beta2/devicetoleration.go | 79 +
 .../resource/v1beta2/exactdevicerequest.go | 98 +
 .../resource/v1beta2/networkdevicedata.go | 59 +
 .../v1beta2/opaquedeviceconfiguration.go | 52 +
 .../resource/v1beta2/resourceclaim.go | 264 +
 .../v1beta2/resourceclaimconsumerreference.go | 70 +
 .../resource/v1beta2/resourceclaimspec.go | 39 +
 .../resource/v1beta2/resourceclaimstatus.go | 67 +
 .../resource/v1beta2/resourceclaimtemplate.go | 255 +
 .../v1beta2/resourceclaimtemplatespec.go | 194 +
 .../resource/v1beta2/resourcepool.go | 57 +
 .../resource/v1beta2/resourceslice.go | 253 +
 .../resource/v1beta2/resourceslicespec.go | 116 +
 .../storage/v1/csidriverspec.go | 25 +-
 .../storage/v1/volumeerror.go | 13 +-
 .../storage/v1alpha1/volumeerror.go | 13 +-
 .../storage/v1beta1/csidriverspec.go | 25 +-
 .../storage/v1beta1/volumeerror.go | 13 +-
 .../discovery/aggregated_discovery.go | 2 +-
 .../client-go/discovery/discovery_client.go | 3 +-
 vendor/k8s.io/client-go/discovery/doc.go | 2 +-
 .../client-go/features/known_features.go | 7 +
 vendor/k8s.io/client-go/gentype/fake.go | 1 +
 .../v1/mutatingwebhookconfiguration.go | 16 +-
 .../v1/validatingadmissionpolicy.go | 16 +-
 .../v1/validatingadmissionpolicybinding.go | 16 +-
 .../v1/validatingwebhookconfiguration.go | 16 +-
 .../v1alpha1/mutatingadmissionpolicy.go | 16 +-
 .../mutatingadmissionpolicybinding.go | 16 +-
 .../v1alpha1/validatingadmissionpolicy.go | 16 +-
 .../validatingadmissionpolicybinding.go | 16 +-
 .../v1beta1/mutatingwebhookconfiguration.go | 16 +-
 .../v1beta1/validatingadmissionpolicy.go | 16 +-
 .../validatingadmissionpolicybinding.go | 16 +-
 .../v1beta1/validatingwebhookconfiguration.go | 16 +-
 .../v1alpha1/storageversion.go | 16 +-
 .../informers/apps/v1/controllerrevision.go | 16 +-
 .../client-go/informers/apps/v1/daemonset.go | 16 +-
 .../client-go/informers/apps/v1/deployment.go | 16 +-
 .../client-go/informers/apps/v1/replicaset.go | 16 +-
 .../informers/apps/v1/statefulset.go | 16 +-
 .../apps/v1beta1/controllerrevision.go | 16 +-
 .../informers/apps/v1beta1/deployment.go | 16 +-
 .../informers/apps/v1beta1/statefulset.go | 16 +-
 .../apps/v1beta2/controllerrevision.go | 16 +-
 .../informers/apps/v1beta2/daemonset.go | 16 +-
 .../informers/apps/v1beta2/deployment.go | 16 +-
 .../informers/apps/v1beta2/replicaset.go | 16 +-
 .../informers/apps/v1beta2/statefulset.go | 16 +-
 .../autoscaling/v1/horizontalpodautoscaler.go | 16 +-
 .../autoscaling/v2/horizontalpodautoscaler.go | 16 +-
 .../v2beta1/horizontalpodautoscaler.go | 16 +-
 .../v2beta2/horizontalpodautoscaler.go | 16 +-
 .../client-go/informers/batch/v1/cronjob.go | 16 +-
 .../client-go/informers/batch/v1/job.go | 16 +-
 .../informers/batch/v1beta1/cronjob.go | 16 +-
 .../v1/certificatesigningrequest.go | 16 +-
 .../v1alpha1/clustertrustbundle.go | 16 +-
 .../v1beta1/certificatesigningrequest.go | 16 +-
 .../v1beta1/clustertrustbundle.go | 101 +
 .../certificates/v1beta1/interface.go | 7 +
 .../informers/coordination/v1/lease.go | 16 +-
 .../coordination/v1alpha2/leasecandidate.go | 16 +-
 .../coordination/v1beta1/interface.go | 7 +
 .../informers/coordination/v1beta1/lease.go | 16 +-
 .../coordination/v1beta1/leasecandidate.go | 102 +
 .../informers/core/v1/componentstatus.go | 16 +-
 .../client-go/informers/core/v1/configmap.go | 16 +-
 .../client-go/informers/core/v1/endpoints.go | 16 +-
 .../client-go/informers/core/v1/event.go | 16 +-
 .../client-go/informers/core/v1/limitrange.go | 16 +-
 .../client-go/informers/core/v1/namespace.go | 16 +-
 .../client-go/informers/core/v1/node.go | 16 +-
 .../informers/core/v1/persistentvolume.go | 16 +-
 .../core/v1/persistentvolumeclaim.go | 16 +-
 .../k8s.io/client-go/informers/core/v1/pod.go | 16 +-
 .../informers/core/v1/podtemplate.go | 16 +-
 .../core/v1/replicationcontroller.go | 16 +-
 .../informers/core/v1/resourcequota.go | 16 +-
 .../client-go/informers/core/v1/secret.go | 16 +-
 .../client-go/informers/core/v1/service.go | 16 +-
 .../informers/core/v1/serviceaccount.go | 16 +-
 .../informers/discovery/v1/endpointslice.go | 16 +-
 .../discovery/v1beta1/endpointslice.go | 16 +-
 vendor/k8s.io/client-go/informers/doc.go | 2 +-
 .../client-go/informers/events/v1/event.go | 16 +-
 .../informers/events/v1beta1/event.go | 16 +-
 .../informers/extensions/v1beta1/daemonset.go | 16 +-
 .../extensions/v1beta1/deployment.go | 16 +-
 .../informers/extensions/v1beta1/ingress.go | 16 +-
 .../extensions/v1beta1/networkpolicy.go | 16 +-
 .../extensions/v1beta1/replicaset.go | 16 +-
 .../informers/flowcontrol/v1/flowschema.go | 16 +-
 .../v1/prioritylevelconfiguration.go | 16 +-
 .../flowcontrol/v1beta1/flowschema.go | 16 +-
 .../v1beta1/prioritylevelconfiguration.go | 16 +-
 .../flowcontrol/v1beta2/flowschema.go | 16 +-
 .../v1beta2/prioritylevelconfiguration.go | 16 +-
 .../flowcontrol/v1beta3/flowschema.go | 16 +-
 .../v1beta3/prioritylevelconfiguration.go | 16 +-
 vendor/k8s.io/client-go/informers/generic.go | 21 +
 .../informers/networking/v1/ingress.go | 16 +-
 .../informers/networking/v1/ingressclass.go | 16 +-
 .../informers/networking/v1/interface.go | 14 +
 .../informers/networking/v1/ipaddress.go | 101 +
 .../informers/networking/v1/networkpolicy.go | 16 +-
 .../informers/networking/v1/servicecidr.go | 101 +
 .../networking/v1alpha1/ipaddress.go | 16 +-
 .../networking/v1alpha1/servicecidr.go | 16 +-
 .../informers/networking/v1beta1/ingress.go | 16 +-
 .../networking/v1beta1/ingressclass.go | 16 +-
 .../informers/networking/v1beta1/ipaddress.go | 16 +-
 .../networking/v1beta1/servicecidr.go | 16 +-
 .../informers/node/v1/runtimeclass.go | 16 +-
 .../informers/node/v1alpha1/runtimeclass.go | 16 +-
 .../informers/node/v1beta1/runtimeclass.go | 16 +-
 .../policy/v1/poddisruptionbudget.go | 16 +-
 .../policy/v1beta1/poddisruptionbudget.go | 16 +-
 .../informers/rbac/v1/clusterrole.go | 16 +-
 .../informers/rbac/v1/clusterrolebinding.go | 16 +-
 .../client-go/informers/rbac/v1/role.go | 16 +-
 .../informers/rbac/v1/rolebinding.go | 16 +-
 .../informers/rbac/v1alpha1/clusterrole.go | 16 +-
 .../rbac/v1alpha1/clusterrolebinding.go | 16 +-
 .../client-go/informers/rbac/v1alpha1/role.go | 16 +-
 .../informers/rbac/v1alpha1/rolebinding.go | 16 +-
 .../informers/rbac/v1beta1/clusterrole.go | 16 +-
 .../rbac/v1beta1/clusterrolebinding.go | 16 +-
 .../client-go/informers/rbac/v1beta1/role.go | 16 +-
 .../informers/rbac/v1beta1/rolebinding.go | 16 +-
 .../client-go/informers/resource/interface.go | 8 +
 .../resource/v1alpha3/deviceclass.go | 16 +-
 .../resource/v1alpha3/devicetaintrule.go | 101 +
 .../informers/resource/v1alpha3/interface.go | 7 +
 .../resource/v1alpha3/resourceclaim.go | 16 +-
 .../v1alpha3/resourceclaimtemplate.go | 16 +-
 .../resource/v1alpha3/resourceslice.go | 16 +-
 .../informers/resource/v1beta1/deviceclass.go | 16 +-
 .../resource/v1beta1/resourceclaim.go | 16 +-
 .../resource/v1beta1/resourceclaimtemplate.go | 16 +-
 .../resource/v1beta1/resourceslice.go | 16 +-
 .../informers/resource/v1beta2/deviceclass.go | 101 +
 .../informers/resource/v1beta2/interface.go | 66 +
 .../resource/v1beta2/resourceclaim.go | 102 +
 .../resource/v1beta2/resourceclaimtemplate.go | 102 +
 .../resource/v1beta2/resourceslice.go | 101 +
 .../informers/scheduling/v1/priorityclass.go | 16 +-
 .../scheduling/v1alpha1/priorityclass.go | 16 +-
 .../scheduling/v1beta1/priorityclass.go | 16 +-
 .../informers/storage/v1/csidriver.go | 16 +-
 .../client-go/informers/storage/v1/csinode.go | 16 +-
 .../storage/v1/csistoragecapacity.go | 16 +-
 .../informers/storage/v1/storageclass.go | 16 +-
 .../informers/storage/v1/volumeattachment.go | 16 +-
 .../storage/v1alpha1/csistoragecapacity.go | 16 +-
 .../storage/v1alpha1/volumeattachment.go | 16 +-
 .../storage/v1alpha1/volumeattributesclass.go | 16 +-
 .../informers/storage/v1beta1/csidriver.go | 16 +-
 .../informers/storage/v1beta1/csinode.go | 16 +-
 .../storage/v1beta1/csistoragecapacity.go | 16 +-
 .../informers/storage/v1beta1/storageclass.go | 16 +-
 .../storage/v1beta1/volumeattachment.go | 16 +-
 .../storage/v1beta1/volumeattributesclass.go | 16 +-
 .../v1alpha1/storageversionmigration.go | 16 +-
 .../k8s.io/client-go/kubernetes/clientset.go | 13 +
 vendor/k8s.io/client-go/kubernetes/doc.go | 2 +-
 vendor/k8s.io/client-go/kubernetes/import.go | 2 +-
 .../client-go/kubernetes/scheme/register.go | 2 +
 .../v1/admissionregistration_client.go | 12 +-
 .../v1alpha1/admissionregistration_client.go | 12 +-
 .../v1beta1/admissionregistration_client.go | 12 +-
 .../v1alpha1/apiserverinternal_client.go | 12 +-
 .../kubernetes/typed/apps/v1/apps_client.go | 12 +-
 .../typed/apps/v1beta1/apps_client.go | 12 +-
 .../typed/apps/v1beta2/apps_client.go | 12 +-
 .../v1/authentication_client.go | 12 +-
 .../v1alpha1/authentication_client.go | 12 +-
 .../v1beta1/authentication_client.go | 12 +-
 .../authorization/v1/authorization_client.go | 12 +-
 .../v1beta1/authorization_client.go | 12 +-
 .../autoscaling/v1/autoscaling_client.go | 12 +-
 .../autoscaling/v2/autoscaling_client.go | 12 +-
 .../autoscaling/v2beta1/autoscaling_client.go | 12 +-
 .../autoscaling/v2beta2/autoscaling_client.go | 12 +-
 .../kubernetes/typed/batch/v1/batch_client.go | 12 +-
 .../typed/batch/v1beta1/batch_client.go | 12 +-
 .../certificates/v1/certificates_client.go | 12 +-
 .../v1alpha1/certificates_client.go | 12 +-
 .../v1beta1/certificates_client.go | 17 +-
 .../v1beta1/clustertrustbundle.go | 73 +
 .../v1beta1/generated_expansion.go | 2 +
 .../coordination/v1/coordination_client.go | 12 +-
 .../v1alpha2/coordination_client.go | 12 +-
 .../v1beta1/coordination_client.go | 17 +-
 .../v1beta1/generated_expansion.go | 2 +
 .../coordination/v1beta1/leasecandidate.go | 71 +
 .../kubernetes/typed/core/v1/core_client.go | 12 +-
 .../typed/core/v1/event_expansion.go | 65 +-
 .../typed/discovery/v1/discovery_client.go | 12 +-
 .../discovery/v1beta1/discovery_client.go | 12 +-
 .../typed/events/v1/events_client.go | 12 +-
 .../typed/events/v1beta1/event_expansion.go | 6 +-
 .../typed/events/v1beta1/events_client.go | 12 +-
 .../extensions/v1beta1/extensions_client.go | 12 +-
 .../flowcontrol/v1/flowcontrol_client.go | 12 +-
 .../flowcontrol/v1beta1/flowcontrol_client.go | 12 +-
 .../flowcontrol/v1beta2/flowcontrol_client.go | 12 +-
 .../flowcontrol/v1beta3/flowcontrol_client.go | 12 +-
 .../networking/v1/generated_expansion.go | 4 +
 .../typed/networking/v1/ipaddress.go | 71 +
 .../typed/networking/v1/networking_client.go | 22 +-
 .../typed/networking/v1/servicecidr.go | 75 +
 .../networking/v1alpha1/networking_client.go | 12 +-
 .../networking/v1beta1/networking_client.go | 12 +-
 .../kubernetes/typed/node/v1/node_client.go | 12 +-
 .../typed/node/v1alpha1/node_client.go | 12 +-
 .../typed/node/v1beta1/node_client.go | 12 +-
 .../typed/policy/v1/policy_client.go | 12 +-
 .../typed/policy/v1beta1/policy_client.go | 12 +-
 .../kubernetes/typed/rbac/v1/rbac_client.go | 12 +-
 .../typed/rbac/v1alpha1/rbac_client.go | 12 +-
 .../typed/rbac/v1beta1/rbac_client.go | 12 +-
 .../resource/v1alpha3/devicetaintrule.go | 71 +
 .../resource/v1alpha3/generated_expansion.go | 2 +
 .../resource/v1alpha3/resource_client.go | 17 +-
 .../typed/resource/v1beta1/resource_client.go | 12 +-
 .../typed/resource/v1beta2/deviceclass.go | 71 +
 .../kubernetes/typed/resource/v1beta2}/doc.go | 8 +-
 .../resource/v1beta2/generated_expansion.go} | 24 +-
 .../typed/resource/v1beta2/resource_client.go | 116 +
 .../typed/resource/v1beta2/resourceclaim.go | 75 +
 .../resource/v1beta2/resourceclaimtemplate.go | 71 +
 .../typed/resource/v1beta2/resourceslice.go | 71 +
 .../typed/scheduling/v1/scheduling_client.go | 12 +-
 .../scheduling/v1alpha1/scheduling_client.go | 12 +-
 .../scheduling/v1beta1/scheduling_client.go | 12 +-
 .../typed/storage/v1/storage_client.go | 12 +-
 .../typed/storage/v1alpha1/storage_client.go | 12 +-
 .../typed/storage/v1beta1/storage_client.go | 12 +-
 .../v1alpha1/storagemigration_client.go | 12 +-
 .../v1beta1/clustertrustbundle.go | 48 +
 .../v1beta1/expansion_generated.go | 4 +
 .../v1beta1/expansion_generated.go | 8 +
 .../coordination/v1beta1/leasecandidate.go | 70 +
 vendor/k8s.io/client-go/listers/doc.go | 2 +-
 .../networking/v1/expansion_generated.go | 8 +
 .../listers/networking/v1/ipaddress.go | 48 +
 .../listers/networking/v1/servicecidr.go | 48 +
 .../resource/v1alpha3/devicetaintrule.go | 48 +
 .../resource/v1alpha3/expansion_generated.go | 4 +
 .../listers/resource/v1beta2/deviceclass.go | 48 +
 .../resource/v1beta2/expansion_generated.go | 43 +
 .../listers/resource/v1beta2/resourceclaim.go | 70 +
 .../resource/v1beta2/resourceclaimtemplate.go | 70 +
 .../listers/resource/v1beta2/resourceslice.go | 48 +
 .../pkg/apis/clientauthentication/doc.go | 2 +-
 .../pkg/apis/clientauthentication/v1/doc.go | 2 +-
 .../apis/clientauthentication/v1beta1/doc.go | 2 +-
 vendor/k8s.io/client-go/pkg/version/doc.go | 2 +-
 vendor/k8s.io/client-go/rest/.mockery.yaml | 10 +
 vendor/k8s.io/client-go/rest/client.go | 6 +-
 vendor/k8s.io/client-go/rest/config.go | 85 +-
 vendor/k8s.io/client-go/rest/plugin.go | 7 +-
 vendor/k8s.io/client-go/rest/request.go | 138 +-
 vendor/k8s.io/client-go/rest/urlbackoff.go | 101 +-
 vendor/k8s.io/client-go/rest/warnings.go | 57 +-
 vendor/k8s.io/client-go/rest/with_retry.go | 12 +-
 .../client-go/tools/cache/controller.go | 84 +-
 .../client-go/tools/cache/delta_fifo.go | 125 +-
 vendor/k8s.io/client-go/tools/cache/doc.go | 2 +-
 vendor/k8s.io/client-go/tools/cache/fifo.go | 97 +-
 .../k8s.io/client-go/tools/cache/listers.go | 7 +-
 .../k8s.io/client-go/tools/cache/listwatch.go | 174 +-
 .../client-go/tools/cache/mutation_cache.go | 8 +-
 .../tools/cache/mutation_detector.go | 1 +
 .../k8s.io/client-go/tools/cache/reflector.go | 238 +-
 .../client-go/tools/cache/shared_informer.go | 194 +-
 .../client-go/tools/cache/the_real_fifo.go | 407 +
 .../client-go/tools/clientcmd/api/doc.go | 2 +-
 .../client-go/tools/clientcmd/api/v1/doc.go | 2 +-
 .../k8s.io/client-go/tools/clientcmd/doc.go | 2 +-
 .../tools/leaderelection/leasecandidate.go | 18 +-
 vendor/k8s.io/client-go/tools/record/doc.go | 2 +-
 vendor/k8s.io/client-go/tools/record/event.go | 2 +-
 .../client-go/tools/record/util/util.go | 17 +
 .../client-go/tools/remotecommand/doc.go | 2 +-
 .../tools/remotecommand/errorstream.go | 2 +-
 .../tools/remotecommand/websocket.go | 19 +-
 vendor/k8s.io/client-go/transport/cache.go | 8 +-
 .../client-go/transport/cert_rotation.go | 17 +-
 .../client-go/transport/round_trippers.go | 192 +-
 .../client-go/transport/token_source.go | 5 +-
 .../k8s.io/client-go/transport/transport.go | 2 +-
 vendor/k8s.io/client-go/util/cert/cert.go | 48 +-
 .../data_consistency_detector.go | 2 +-
 .../client-go/util/flowcontrol/backoff.go | 5 +-
 .../util/workqueue/delaying_queue.go | 19 +-
 vendor/k8s.io/client-go/util/workqueue/doc.go | 2 +-
 .../client-go/util/workqueue/parallelizer.go | 2 +-
 .../cmd/client-gen/args/args.go | 2 +-
 .../fake/generator_fake_for_clientset.go | 13 +-
 .../generators/generator_for_group.go | 16 +-
 .../code-generator/cmd/client-gen/main.go | 3 +
 .../cmd/informer-gen/generators/informer.go | 19 +-
 .../cmd/informer-gen/generators/types.go | 3 +-
 vendor/k8s.io/code-generator/doc.go | 2 +-
 vendor/k8s.io/code-generator/kube_codegen.sh | 58 +-
 .../component-base/cli/flag/tracker_flag.go | 82 +
 .../featuregate/feature_gate.go | 41 +-
 .../component-base/featuregate/registry.go | 454 -
 .../k8s.io/component-base/logs/api/v1/doc.go | 2 +-
 .../logs/api/v1/kube_features.go | 36 +-
 .../component-base/logs/api/v1/options.go | 6 +-
 .../component-base/tracing/api/v1/doc.go | 2 +-
 vendor/k8s.io/component-base/version/base.go | 2 +-
 .../k8s.io/component-base/version/version.go | 159 +-
 vendor/k8s.io/gengo/v2/README.md | 2 +-
 vendor/k8s.io/gengo/v2/comments.go | 217 +-
 .../gengo/v2/generator/snippet_writer.go | 18 +
 vendor/k8s.io/gengo/v2/namer/namer.go | 6 +-
 vendor/k8s.io/gengo/v2/parser/parse.go | 12 +
 vendor/k8s.io/gengo/v2/types/types.go | 28 +-
 .../pkg/apis/apiregistration/doc.go | 2 +-
 .../pkg/apis/apiregistration/v1/doc.go | 2 +-
 .../pkg/apis/apiregistration/v1beta1/doc.go | 2 +-
 .../v1/apiregistration_client.go | 12 +-
 .../kube-openapi/pkg/generators/api_linter.go | 3 +
 .../kube-openapi/pkg/generators/markers.go | 35 +-
 .../rules/list_type_streaming_tags.go | 117 +
 .../kube-openapi/pkg/handler3/handler.go | 2 +-
 vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go | 146 +-
 vendor/k8s.io/kubelet/config/v1beta1/doc.go | 2 +-
 vendor/k8s.io/kubelet/config/v1beta1/types.go | 77 +-
 .../config/v1beta1/zz_generated.deepcopy.go | 36 +
 vendor/k8s.io/utils/buffer/ring_growing.go | 116 +-
 .../k8s.io/utils/clock/testing/fake_clock.go | 362 -
 .../clock/testing/simple_interval_clock.go | 44 -
 vendor/modules.txt | 285 +-
 .../cluster-api/api/v1beta1/cluster_types.go | 803 +-
 .../api/v1beta1/clusterclass_types.go | 557 +-
 .../cluster-api/api/v1beta1/common_types.go | 46 +-
 .../api/v1beta1/condition_consts.go | 9 +-
 .../api/v1beta1/condition_types.go | 24 +-
 .../cluster-api/api/v1beta1/machine_types.go | 481 +-
 .../api/v1beta1/machinedeployment_types.go | 320 +-
 .../api/v1beta1/machinedrainrules_types.go | 235 +
 .../api/v1beta1/machinehealthcheck_types.go | 110 +-
 .../api/v1beta1/machineset_types.go | 263 +-
 .../api/v1beta1/v1beta2_condition_consts.go | 215 +
 .../api/v1beta1/zz_generated.deepcopy.go | 609 +
 .../api/v1beta1/zz_generated.openapi.go | 1828 +-
 vendor/sigs.k8s.io/cluster-api/errors/doc.go | 2 +
 .../controller-runtime/.golangci.yml | 13 +-
 .../controller-runtime/.gomodcheck.yaml | 3 +
 .../sigs.k8s.io/controller-runtime/Makefile | 6 +-
 .../sigs.k8s.io/controller-runtime/README.md | 1 +
 .../controller-runtime/pkg/builder/webhook.go | 99 +-
 .../controller-runtime/pkg/cache/cache.go | 22 +-
 .../pkg/cache/delegating_by_gvk_cache.go | 7 +-
 .../pkg/cache/internal/informers.go | 6 +-
 .../pkg/client/apiutil/restmapper.go | 137 +-
 .../controller-runtime/pkg/client/client.go | 4 +-
 .../pkg/client/fake/client.go | 85 +-
 .../controller-runtime/pkg/client/fake/doc.go | 2 +-
 .../pkg/client/interfaces.go | 14 +-
 .../controller-runtime/pkg/cluster/cluster.go | 18 +-
 .../pkg/config/controller.go | 6 +
 .../pkg/controller/controller.go | 14 +-
 .../controllerutil/controllerutil.go | 15 +
 .../controller-runtime/pkg/controller/name.go | 2 +-
 .../pkg/controller/priorityqueue/metrics.go | 146 +
 .../controller/priorityqueue/priorityqueue.go | 403 +
 .../controller-runtime/pkg/handler/enqueue.go | 19 +-
 .../pkg/handler/enqueue_mapped.go | 45 +-
 .../pkg/handler/enqueue_owner.go | 2 +-
 .../pkg/handler/eventhandler.go | 126 +-
 .../pkg/internal/controller/controller.go | 123 +-
 .../pkg/internal/metrics/workqueue.go | 131 +
 .../pkg/internal/source/kind.go | 2 +-
 .../pkg/metrics/server/server.go | 2 +-
 .../pkg/metrics/workqueue.go | 101 -
 .../pkg/predicate/predicate.go | 6 +-
 .../pkg/webhook/admission/defaulter.go | 84 -
 .../pkg/webhook/admission/defaulter_custom.go | 83 +-
 .../pkg/webhook/admission/validator.go | 127 -
 .../pkg/webhook/admission/validator_custom.go | 3 +
 .../controller-runtime/pkg/webhook/alias.go | 8 -
 .../controller-runtime/pkg/webhook/server.go | 4 +-
 .../cmd/controller-gen/main.go | 21 +-
 .../pkg/applyconfiguration/doc.go | 18 +
 .../pkg/applyconfiguration/gen.go | 236 +
 .../zz_generated.markerhelp.go | 41 +
 .../pkg/crd/markers/validation.go | 27 +
 .../crd/markers/zz_generated.markerhelp.go | 16 +
 .../controller-tools/pkg/genall/output.go | 6 +
 .../controller-tools/pkg/rbac/parser.go | 10 +-
 .../pkg/rbac/zz_generated.markerhelp.go | 4 +
 vendor/sigs.k8s.io/randfill/CONTRIBUTING.md | 43 +
 .../compress => sigs.k8s.io/randfill}/LICENSE | 108 +-
 vendor/sigs.k8s.io/randfill/NOTICE | 24 +
 vendor/sigs.k8s.io/randfill/OWNERS | 8 +
 vendor/sigs.k8s.io/randfill/OWNERS_ALIASES | 14 +
 .../gofuzz => sigs.k8s.io/randfill}/README.md | 45 +-
 vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS | 16 +
 .../randfill}/bytesource/bytesource.go | 0
 .../sigs.k8s.io/randfill/code-of-conduct.md | 3 +
 vendor/sigs.k8s.io/randfill/randfill.go | 682 +
 vendor/sigs.k8s.io/yaml/.travis.yml | 12 -
 vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS | 24 -
 vendor/sigs.k8s.io/yaml/goyaml.v2/README.md | 200 +-
 .../yaml/goyaml.v2/yaml_aliases.go | 85 +
 vendor/sigs.k8s.io/yaml/yaml.go | 11 +-
 1704 files changed, 137872 insertions(+), 74376 deletions(-)
 delete mode 100644 vendor/github.com/evanphx/json-patch/.gitignore
 delete mode 100644 vendor/github.com/evanphx/json-patch/LICENSE
 delete mode 100644 vendor/github.com/evanphx/json-patch/README.md
 delete mode 100644 vendor/github.com/evanphx/json-patch/errors.go
 delete mode 100644 vendor/github.com/evanphx/json-patch/merge.go
 delete mode 100644 vendor/github.com/evanphx/json-patch/patch.go
 delete mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig
 delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/darwin.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/freebsd.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/internal.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix2.go
 create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/windows.go
 delete mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
 delete mode 100644 vendor/github.com/golang/protobuf/AUTHORS
 delete mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS
 delete mode 100644 vendor/github.com/golang/protobuf/LICENSE
 delete mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go
 delete mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go
 delete mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go
 delete mode 100644 vendor/github.com/golang/protobuf/proto/discard.go
 delete mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go
 delete mode 100644 vendor/github.com/golang/protobuf/proto/properties.go
 delete mode 100644 vendor/github.com/golang/protobuf/proto/proto.go
 delete mode 100644 vendor/github.com/golang/protobuf/proto/registry.go
 delete mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go
 delete mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go
 delete mode 100644 vendor/github.com/golang/protobuf/proto/wire.go
 delete mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go
 delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go
 delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
 delete mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go
 delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go
 delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
 delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go
 delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
 rename vendor/github.com/google/{gofuzz => btree}/LICENSE (100%)
 create mode 100644 vendor/github.com/google/btree/README.md
 create mode 100644 vendor/github.com/google/btree/btree.go
 create mode 100644 vendor/github.com/google/btree/btree_generic.go
 create mode 100644 vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go
 create mode 100644 vendor/github.com/google/gnostic-models/openapiv3/annotations.proto
 delete mode 100644 vendor/github.com/google/gofuzz/.travis.yml
 delete mode 100644 vendor/github.com/google/gofuzz/CONTRIBUTING.md
 delete mode 100644 vendor/github.com/google/gofuzz/fuzz.go
 delete mode 100644 vendor/github.com/gorilla/websocket/.editorconfig
 delete mode 100644 vendor/github.com/gorilla/websocket/.golangci.yml
 create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS
 delete mode 100644 vendor/github.com/gorilla/websocket/Makefile
 delete mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go
 delete mode 100644 vendor/github.com/klauspost/compress/.gitattributes
 delete mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml
 delete mode 100644 vendor/github.com/klauspost/compress/README.md
 delete mode 100644 vendor/github.com/klauspost/compress/SECURITY.md
 delete mode 100644 vendor/github.com/klauspost/compress/compressible.go
 delete mode 100644 vendor/github.com/klauspost/compress/fse/README.md
 delete mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go
 delete mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go
 delete mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go
 delete mode 100644 vendor/github.com/klauspost/compress/fse/compress.go
 delete mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go
 delete mode 100644 vendor/github.com/klauspost/compress/fse/fse.go
 delete mode 100644 vendor/github.com/klauspost/compress/gen.sh
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/README.md
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go
 delete mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
 delete mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
 delete mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
 delete mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE
 delete mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go
 delete mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go
 delete mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go
 delete mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
 delete mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go
 delete mode 100644 vendor/github.com/klauspost/compress/s2sx.mod
 delete mode 100644 vendor/github.com/klauspost/compress/s2sx.sum
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/README.md
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/history.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go
 delete mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go
 create mode 100644 vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go
 create mode 100644 vendor/github.com/openshift/api/config/v1/types_image_policy.go
 create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1/types_machineconfignode.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicy.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicystatus.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/extramapping.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/fulciocawithrekor.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicy.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicystatus.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pki.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pkicertificatesubject.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policy.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyfulciosubject.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyidentity.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policymatchexactrepository.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policymatchremapidentity.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/publickey.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimorexpressionmapping.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/custom.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gatherconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gathererconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gatherers.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagather.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagatherspec.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/persistentvolumeclaimreference.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/persistentvolumeconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/storage.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterimagepolicy.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagepolicy.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/config_client.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/doc.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/generated_expansion.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/insightsdatagather.go
 create mode 100644 vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterimagepolicy.go
 create mode 100644 vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagepolicy.go
 create mode 100644 vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha2/insightsdatagather.go
 create mode 100644 vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha2/interface.go
 create mode 100644 vendor/github.com/openshift/client-go/config/listers/config/v1/clusterimagepolicy.go
 create mode 100644 vendor/github.com/openshift/client-go/config/listers/config/v1/imagepolicy.go
 create mode 100644 vendor/github.com/openshift/client-go/config/listers/config/v1alpha2/expansion_generated.go
 create mode 100644 vendor/github.com/openshift/client-go/config/listers/config/v1alpha2/insightsdatagather.go
 create mode 100644 vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignode.go
 create mode 100644 vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodespec.go
 create mode 100644 vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodespecmachineconfigversion.go
 create mode 100644 vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatus.go
 create mode 100644 vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatusmachineconfigversion.go
 create mode 100644 vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatuspinnedimageset.go
 create mode 100644 vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/mcoobjectreference.go
 create mode 100644 vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfignode.go
 create mode 100644 vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfignode.go
 create mode 100644 vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/machineconfignode.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/accesslogging.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalnetworkdefinition.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalroutingcapabilities.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/addpage.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsclassicloadbalancerparameters.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetrics.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetricsrecursivewalkconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsloadbalancerparameters.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsnetworkloadbalancerparameters.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awssubnets.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurecsidriverconfigspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurediskencryptionset.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capability.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capabilityvisibility.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clienttls.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusternetworkentry.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configmapfilereference.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleconfigroute.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolecustomization.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleproviders.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolespec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolestatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/containerloggingdestinationparameters.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/defaultnetworkdefinition.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategory.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategorymeta.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcustomization.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnscache.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsnodeplacement.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsovertlsconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnstransportconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/egressipconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/endpointpublishingstrategy.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/exportnetworkflows.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/featuresmigration.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/filereferencesource.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gathererstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatherstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpcsidriverconfigspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpkmskeyreference.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcploadbalancerparameters.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/generationstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/healthcheck.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hostnetworkstrategy.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/httpcompressionpolicy.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hybridoverlayconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmcloudcsidriverconfigspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingress.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookie.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookieunion.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpheader.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpheaders.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheader.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactions.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactionunion.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaders.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpuniqueidheaderpolicy.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerlogging.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollersethttpheader.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipamconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipfixconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecfullmodeconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv4gatewayconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv4ovnkubernetesconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv6gatewayconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv6ovnkubernetesconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loggingdestination.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/logo.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanager.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanagerselector.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/managedbootimages.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/mtumigration.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/mtumigrationvalues.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/netflowconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkmigration.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyclusterstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecaction.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecfile.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecsshkey.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecunit.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusaction.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusfile.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatussshkey.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusunit.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeplacement.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeportstrategy.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodestatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/oauthapiserverstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftsdnconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorcondition.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorspec.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorstatus.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ovnkubernetesconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/partialselector.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspective.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/pinnedresourcereference.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/policyauditconfig.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/privatestrategy.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/projectaccess.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go
 create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/proxyconfig.go
vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/quickstarts.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/reloadservice.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/restartservice.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/routeadmissionpolicy.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/server.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecaspec.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecastatus.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverspec.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverstatus.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerspec.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerstatus.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/sflowconfig.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/simplemacvlanconfig.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamaddresses.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamconfig.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamdns.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamroutes.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorspec.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorstatus.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/statuspageprovider.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagestatus.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/syslogloggingdestinationparameters.go create mode 100644 
vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/theme.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstream.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstreamresolvers.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go delete mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/register.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/agent.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/aws.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/azure.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/controlplanecomponent_types.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/ibmcloud.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/kubevirt.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/openstack.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/operator.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/powervs.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.featuregated-crd-manifests.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/jsonpatch/jsonpatch.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/OWNERS delete mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/networking.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/event_helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiregistration.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/networking.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/canonicalize.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go create mode 100644 vendor/github.com/spf13/pflag/bool_func.go create mode 100644 vendor/github.com/spf13/pflag/errors.go create mode 100644 vendor/github.com/spf13/pflag/func.go create mode 100644 vendor/github.com/spf13/pflag/text.go create mode 100644 vendor/github.com/spf13/pflag/time.go rename vendor/go.opentelemetry.io/otel/{internal/attribute => attribute/internal}/attribute.go (97%) create mode 100644 vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/gen.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go create mode 100644 
vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/auto.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go create mode 100644 vendor/go.uber.org/automaxprocs/.codecov.yml rename vendor/{github.com/klauspost/compress => go.uber.org/automaxprocs}/.gitignore (74%) create mode 100644 vendor/go.uber.org/automaxprocs/CHANGELOG.md create mode 100644 vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md create mode 100644 vendor/go.uber.org/automaxprocs/CONTRIBUTING.md create mode 100644 vendor/go.uber.org/automaxprocs/LICENSE create mode 100644 vendor/go.uber.org/automaxprocs/Makefile create mode 100644 vendor/go.uber.org/automaxprocs/README.md create mode 100644 vendor/go.uber.org/automaxprocs/automaxprocs.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go create mode 100644 vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go create mode 100644 vendor/go.uber.org/automaxprocs/maxprocs/version.go create mode 100644 vendor/go.yaml.in/yaml/v2/.travis.yml rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/LICENSE (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/LICENSE.libyaml (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/NOTICE (100%) create mode 100644 vendor/go.yaml.in/yaml/v2/README.md rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/apic.go (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/decode.go (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/emitterc.go (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/encode.go (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/parserc.go (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/readerc.go (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/resolve.go (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/scannerc.go (100%) rename 
vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/sorter.go (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/writerc.go (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/yaml.go (99%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/yamlh.go (100%) rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/yamlprivateh.go (100%) delete mode 100644 vendor/golang.org/x/exp/LICENSE delete mode 100644 vendor/golang.org/x/exp/PATENTS delete mode 100644 vendor/golang.org/x/exp/maps/maps.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/deps.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/import.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/classify_call.go create mode 100644 vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go create mode 100644 vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go delete mode 100644 vendor/google.golang.org/protobuf/internal/editionssupport/editions.go rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe_go121.go => strings_unsafe.go} (99%) delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe_go121.go => value_unsafe.go} (99%) delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go delete mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go create mode 100644 vendor/k8s.io/api/networking/v1/well_known_labels.go create mode 100644 vendor/k8s.io/api/resource/v1beta1/devicetaint.go create mode 100644 vendor/k8s.io/api/resource/v1beta2/devicetaint.go create mode 100644 vendor/k8s.io/api/resource/v1beta2/doc.go create mode 100644 vendor/k8s.io/api/resource/v1beta2/generated.pb.go create mode 100644 vendor/k8s.io/api/resource/v1beta2/generated.proto create mode 100644 vendor/k8s.io/api/resource/v1beta2/register.go create mode 100644 vendor/k8s.io/api/resource/v1beta2/types.go create mode 100644 vendor/k8s.io/api/resource/v1beta2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/resource/v1beta2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/resource/v1beta2/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/operation/operation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/ip.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go create mode 
100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundlespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidatespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeswapstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/discovery/v1/fornode.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/fornode.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddressspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/parentreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidr.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/counter.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/counterset.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicecounterconsumption.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicesubrequest.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaint.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrulespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintselector.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetoleration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counter.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counterset.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecounterconsumption.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicesubrequest.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetaint.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetoleration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocateddevicestatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/celdeviceselector.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counter.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counterset.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/device.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationresult.go create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceattribute.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecapacity.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaim.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaimconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclass.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconstraint.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecounterconsumption.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequest.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequestallocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceselector.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicesubrequest.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetaint.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetoleration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/exactdevicerequest.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/networkdevicedata.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/opaquedeviceconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimconsumerreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplatespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourcepool.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslice.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslicespec.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1beta1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/informers/coordination/v1beta1/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1/servicecidr.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha3/devicetaintrule.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta2/deviceclass.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta2/interface.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaim.go create mode 100644 
vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta2/resourceslice.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/servicecidr.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/devicetaintrule.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/deviceclass.go rename vendor/{github.com/google/gofuzz => k8s.io/client-go/kubernetes/typed/resource/v1beta2}/doc.go (76%) rename vendor/{sigs.k8s.io/yaml/yaml_go110.go => k8s.io/client-go/kubernetes/typed/resource/v1beta2/generated_expansion.go} (55%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resource_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resourceslice.go create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1beta1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/listers/coordination/v1beta1/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1/servicecidr.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha3/devicetaintrule.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta2/deviceclass.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta2/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta2/resourceslice.go create mode 100644 vendor/k8s.io/client-go/rest/.mockery.yaml create mode 100644 vendor/k8s.io/client-go/tools/cache/the_real_fifo.go create mode 100644 vendor/k8s.io/component-base/cli/flag/tracker_flag.go delete mode 100644 vendor/k8s.io/component-base/featuregate/registry.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/list_type_streaming_tags.go delete mode 100644 vendor/k8s.io/utils/clock/testing/fake_clock.go delete mode 100644 vendor/k8s.io/utils/clock/testing/simple_interval_clock.go create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1beta1/machinedrainrules_types.go create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1beta1/v1beta2_condition_consts.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/metrics.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/metrics/workqueue.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/applyconfiguration/doc.go create mode 100644 
 vendor/sigs.k8s.io/controller-tools/pkg/applyconfiguration/gen.go
 create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/applyconfiguration/zz_generated.markerhelp.go
 create mode 100644 vendor/sigs.k8s.io/randfill/CONTRIBUTING.md
 rename vendor/{github.com/klauspost/compress => sigs.k8s.io/randfill}/LICENSE (67%)
 create mode 100644 vendor/sigs.k8s.io/randfill/NOTICE
 create mode 100644 vendor/sigs.k8s.io/randfill/OWNERS
 create mode 100644 vendor/sigs.k8s.io/randfill/OWNERS_ALIASES
 rename vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/README.md (53%)
 create mode 100644 vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS
 rename vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/bytesource/bytesource.go (100%)
 create mode 100644 vendor/sigs.k8s.io/randfill/code-of-conduct.md
 create mode 100644 vendor/sigs.k8s.io/randfill/randfill.go
 delete mode 100644 vendor/sigs.k8s.io/yaml/.travis.yml
 delete mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS
 create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go

diff --git a/cmd/cluster-node-tuning-operator/main.go b/cmd/cluster-node-tuning-operator/main.go
index 6d630d80c5..ca670c7517 100644
--- a/cmd/cluster-node-tuning-operator/main.go
+++ b/cmd/cluster-node-tuning-operator/main.go
@@ -33,6 +33,7 @@ import (
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -273,7 +274,7 @@ func setupFeatureGates(ctx context.Context, config *rest.Config, operatorNamespa
 	if err != nil {
 		klog.Warningf("unable to get owner reference (falling back to namespace): %v", err)
 	}
-	eventRecorder := events.NewKubeRecorder(k8sClient.CoreV1().Events(metav1.NamespaceNone), "performance-profile-controller", controllerRef)
+	eventRecorder := events.NewKubeRecorder(k8sClient.CoreV1().Events(metav1.NamespaceNone), "performance-profile-controller", controllerRef, clock.RealClock{})
 	informersFactory := configinformers.NewSharedInformerFactory(configClient, 10*time.Minute)
 	// By default, this will exit(0) the process if the featuregates ever change to a different set of values.
featureGateAccessor := featuregates.NewFeatureGateAccess( diff --git a/go.mod b/go.mod index 2c289bb96b..938aad477d 100644 --- a/go.mod +++ b/go.mod @@ -1,50 +1,48 @@ module github.com/openshift/cluster-node-tuning-operator -go 1.23.0 - -toolchain go1.23.4 +go 1.24.0 require ( github.com/RHsyseng/operator-utils v1.4.13 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf github.com/coreos/ignition v0.35.0 - github.com/coreos/ignition/v2 v2.21.0 + github.com/coreos/ignition/v2 v2.22.0 github.com/docker/go-units v0.5.0 github.com/go-logr/stdr v1.2.2 github.com/google/go-cmp v0.7.0 - github.com/jaypipes/ghw v0.16.0 + github.com/jaypipes/ghw v0.17.0 github.com/kevinburke/go-bindata v3.24.0+incompatible - github.com/onsi/ginkgo/v2 v2.22.1 - github.com/onsi/gomega v1.36.2 + github.com/onsi/ginkgo/v2 v2.23.4 + github.com/onsi/gomega v1.38.0 github.com/openshift-kni/debug-tools v0.2.3 - github.com/openshift-kni/k8sreporter v1.0.6 - github.com/openshift/api v0.0.0-20250407083304-00e6260bd8b8 - github.com/openshift/build-machinery-go v0.0.0-20250211133638-a00a772ae1a2 - github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 + github.com/openshift-kni/k8sreporter v1.0.7 + github.com/openshift/api v0.0.0-20250724131818-98ae8fbe2b34 + github.com/openshift/build-machinery-go v0.0.0-20250602125535-1b6d00b8c37c + github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 - github.com/openshift/hypershift/api v0.0.0-20240604072534-cd2d5291e2b7 - github.com/openshift/library-go v0.0.0-20240419113445-f1541d628746 + github.com/openshift/hypershift/api v0.0.0-20250724113115-0281b49cc465 + github.com/openshift/library-go v0.0.0-20250724123005-03d85c4e997c github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.21.1 + github.com/prometheus/client_golang v1.22.0 github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.6 + github.com/spf13/pflag v1.0.7 gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/ini.v1 v1.67.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.32.6 - k8s.io/apiextensions-apiserver v0.32.3 - k8s.io/apimachinery v0.32.6 - k8s.io/client-go v0.32.6 - k8s.io/code-generator v0.32.3 + k8s.io/api v0.33.3 + k8s.io/apiextensions-apiserver v0.33.3 + k8s.io/apimachinery v0.33.3 + k8s.io/client-go v0.33.3 + k8s.io/code-generator v0.33.3 k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.130.1 - k8s.io/kubelet v0.32.6 - k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e + k8s.io/kubelet v0.33.3 + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 kubevirt.io/qe-tools v0.1.8 - sigs.k8s.io/cluster-api v1.8.11 - sigs.k8s.io/controller-runtime v0.19.7 - sigs.k8s.io/controller-tools v0.17.3 - sigs.k8s.io/yaml v1.4.0 + sigs.k8s.io/cluster-api v1.10.4 + sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/controller-tools v0.18.0 + sigs.k8s.io/yaml v1.5.0 ) require ( @@ -57,11 +55,10 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/vcontext v0.0.0-20231102161604-685dc7299dc5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.12.1 // indirect - github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/fatih/color v1.18.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/ghodss/yaml 
v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-logr/logr v1.4.2 // indirect @@ -79,19 +76,17 @@ require ( github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/cadvisor v0.52.1 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jaypipes/pcidb v1.0.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -112,66 +107,68 @@ require ( github.com/vincent-petithory/dataurl v1.0.0 // indirect github.com/x448/float16 v0.8.4 // indirect go.mongodb.org/mongo-driver v1.11.1 // indirect - go.opentelemetry.io/otel v1.34.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.23.0 // indirect - golang.org/x/net v0.37.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect - golang.org/x/time v0.10.0 // indirect - golang.org/x/tools v0.30.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect - google.golang.org/grpc v1.70.0 // indirect - google.golang.org/protobuf v1.36.5 // indirect + golang.org/x/mod v0.25.0 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.15.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/term v0.32.0 // indirect + golang.org/x/text v0.26.0 // indirect + golang.org/x/time v0.11.0 // indirect + golang.org/x/tools v0.33.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/grpc v1.72.2 // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v1.0.1 // indirect - k8s.io/apiserver v0.32.3 // indirect - k8s.io/component-base v0.32.3 // indirect - k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect - k8s.io/kube-aggregator v0.32.3 // indirect - k8s.io/kube-openapi 
v0.0.0-20241105132330-32ad38e42d3f // indirect + k8s.io/apiserver v0.33.3 // indirect + k8s.io/component-base v0.33.3 // indirect + k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect + k8s.io/kube-aggregator v0.33.3 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) -// Pinned to kubernetes-1.32.3 +// Pinned to kubernetes-1.33.3 replace ( - k8s.io/api => k8s.io/api v0.32.3 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.32.3 - k8s.io/apimachinery => k8s.io/apimachinery v0.32.3 - k8s.io/apiserver => k8s.io/apiserver v0.32.3 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.32.3 - k8s.io/client-go => k8s.io/client-go v0.32.3 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.32.3 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.32.3 - k8s.io/code-generator => k8s.io/code-generator v0.32.3 - k8s.io/component-base => k8s.io/component-base v0.32.3 - k8s.io/component-helpers => k8s.io/component-helpers v0.32.3 - k8s.io/controller-manager => k8s.io/controller-manager v0.32.3 - k8s.io/cri-api => k8s.io/cri-api v0.32.3 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.32.3 - k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.32.3 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.32.3 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.32.3 - k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f - k8s.io/kube-proxy => k8s.io/kube-proxy v0.32.3 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.32.3 - k8s.io/kubectl => k8s.io/kubectl v0.32.3 - k8s.io/kubelet => k8s.io/kubelet v0.32.3 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.32.3 - k8s.io/metrics => k8s.io/metrics v0.32.3 - k8s.io/mount-utils => k8s.io/mount-utils v0.32.3 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.32.3 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.32.3 + k8s.io/api => k8s.io/api v0.33.3 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.33.3 + k8s.io/apimachinery => k8s.io/apimachinery v0.33.3 + k8s.io/apiserver => k8s.io/apiserver v0.33.3 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.33.3 + k8s.io/client-go => k8s.io/client-go v0.33.3 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.33.3 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.33.3 + k8s.io/code-generator => k8s.io/code-generator v0.33.3 + k8s.io/component-base => k8s.io/component-base v0.33.3 + k8s.io/component-helpers => k8s.io/component-helpers v0.33.3 + k8s.io/controller-manager => k8s.io/controller-manager v0.33.3 + k8s.io/cri-api => k8s.io/cri-api v0.33.3 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.33.3 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.33.3 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.33.3 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.33.3 + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff + k8s.io/kube-proxy => k8s.io/kube-proxy v0.33.3 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.3 + k8s.io/kubectl => k8s.io/kubectl v0.33.3 + k8s.io/kubelet => k8s.io/kubelet v0.33.3 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.33.3 + 
k8s.io/metrics => k8s.io/metrics v0.33.3 + k8s.io/mount-utils => k8s.io/mount-utils v0.33.3 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.33.3 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.33.3 ) diff --git a/go.sum b/go.sum index ea705f8a4a..93a2c5c688 100644 --- a/go.sum +++ b/go.sum @@ -28,7 +28,6 @@ github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 h1:4SPQljF/GJ8Q+ github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= @@ -57,8 +56,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/ignition v0.35.0 h1:UFodoYq1mOPrbEjtxIsZbThcDyQwAI1owczRDqWmKkQ= github.com/coreos/ignition v0.35.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= -github.com/coreos/ignition/v2 v2.21.0 h1:2q44zldXRuBXOJl/6Z6xBshZzi2muQuXRZBGGpvNbsk= -github.com/coreos/ignition/v2 v2.21.0/go.mod h1:axhFZ3jEgXBjKtKp0rSMv2li0Rt43rasp5hS9uyYjco= +github.com/coreos/ignition/v2 v2.22.0 h1:DcVOKStjs0Qaz8F2bTX0jD0YDRLlzkIwON42502Fkuo= +github.com/coreos/ignition/v2 v2.22.0/go.mod h1:renvkPY1eRngVQvDFjQai+/EHPXEVb4VO3v5tcERfZ8= github.com/coreos/vcontext v0.0.0-20231102161604-685dc7299dc5 h1:sMZSC2BW5LKCdvNbfN12SbKrNvtLBUNjfHZmMvI2ItY= github.com/coreos/vcontext v0.0.0-20231102161604-685dc7299dc5/go.mod h1:Salmysdw7DAVuobBW/LwsKKgpyCPHUhjyJoMJD+ZJiI= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -71,8 +70,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= -github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= @@ -83,8 +82,8 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod 
h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= @@ -208,10 +207,12 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cadvisor v0.52.1 h1:sC8SZ6jio9ds+P2dk51bgbeYeufxo55n0X3tmrpA9as= github.com/google/cadvisor v0.52.1/go.mod h1:OAhPcx1nOm5YwMh/JhpUOMKyv1YKLRtS9KgzWPndHmA= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -236,16 +237,16 @@ github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwg github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= 
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -257,8 +258,8 @@ github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+h github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jaypipes/ghw v0.16.0 h1:3HurCTS38VNpeQLo5fIdZsySuo/qAfpPSJ5t05QBFPM= -github.com/jaypipes/ghw v0.16.0/go.mod h1:In8SsaDqlb1oTyrbmTC14uy+fbBMvp+xdqX51MidlD8= +github.com/jaypipes/ghw v0.17.0 h1:EVLJeNcy5z6GK/Lqby0EhBpynZo+ayl8iJWY0kbEUJA= +github.com/jaypipes/ghw v0.17.0/go.mod h1:In8SsaDqlb1oTyrbmTC14uy+fbBMvp+xdqX51MidlD8= github.com/jaypipes/pcidb v1.0.1 h1:WB2zh27T3nwg8AE8ei81sNRb9yWBii3JGNJtT7K9Oic= github.com/jaypipes/pcidb v1.0.1/go.mod h1:6xYUz/yYEyOkIkUt2t2J2folIuZ4Yg6uByCGFXMCeE4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -276,8 +277,8 @@ github.com/kevinburke/go-bindata v3.24.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -358,8 +359,8 @@ github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/ github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/ginkgo/v2 v2.20.1/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= -github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= +github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega 
v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= @@ -384,24 +385,24 @@ github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16A github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= +github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= github.com/openshift-kni/debug-tools v0.2.3 h1:YosToRjzo004OBZRmqb5VmxxxFH0mJFbagMdgmRb7Xg= github.com/openshift-kni/debug-tools v0.2.3/go.mod h1:GneHFy6PypBIaEg/1iM5t3xC+R5rqyDrvE6kpx/QR+0= -github.com/openshift-kni/k8sreporter v1.0.6 h1:aaxDzZx3s9bo1I3nopR63RGVZxcJgR94j5X87aDihYo= -github.com/openshift-kni/k8sreporter v1.0.6/go.mod h1:tX6LOg0m0oXje7WNLFo8LKHC9Ix8VV0a7vUc6eyeFBQ= -github.com/openshift/api v0.0.0-20250407083304-00e6260bd8b8 h1:n4r9Gzv4+WZxfuE4G4vI24rGyXFxT6uvsfdAp/JSY6o= -github.com/openshift/api v0.0.0-20250407083304-00e6260bd8b8/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw= -github.com/openshift/build-machinery-go v0.0.0-20250211133638-a00a772ae1a2 h1:Pw/OKhN0r8HZcR4No8qh0QygTvuYsDeZhHsK6T9LC+I= -github.com/openshift/build-machinery-go v0.0.0-20250211133638-a00a772ae1a2/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= -github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 h1:bPXR0R8zp1o12nSUphN26hSM+OKYq5pMorbDCpApzDQ= -github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2/go.mod h1:dT1cJyVTperQ53GvVRa+GZ27r02fDZy2k5j+9QoQsCo= +github.com/openshift-kni/k8sreporter v1.0.7 h1:uA5mTVmxYaKBxNAT2+rZkfU3NpT8gjb/2OokzL5b0ZY= +github.com/openshift-kni/k8sreporter v1.0.7/go.mod h1:tX6LOg0m0oXje7WNLFo8LKHC9Ix8VV0a7vUc6eyeFBQ= +github.com/openshift/api v0.0.0-20250724131818-98ae8fbe2b34 h1:J5AU7zxhpyg3XQ66cm/1pXtSh3RlH6TgBEMRB4VWsP8= +github.com/openshift/api v0.0.0-20250724131818-98ae8fbe2b34/go.mod h1:SPLf21TYPipzCO67BURkCfK6dcIIxx0oNRVWaOyRcXM= +github.com/openshift/build-machinery-go v0.0.0-20250602125535-1b6d00b8c37c h1:gJvhduWIrpzoUTwrJjjeul+hGETKkhRhEZosBg/X3Hg= +github.com/openshift/build-machinery-go v0.0.0-20250602125535-1b6d00b8c37c/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee h1:tOtrrxfDEW8hK3eEsHqxsXurq/D6LcINGfprkQC3hqY= +github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee/go.mod h1:zhRiYyNMk89llof2qEuGPWPD+joQPhCRUc2IK0SB510= github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 h1:cHyxR+Y8rAMT6m1jQCaYGRwikqahI0OjjUDhFNf3ySQ= github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= -github.com/openshift/hypershift/api v0.0.0-20240604072534-cd2d5291e2b7 h1:RxPiVgFstgDDK/W4djXU6/w8zfX6pFVXIe6+z6m61nk= -github.com/openshift/hypershift/api v0.0.0-20240604072534-cd2d5291e2b7/go.mod h1:IDXXroBJeH+nIHkA17S3Yq2QDQg02tMnCWOXoyZVOLY= -github.com/openshift/library-go v0.0.0-20240419113445-f1541d628746 
h1:MyLp0GgPyIgeVd1scI0iUasVgd6Xpy/t04Rk+I23wRE= -github.com/openshift/library-go v0.0.0-20240419113445-f1541d628746/go.mod h1:m/HsttSi90vSixwoy5mPUBHcZid2YRw/QbsLErLxF9s= +github.com/openshift/hypershift/api v0.0.0-20250724113115-0281b49cc465 h1:H+cEubOxo39gtUEcgtSWlo0fO51dK3XyTtsovxiWkUA= +github.com/openshift/hypershift/api v0.0.0-20250724113115-0281b49cc465/go.mod h1:oCHHubcVHaPsiV8jSafkG/zQiJ1bwnxQfOM7mPpu3uM= +github.com/openshift/library-go v0.0.0-20250724123005-03d85c4e997c h1:KG1U3r4ocDe39/Mvc6oxgDoi9YlCrWAor6n+uvJzRok= +github.com/openshift/library-go v0.0.0-20250724123005-03d85c4e997c/go.mod h1:tptKNust9MdRI0p90DoBSPHIrBa9oh+Rok59tF0vT8c= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -412,8 +413,12 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= -github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 h1:AHzMWDxNiAVscJL6+4wkvFRTpMnJqiaZFEKA/osaBXE= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= @@ -427,8 +432,9 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/sirupsen/logrus v1.4.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -439,12 +445,14 @@ github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -486,22 +494,28 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace 
v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= +go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -525,9 +539,11 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -536,7 +552,6 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -568,11 +583,12 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod 
h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -617,18 +633,20 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= -golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -646,8 +664,9 @@ golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.8.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -704,12 +723,14 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -731,11 +752,13 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -758,13 +781,14 @@ golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/text 
v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= -golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -813,17 +837,18 @@ golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= -gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -850,8 
+875,8 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e h1:YA5lmSs3zc/5w+xsRcHqpETkaYyK63ivEPzNTcUUlSA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -859,8 +884,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= -google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= +google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -869,14 +894,13 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -914,24 +938,24 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM= howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= -k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8= -k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= -k8s.io/code-generator v0.32.3 h1:31p2TVzC9+hVdSkAFruAk3JY+iSfzrJ83Qij1yZutyw= -k8s.io/code-generator v0.32.3/go.mod h1:+mbiYID5NLsBuqxjQTygKM/DAdKpAjvBzrJd64NU1G8= -k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k= -k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI= +k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= +k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= +k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= +k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= +k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= +k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.3 h1:Wv0hGc+QFdMJB4ZSiHrCgN3zL3QRatu56+rpccKC3J4= +k8s.io/apiserver v0.33.3/go.mod h1:05632ifFEe6TxwjdAIrwINHWE2hLwyADFk5mBsQa15E= +k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= +k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= +k8s.io/code-generator v0.33.3 h1:6+34LhYkIuQ/yn/E3qlpVqjQaP8smzCu4NE1A8b0LWs= +k8s.io/code-generator v0.33.3/go.mod h1:6Y02+HQJYgNphv9z3wJB5w+sjYDIEBQW7sh62PkufvA= +k8s.io/component-base v0.33.3 h1:mlAuyJqyPlKZM7FyaoM/LcunZaaY353RXiOd2+B5tGA= +k8s.io/component-base v0.33.3/go.mod h1:ktBVsBzkI3imDuxYXmVxZ2zxJnYTZ4HAsVj9iF09qp4= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= -k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4= -k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= +k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog= +k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog v1.0.0 
h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= @@ -939,39 +963,40 @@ k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-aggregator v0.32.3 h1:j+lUE4V1sMANYv/wCdU2E8SgnSLJksaJ+6bNnoV6Pfs= -k8s.io/kube-aggregator v0.32.3/go.mod h1:aAl5az9Rlq4sPPSf8/ckpDGSYit75g4g1dp6rKInXZM= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= -k8s.io/kubelet v0.32.3 h1:B9HzW4yB67flx8tN2FYuDwZvxnmK3v5EjxxFvOYjmc8= -k8s.io/kubelet v0.32.3/go.mod h1:yyAQSCKC+tjSlaFw4HQG7Jein+vo+GeKBGdXdQGvL1U= +k8s.io/kube-aggregator v0.33.3 h1:Pa6hQpKJMX0p0D2wwcxXJgu02++gYcGWXoW1z1ZJDfo= +k8s.io/kube-aggregator v0.33.3/go.mod h1:hwvkUoQ8q6gv0+SgNnlmQ3eUue1zHhJKTHsX7BwxwSE= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/kubelet v0.33.3 h1:Cvy8+7Lq9saZds2ib7YBXbKvkMMJu3f5mzucmhSIJno= +k8s.io/kubelet v0.33.3/go.mod h1:Q1Cfr6VQq1m9v9XsE/mDmhTxPdN6NPU6Ug0e6mAqi58= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= kubevirt.io/qe-tools v0.1.8 h1:Ar7qicmzHdd+Ia+6rjHDg3D7GReIyq7QFXoC4F7TjhQ= kubevirt.io/qe-tools v0.1.8/go.mod h1:+Tr/WZGHIDQa/4pQgzM7+4J6YeVbUWAXESXtL2/zxqc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/cluster-api v1.8.11 h1:O4PhhYhrKq3/PVrUaZGXMlczjvQmuWaG4xPUmLayHgI= -sigs.k8s.io/cluster-api v1.8.11/go.mod h1:5MX/395c1wR69dkIwOvhjUwXASu19rU7RCCMeljx96c= -sigs.k8s.io/controller-runtime v0.19.7 h1:DLABZfMr20A+AwCZOHhcbcu+TqBXnJZaVBri9K3EO48= -sigs.k8s.io/controller-runtime v0.19.7/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= -sigs.k8s.io/controller-tools v0.17.3 h1:lwFPLicpBKLgIepah+c8ikRBubFW5kOQyT88r3EwfNw= -sigs.k8s.io/controller-tools v0.17.3/go.mod h1:1ii+oXcYZkxcBXzwv3YZBlzjt1fvkrCGjVF73blosJI= +sigs.k8s.io/cluster-api v1.10.4 h1:5mdyWLGbbwOowWrjqM/J9N600QnxTohu5J1/1YR6g7c= +sigs.k8s.io/cluster-api v1.10.4/go.mod h1:68GJs286ZChsncp+TxYNj/vhy2NWokiPtH4+SA0afs0= +sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= +sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/controller-tools v0.18.0 
h1:rGxGZCZTV2wJreeRgqVoWab/mfcumTMmSwKzoM9xrsE= +sigs.k8s.io/controller-tools v0.18.0/go.mod h1:gLKoiGBriyNh+x1rWtUQnakUYEujErjXs9pf+x/8n1U= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek= sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016 h1:kXv6kKdoEtedwuqMmkqhbkgvYKeycVbC8+iPCP9j5kQ= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ= +sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4= diff --git a/hack/run-render-command-functests.sh b/hack/run-render-command-functests.sh index a3786af68c..f43717917b 100755 --- a/hack/run-render-command-functests.sh +++ b/hack/run-render-command-functests.sh @@ -22,4 +22,4 @@ echo "Running Functional Tests: ${GINKGO_SUITS}" # --fail-fast: ginkgo will stop the suite right after the first spec failure # --flake-attempts: rerun the test if it fails # --require-suite: fail if tests are not executed because of missing suite -GOFLAGS=-mod=vendor ginkgo $NO_COLOR --v -r --fail-fast --flake-attempts=2 --require-suite ${GINKGO_SUITS} --junit-report=/tmp/artifacts +GOFLAGS=-mod=vendor ginkgo $NO_COLOR --v -r --fail-fast --flake-attempts=2 --require-suite --junit-report=/tmp/artifacts ${GINKGO_SUITS} diff --git a/hack/run-test.sh b/hack/run-test.sh index 5db185a479..4cdffa9a18 100755 --- a/hack/run-test.sh +++ b/hack/run-test.sh @@ -87,7 +87,7 @@ main() { MESSAGE="${HEADER_MESSAGE}: ${GINKGO_SUITS}" print ${MESSAGE} - GINKGO_FLAGS="${NO_COLOR} ${DRY_RUN} ${EXTRA_PARAMS} --require-suite ${GINKGO_SUITS} ${REPORT_PARAMS}" + GINKGO_FLAGS="${NO_COLOR} ${DRY_RUN} ${EXTRA_PARAMS} --require-suite ${REPORT_PARAMS} ${GINKGO_SUITS}" print "Command to run: GOFLAGS=-mod=vendor ginkgo ${GINKGO_FLAGS}" if [ "$ONLY_CLI_PRINT" = true ]; then diff --git a/hack/update-k8s-eviction-defaults.sh b/hack/update-k8s-eviction-defaults.sh index a006276fde..0ccfcc6064 100755 --- a/hack/update-k8s-eviction-defaults.sh +++ b/hack/update-k8s-eviction-defaults.sh @@ -21,7 +21,7 @@ set -o pipefail SCRIPT_ROOT=$(realpath "${0%/*}/..") pushd ${SCRIPT_ROOT} 
-TAG=1.32.3 +TAG=1.33.3 OS=$( go env GOOS ) KUBETAG=${KUBETAG:-$TAG} diff --git a/pkg/apis/performanceprofile/v2/performanceprofile_validation.go b/pkg/apis/performanceprofile/v2/performanceprofile_validation.go index 7987d02a62..b00e28933a 100644 --- a/pkg/apis/performanceprofile/v2/performanceprofile_validation.go +++ b/pkg/apis/performanceprofile/v2/performanceprofile_validation.go @@ -81,18 +81,26 @@ var x86ValidKernelPageSizes = []string{ var validatorContext = context.TODO() -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *PerformanceProfile) ValidateCreate() (admission.Warnings, error) { - klog.Infof("Create validation for the performance profile %q", r.Name) +// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type +func (r *PerformanceProfile) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + profile, ok := obj.(*PerformanceProfile) + if !ok { + return admission.Warnings{}, fmt.Errorf("expected PerformanceProfile, got %T", obj) + } + klog.Infof("Create validation for the performance profile %q", profile.Name) - return r.validateCreateOrUpdate() + return profile.validateCreateOrUpdate() } -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *PerformanceProfile) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { - klog.Infof("Update validation for the performance profile %q", r.Name) +// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type +func (r *PerformanceProfile) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + profile, ok := newObj.(*PerformanceProfile) + if !ok { + return admission.Warnings{}, fmt.Errorf("expected PerformanceProfile, got %T", newObj) + } + klog.Infof("Update validation for the performance profile %q", profile.Name) - return r.validateCreateOrUpdate() + return profile.validateCreateOrUpdate() } func (r *PerformanceProfile) validateCreateOrUpdate() (admission.Warnings, error) { @@ -118,9 +126,13 @@ func (r *PerformanceProfile) validateCreateOrUpdate() (admission.Warnings, error r.Name, allErrs) } -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *PerformanceProfile) ValidateDelete() (admission.Warnings, error) { - klog.Infof("Delete validation for the performance profile %q", r.Name) +// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type +func (r *PerformanceProfile) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + profile, ok := obj.(*PerformanceProfile) + if !ok { + return admission.Warnings{}, fmt.Errorf("expected PerformanceProfile, got %T", obj) + } + klog.Infof("Delete validation for the performance profile %q", profile.Name) // TODO(user): fill in your validation logic upon object deletion. 
return admission.Warnings{}, nil diff --git a/pkg/apis/performanceprofile/v2/performanceprofile_webhook.go b/pkg/apis/performanceprofile/v2/performanceprofile_webhook.go index 3f6ebba34a..62bef40b8f 100644 --- a/pkg/apis/performanceprofile/v2/performanceprofile_webhook.go +++ b/pkg/apis/performanceprofile/v2/performanceprofile_webhook.go @@ -6,7 +6,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" ) -var _ webhook.Validator = &PerformanceProfile{} +var _ webhook.CustomValidator = &PerformanceProfile{} // we need this variable only because our validate methods should have access to the client var validatorClient client.Client @@ -19,5 +19,6 @@ func (r *PerformanceProfile) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). For(r). + WithValidator(r). Complete() } diff --git a/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go b/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go index 13742ab01a..85d110112b 100644 --- a/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go +++ b/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go @@ -64,7 +64,7 @@ var ( ErrTangThumbprintRequired = errors.New("thumbprint is required") ErrInvalidTangAdvertisement = errors.New("advertisement is not valid JSON") ErrFileIllegalMode = errors.New("illegal file mode") - ErrModeSpecialBits = errors.New("setuid/setgid/sticky bits are not supported in spec versions older than 3.4.0") + ErrModeSpecialBits = errors.New("setuid/setgid/sticky bits are not supported or functional in spec versions older than 3.6.0") ErrBothIDAndNameSet = errors.New("cannot set both id and name") ErrLabelTooLong = errors.New("partition labels may not exceed 36 characters") ErrDoesntMatchGUIDRegex = errors.New("doesn't match the form \"01234567-89AB-CDEF-EDCB-A98765432101\"") diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 92b78048e2..6f24dfff56 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,8 @@ # Change history of go-restful +## [v3.12.2] - 2025-02-21 + +- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt) ## [v3.12.1] - 2024-05-28 @@ -18,7 +21,7 @@ - fix by restoring custom JSON handler functions (Mike Beaumont #540) -## [v3.12.0] - 2023-08-19 +## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. 
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 7234604e47..3fb40d1980 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -3,7 +3,7 @@ go-restful package for building REST-style Web Services using Google Go [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) +[![Go Reference](https://pkg.go.dev/badge/github.com/emicklei/go-restful.svg)](https://pkg.go.dev/github.com/emicklei/go-restful/v3) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) - [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index a9b3faaa81..7f04bd9053 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma return params } -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { candidates := make([]*Route, 0, 8) for i, each := range routes { @@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R if trace { traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) } - if httpRequest.ContentLength > 0 { - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") } // accept @@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R for _, candidate := range previous { available = append(available, candidate.Produces...) } - // if POST,PUT,PATCH without body - method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") - if (method == http.MethodPost || - method == http.MethodPut || - method == http.MethodPatch) && (length == "" || length == "0") { - return nil, NewError( - http.StatusUnsupportedMediaType, - fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) - } return nil, NewError( http.StatusNotAcceptable, - fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) + fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", "))) } // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil return candidates[0], nil diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index 306c44be77..a2056e2acb 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool { } // Return whether this Route can consume content with a type specified by mimeTypes (can be empty). 
+// If the route does not specify Consumes then return true (*/*). +// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE. func (r Route) matchesContentType(mimeTypes string) bool { if len(r.Consumes) == 0 { diff --git a/vendor/github.com/evanphx/json-patch/.gitignore b/vendor/github.com/evanphx/json-patch/.gitignore deleted file mode 100644 index b7ed7f956d..0000000000 --- a/vendor/github.com/evanphx/json-patch/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# editor and IDE paraphernalia -.idea -.vscode - -# macOS paraphernalia -.DS_Store diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE deleted file mode 100644 index df76d7d771..0000000000 --- a/vendor/github.com/evanphx/json-patch/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014, Evan Phoenix -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md deleted file mode 100644 index 97e319b21b..0000000000 --- a/vendor/github.com/evanphx/json-patch/README.md +++ /dev/null @@ -1,317 +0,0 @@ -# JSON-Patch -`jsonpatch` is a library which provides functionality for both applying -[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as -well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). - -[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://github.com/evanphx/json-patch/actions/workflows/go.yml/badge.svg)](https://github.com/evanphx/json-patch/actions/workflows/go.yml) -[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) - -# Get It! 
- -**Latest and greatest**: -```bash -go get -u github.com/evanphx/json-patch/v5 -``` - -**Stable Versions**: -* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` - -(previous versions below `v3` are unavailable) - -# Use It! -* [Create and apply a merge patch](#create-and-apply-a-merge-patch) -* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) -* [Comparing JSON documents](#comparing-json-documents) -* [Combine merge patches](#combine-merge-patches) - - -# Configuration - -* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. - This defaults to `true` and enables the non-standard practice of allowing - negative indices to mean indices starting at the end of an array. This - functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = - false`. - -* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, - which limits the total size increase in bytes caused by "copy" operations in a - patch. It defaults to 0, which means there is no limit. - -These global variables control the behavior of `jsonpatch.Apply`. - -An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior -is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`. - -Structure `jsonpatch.ApplyOptions` includes the configuration options above -and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`. - -When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore -`remove` operations whose `path` points to a non-existent location in the JSON document. -`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions` -returning an error when hitting a missing `path` on `remove`. - -When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure -that `add` operations produce all the `path` elements that are missing from the target object. - -Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions` -whose values are populated from the global configuration variables. - -## Create and apply a merge patch -Given both an original JSON document and a modified JSON document, you can create -a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. - -It can describe the changes needed to convert from the original to the -modified JSON document. - -Once you have a merge patch, you can apply it to other JSON documents using the -`jsonpatch.MergePatch(document, patch)` function. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - // Let's create a merge patch from these two documents... - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - target := []byte(`{"name": "Jane", "age": 24}`) - - patch, err := jsonpatch.CreateMergePatch(original, target) - if err != nil { - panic(err) - } - - // Now lets apply the patch against a different JSON document... 
- - alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) - modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) - - fmt.Printf("patch document: %s\n", patch) - fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -patch document: {"height":null,"name":"Jane"} -updated alternative doc: {"age":28,"name":"Jane"} -``` - -## Create and apply a JSON Patch -You can create patch objects using `DecodePatch([]byte)`, which can then -be applied against JSON documents. - -The following is an example of creating a patch from two operations, and -applying it against a JSON document. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - patchJSON := []byte(`[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} - ]`) - - patch, err := jsonpatch.DecodePatch(patchJSON) - if err != nil { - panic(err) - } - - modified, err := patch.Apply(original) - if err != nil { - panic(err) - } - - fmt.Printf("Original document: %s\n", original) - fmt.Printf("Modified document: %s\n", modified) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -Original document: {"name": "John", "age": 24, "height": 3.21} -Modified document: {"age":24,"name":"Jane"} -``` - -## Comparing JSON documents -Due to potential whitespace and ordering differences, one cannot simply compare -JSON strings or byte-arrays directly. - -As such, you can instead use `jsonpatch.Equal(document1, document2)` to -determine if two JSON documents are _structurally_ equal. This ignores -whitespace differences, and key-value ordering. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - similar := []byte(` - { - "age": 24, - "height": 3.21, - "name": "John" - } - `) - different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) - - if jsonpatch.Equal(original, similar) { - fmt.Println(`"original" is structurally equal to "similar"`) - } - - if !jsonpatch.Equal(original, different) { - fmt.Println(`"original" is _not_ structurally equal to "different"`) - } -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -"original" is structurally equal to "similar" -"original" is _not_ structurally equal to "different" -``` - -## Combine merge patches -Given two JSON merge patch documents, it is possible to combine them into a -single merge patch which can describe both set of changes. - -The resulting merge patch can be used such that applying it results in a -document structurally similar as merging each merge patch to the document -in succession. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - - nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) - ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) - - // Let's combine these merge patch documents... 
- combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply each patch individual against the original document - withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) - if err != nil { - panic(err) - } - - withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply the combined patch against the original document - - withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) - if err != nil { - panic(err) - } - - // Do both result in the same thing? They should! - if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { - fmt.Println("Both JSON documents are structurally the same!") - } - - fmt.Printf("combined merge patch: %s", combinedPatch) -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -Both JSON documents are structurally the same! -combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} -``` - -# CLI for comparing JSON documents -You can install the commandline program `json-patch`. - -This program can take multiple JSON patch documents as arguments, -and fed a JSON document from `stdin`. It will apply the patch(es) against -the document and output the modified doc. - -**patch.1.json** -```json -[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} -] -``` - -**patch.2.json** -```json -[ - {"op": "add", "path": "/address", "value": "123 Main St"}, - {"op": "replace", "path": "/age", "value": "21"} -] -``` - -**document.json** -```json -{ - "name": "John", - "age": 24, - "height": 3.21 -} -``` - -You can then run: - -```bash -$ go install github.com/evanphx/json-patch/cmd/json-patch -$ cat document.json | json-patch -p patch.1.json -p patch.2.json -{"address":"123 Main St","age":"21","name":"Jane"} -``` - -# Help It! -Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) -or [create a PR](https://github.com/evanphx/json-patch/compare). - - -Before creating a pull request, we'd ask that you make sure tests are passing -and that you have added new tests when applicable. - -Contributors can run tests using: - -```bash -go test -cover ./... -``` - -Builds for pull requests are tested automatically -using [GitHub Actions](https://github.com/evanphx/json-patch/actions/workflows/go.yml). diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go deleted file mode 100644 index 75304b4437..0000000000 --- a/vendor/github.com/evanphx/json-patch/errors.go +++ /dev/null @@ -1,38 +0,0 @@ -package jsonpatch - -import "fmt" - -// AccumulatedCopySizeError is an error type returned when the accumulated size -// increase caused by copy operations in a patch operation has exceeded the -// limit. -type AccumulatedCopySizeError struct { - limit int64 - accumulated int64 -} - -// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. -func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { - return &AccumulatedCopySizeError{limit: l, accumulated: a} -} - -// Error implements the error interface. -func (a *AccumulatedCopySizeError) Error() string { - return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) -} - -// ArraySizeError is an error type returned when the array size has exceeded -// the limit. 
-type ArraySizeError struct { - limit int - size int -} - -// NewArraySizeError returns an ArraySizeError. -func NewArraySizeError(l, s int) *ArraySizeError { - return &ArraySizeError{limit: l, size: s} -} - -// Error implements the error interface. -func (a *ArraySizeError) Error() string { - return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) -} diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go deleted file mode 100644 index ad88d40181..0000000000 --- a/vendor/github.com/evanphx/json-patch/merge.go +++ /dev/null @@ -1,389 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" -) - -func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { - curDoc, err := cur.intoDoc() - - if err != nil { - pruneNulls(patch) - return patch - } - - patchDoc, err := patch.intoDoc() - - if err != nil { - return patch - } - - mergeDocs(curDoc, patchDoc, mergeMerge) - - return cur -} - -func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { - for k, v := range *patch { - if v == nil { - if mergeMerge { - (*doc)[k] = nil - } else { - delete(*doc, k) - } - } else { - cur, ok := (*doc)[k] - - if !ok || cur == nil { - if !mergeMerge { - pruneNulls(v) - } - - (*doc)[k] = v - } else { - (*doc)[k] = merge(cur, v, mergeMerge) - } - } - } -} - -func pruneNulls(n *lazyNode) { - sub, err := n.intoDoc() - - if err == nil { - pruneDocNulls(sub) - } else { - ary, err := n.intoAry() - - if err == nil { - pruneAryNulls(ary) - } - } -} - -func pruneDocNulls(doc *partialDoc) *partialDoc { - for k, v := range *doc { - if v == nil { - delete(*doc, k) - } else { - pruneNulls(v) - } - } - - return doc -} - -func pruneAryNulls(ary *partialArray) *partialArray { - newAry := []*lazyNode{} - - for _, v := range *ary { - if v != nil { - pruneNulls(v) - } - newAry = append(newAry, v) - } - - *ary = newAry - - return ary -} - -var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") -var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") - -// MergeMergePatches merges two merge patches together, such that -// applying this resulting merged merge patch to a document yields the same -// as merging each merge patch to the document in succession. -func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { - return doMergePatch(patch1Data, patch2Data, true) -} - -// MergePatch merges the patchData into the docData. 
-func MergePatch(docData, patchData []byte) ([]byte, error) { - return doMergePatch(docData, patchData, false) -} - -func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { - doc := &partialDoc{} - - docErr := json.Unmarshal(docData, doc) - - patch := &partialDoc{} - - patchErr := json.Unmarshal(patchData, patch) - - if _, ok := docErr.(*json.SyntaxError); ok { - return nil, ErrBadJSONDoc - } - - if _, ok := patchErr.(*json.SyntaxError); ok { - return nil, ErrBadJSONPatch - } - - if docErr == nil && *doc == nil { - return nil, ErrBadJSONDoc - } - - if patchErr == nil && *patch == nil { - return nil, ErrBadJSONPatch - } - - if docErr != nil || patchErr != nil { - // Not an error, just not a doc, so we turn straight into the patch - if patchErr == nil { - if mergeMerge { - doc = patch - } else { - doc = pruneDocNulls(patch) - } - } else { - patchAry := &partialArray{} - patchErr = json.Unmarshal(patchData, patchAry) - - if patchErr != nil { - return nil, ErrBadJSONPatch - } - - pruneAryNulls(patchAry) - - out, patchErr := json.Marshal(patchAry) - - if patchErr != nil { - return nil, ErrBadJSONPatch - } - - return out, nil - } - } else { - mergeDocs(doc, patch, mergeMerge) - } - - return json.Marshal(doc) -} - -// resemblesJSONArray indicates whether the byte-slice "appears" to be -// a JSON array or not. -// False-positives are possible, as this function does not check the internal -// structure of the array. It only checks that the outer syntax is present and -// correct. -func resemblesJSONArray(input []byte) bool { - input = bytes.TrimSpace(input) - - hasPrefix := bytes.HasPrefix(input, []byte("[")) - hasSuffix := bytes.HasSuffix(input, []byte("]")) - - return hasPrefix && hasSuffix -} - -// CreateMergePatch will return a merge patch document capable of converting -// the original document(s) to the modified document(s). -// The parameters can be bytes of either two JSON Documents, or two arrays of -// JSON documents. -// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 -func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalResemblesArray := resemblesJSONArray(originalJSON) - modifiedResemblesArray := resemblesJSONArray(modifiedJSON) - - // Do both byte-slices seem like JSON arrays? - if originalResemblesArray && modifiedResemblesArray { - return createArrayMergePatch(originalJSON, modifiedJSON) - } - - // Are both byte-slices are not arrays? Then they are likely JSON objects... - if !originalResemblesArray && !modifiedResemblesArray { - return createObjectMergePatch(originalJSON, modifiedJSON) - } - - // None of the above? Then return an error because of mismatched types. - return nil, errBadMergeTypes -} - -// createObjectMergePatch will return a merge-patch document capable of -// converting the original document to the modified document. 
-func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDoc := map[string]interface{}{} - modifiedDoc := map[string]interface{}{} - - err := json.Unmarshal(originalJSON, &originalDoc) - if err != nil { - return nil, ErrBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDoc) - if err != nil { - return nil, ErrBadJSONDoc - } - - dest, err := getDiff(originalDoc, modifiedDoc) - if err != nil { - return nil, err - } - - return json.Marshal(dest) -} - -// createArrayMergePatch will return an array of merge-patch documents capable -// of converting the original document to the modified document for each -// pair of JSON documents provided in the arrays. -// Arrays of mismatched sizes will result in an error. -func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDocs := []json.RawMessage{} - modifiedDocs := []json.RawMessage{} - - err := json.Unmarshal(originalJSON, &originalDocs) - if err != nil { - return nil, ErrBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDocs) - if err != nil { - return nil, ErrBadJSONDoc - } - - total := len(originalDocs) - if len(modifiedDocs) != total { - return nil, ErrBadJSONDoc - } - - result := []json.RawMessage{} - for i := 0; i < len(originalDocs); i++ { - original := originalDocs[i] - modified := modifiedDocs[i] - - patch, err := createObjectMergePatch(original, modified) - if err != nil { - return nil, err - } - - result = append(result, json.RawMessage(patch)) - } - - return json.Marshal(result) -} - -// Returns true if the array matches (must be json types). -// As is idiomatic for go, an empty array is not the same as a nil array. -func matchesArray(a, b []interface{}) bool { - if len(a) != len(b) { - return false - } - if (a == nil && b != nil) || (a != nil && b == nil) { - return false - } - for i := range a { - if !matchesValue(a[i], b[i]) { - return false - } - } - return true -} - -// Returns true if the values matches (must be json types) -// The types of the values must match, otherwise it will always return false -// If two map[string]interface{} are given, all elements must match. -func matchesValue(av, bv interface{}) bool { - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - return false - } - switch at := av.(type) { - case string: - bt := bv.(string) - if bt == at { - return true - } - case float64: - bt := bv.(float64) - if bt == at { - return true - } - case bool: - bt := bv.(bool) - if bt == at { - return true - } - case nil: - // Both nil, fine. - return true - case map[string]interface{}: - bt := bv.(map[string]interface{}) - if len(bt) != len(at) { - return false - } - for key := range bt { - av, aOK := at[key] - bv, bOK := bt[key] - if aOK != bOK { - return false - } - if !matchesValue(av, bv) { - return false - } - } - return true - case []interface{}: - bt := bv.([]interface{}) - return matchesArray(at, bt) - } - return false -} - -// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
-func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { - into := map[string]interface{}{} - for key, bv := range b { - av, ok := a[key] - // value was added - if !ok { - into[key] = bv - continue - } - // If types have changed, replace completely - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - into[key] = bv - continue - } - // Types are the same, compare values - switch at := av.(type) { - case map[string]interface{}: - bt := bv.(map[string]interface{}) - dst := make(map[string]interface{}, len(bt)) - dst, err := getDiff(at, bt) - if err != nil { - return nil, err - } - if len(dst) > 0 { - into[key] = dst - } - case string, float64, bool: - if !matchesValue(av, bv) { - into[key] = bv - } - case []interface{}: - bt := bv.([]interface{}) - if !matchesArray(at, bt) { - into[key] = bv - } - case nil: - switch bv.(type) { - case nil: - // Both nil, fine. - default: - into[key] = bv - } - default: - panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) - } - } - // Now add all deleted values as nil - for key := range a { - _, found := b[key] - if !found { - into[key] = nil - } - } - return into, nil -} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go deleted file mode 100644 index cd0274e1e4..0000000000 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ /dev/null @@ -1,851 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -const ( - eRaw = iota - eDoc - eAry -) - -var ( - // SupportNegativeIndices decides whether to support non-standard practice of - // allowing negative indices to mean indices starting at the end of an array. - // Default to true. - SupportNegativeIndices bool = true - // AccumulatedCopySizeLimit limits the total size increase in bytes caused by - // "copy" operations in a patch. - AccumulatedCopySizeLimit int64 = 0 -) - -var ( - ErrTestFailed = errors.New("test failed") - ErrMissing = errors.New("missing value") - ErrUnknownType = errors.New("unknown object type") - ErrInvalid = errors.New("invalid state detected") - ErrInvalidIndex = errors.New("invalid index referenced") -) - -type lazyNode struct { - raw *json.RawMessage - doc partialDoc - ary partialArray - which int -} - -// Operation is a single JSON-Patch step, such as a single 'add' operation. -type Operation map[string]*json.RawMessage - -// Patch is an ordered collection of Operations. 
-type Patch []Operation - -type partialDoc map[string]*lazyNode -type partialArray []*lazyNode - -type container interface { - get(key string) (*lazyNode, error) - set(key string, val *lazyNode) error - add(key string, val *lazyNode) error - remove(key string) error -} - -func newLazyNode(raw *json.RawMessage) *lazyNode { - return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} -} - -func (n *lazyNode) MarshalJSON() ([]byte, error) { - switch n.which { - case eRaw: - return json.Marshal(n.raw) - case eDoc: - return json.Marshal(n.doc) - case eAry: - return json.Marshal(n.ary) - default: - return nil, ErrUnknownType - } -} - -func (n *lazyNode) UnmarshalJSON(data []byte) error { - dest := make(json.RawMessage, len(data)) - copy(dest, data) - n.raw = &dest - n.which = eRaw - return nil -} - -func deepCopy(src *lazyNode) (*lazyNode, int, error) { - if src == nil { - return nil, 0, nil - } - a, err := src.MarshalJSON() - if err != nil { - return nil, 0, err - } - sz := len(a) - ra := make(json.RawMessage, sz) - copy(ra, a) - return newLazyNode(&ra), sz, nil -} - -func (n *lazyNode) intoDoc() (*partialDoc, error) { - if n.which == eDoc { - return &n.doc, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return nil, err - } - - n.which = eDoc - return &n.doc, nil -} - -func (n *lazyNode) intoAry() (*partialArray, error) { - if n.which == eAry { - return &n.ary, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return nil, err - } - - n.which = eAry - return &n.ary, nil -} - -func (n *lazyNode) compact() []byte { - buf := &bytes.Buffer{} - - if n.raw == nil { - return nil - } - - err := json.Compact(buf, *n.raw) - - if err != nil { - return *n.raw - } - - return buf.Bytes() -} - -func (n *lazyNode) tryDoc() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return false - } - - n.which = eDoc - return true -} - -func (n *lazyNode) tryAry() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return false - } - - n.which = eAry - return true -} - -func (n *lazyNode) equal(o *lazyNode) bool { - if n.which == eRaw { - if !n.tryDoc() && !n.tryAry() { - if o.which != eRaw { - return false - } - - return bytes.Equal(n.compact(), o.compact()) - } - } - - if n.which == eDoc { - if o.which == eRaw { - if !o.tryDoc() { - return false - } - } - - if o.which != eDoc { - return false - } - - if len(n.doc) != len(o.doc) { - return false - } - - for k, v := range n.doc { - ov, ok := o.doc[k] - - if !ok { - return false - } - - if (v == nil) != (ov == nil) { - return false - } - - if v == nil && ov == nil { - continue - } - - if !v.equal(ov) { - return false - } - } - - return true - } - - if o.which != eAry && !o.tryAry() { - return false - } - - if len(n.ary) != len(o.ary) { - return false - } - - for idx, val := range n.ary { - if !val.equal(o.ary[idx]) { - return false - } - } - - return true -} - -// Kind reads the "op" field of the Operation. -func (o Operation) Kind() string { - if obj, ok := o["op"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -// Path reads the "path" field of the Operation. 
-func (o Operation) Path() (string, error) { - if obj, ok := o["path"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") -} - -// From reads the "from" field of the Operation. -func (o Operation) From() (string, error) { - if obj, ok := o["from"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") -} - -func (o Operation) value() *lazyNode { - if obj, ok := o["value"]; ok { - return newLazyNode(obj) - } - - return nil -} - -// ValueInterface decodes the operation value into an interface. -func (o Operation) ValueInterface() (interface{}, error) { - if obj, ok := o["value"]; ok && obj != nil { - var v interface{} - - err := json.Unmarshal(*obj, &v) - - if err != nil { - return nil, err - } - - return v, nil - } - - return nil, errors.Wrapf(ErrMissing, "operation, missing value field") -} - -func isArray(buf []byte) bool { -Loop: - for _, c := range buf { - switch c { - case ' ': - case '\n': - case '\t': - continue - case '[': - return true - default: - break Loop - } - } - - return false -} - -func findObject(pd *container, path string) (container, string) { - doc := *pd - - split := strings.Split(path, "/") - - if len(split) < 2 { - return nil, "" - } - - parts := split[1 : len(split)-1] - - key := split[len(split)-1] - - var err error - - for _, part := range parts { - - next, ok := doc.get(decodePatchKey(part)) - - if next == nil || ok != nil || next.raw == nil { - return nil, "" - } - - if isArray(*next.raw) { - doc, err = next.intoAry() - - if err != nil { - return nil, "" - } - } else { - doc, err = next.intoDoc() - - if err != nil { - return nil, "" - } - } - } - - return doc, decodePatchKey(key) -} - -func (d *partialDoc) set(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) add(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) get(key string) (*lazyNode, error) { - return (*d)[key], nil -} - -func (d *partialDoc) remove(key string) error { - _, ok := (*d)[key] - if !ok { - return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) - } - - delete(*d, key) - return nil -} - -// set should only be used to implement the "replace" operation, so "key" must -// be an already existing index in "d". 
-func (d *partialArray) set(key string, val *lazyNode) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(*d) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(*d) - } - - (*d)[idx] = val - return nil -} - -func (d *partialArray) add(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return errors.Wrapf(err, "value was not a proper array index: '%s'", key) - } - - sz := len(*d) + 1 - - ary := make([]*lazyNode, sz) - - cur := *d - - if idx >= len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(ary) - } - - copy(ary[0:idx], cur[0:idx]) - ary[idx] = val - copy(ary[idx+1:], cur[idx:]) - - *d = ary - return nil -} - -func (d *partialArray) get(key string) (*lazyNode, error) { - idx, err := strconv.Atoi(key) - - if err != nil { - return nil, err - } - - if idx < 0 { - if !SupportNegativeIndices { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(*d) - } - - if idx >= len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - return (*d)[idx], nil -} - -func (d *partialArray) remove(key string) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - cur := *d - - if idx >= len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(cur) - } - - ary := make([]*lazyNode, len(cur)-1) - - copy(ary[0:idx], cur[0:idx]) - copy(ary[idx:], cur[idx+1:]) - - *d = ary - return nil - -} - -func (p Patch) add(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "add operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.add(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in add for path: '%s'", path) - } - - return nil -} - -func (p Patch) remove(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "remove operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) replace(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "replace 
operation failed to decode path") - } - - if path == "" { - val := op.value() - - if val.which == eRaw { - if !val.tryDoc() { - if !val.tryAry() { - return errors.Wrapf(err, "replace operation value must be object or array") - } - } - } - - switch val.which { - case eAry: - *doc = &val.ary - case eDoc: - *doc = &val.doc - case eRaw: - return errors.Wrapf(err, "replace operation hit impossible case") - } - - return nil - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) - } - - _, ok := con.get(key) - if ok != nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) - } - - err = con.set(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) move(doc *container, op Operation) error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) - } - - err = con.add(key, val) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", path) - } - - return nil -} - -func (p Patch) test(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "test operation failed to decode path") - } - - if path == "" { - var self lazyNode - - switch sv := (*doc).(type) { - case *partialDoc: - self.doc = *sv - self.which = eDoc - case *partialArray: - self.ary = *sv - self.which = eAry - } - - if self.equal(op.value()) { - return nil - } - - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in test for path: '%s'", path) - } - - if val == nil { - if op.value() == nil || op.value().raw == nil { - return nil - } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } else if op.value() == nil { - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - - if val.equal(op.value()) { - return nil - } - - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) -} - -func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "copy operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in copy for from: '%s'", from) - } - - path, err := op.Path() - if err != nil { - return 
errors.Wrapf(ErrMissing, "copy operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) - } - - valCopy, sz, err := deepCopy(val) - if err != nil { - return errors.Wrapf(err, "error while performing deep copy") - } - - (*accumulatedCopySize) += int64(sz) - if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { - return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) - } - - err = con.add(key, valCopy) - if err != nil { - return errors.Wrapf(err, "error while adding value during copy") - } - - return nil -} - -// Equal indicates if 2 JSON documents have the same structural equality. -func Equal(a, b []byte) bool { - ra := make(json.RawMessage, len(a)) - copy(ra, a) - la := newLazyNode(&ra) - - rb := make(json.RawMessage, len(b)) - copy(rb, b) - lb := newLazyNode(&rb) - - return la.equal(lb) -} - -// DecodePatch decodes the passed JSON document as an RFC 6902 patch. -func DecodePatch(buf []byte) (Patch, error) { - var p Patch - - err := json.Unmarshal(buf, &p) - - if err != nil { - return nil, err - } - - return p, nil -} - -// Apply mutates a JSON document according to the patch, and returns the new -// document. -func (p Patch) Apply(doc []byte) ([]byte, error) { - return p.ApplyIndent(doc, "") -} - -// ApplyIndent mutates a JSON document according to the patch, and returns the new -// document indented. -func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { - if len(doc) == 0 { - return doc, nil - } - - var pd container - if doc[0] == '[' { - pd = &partialArray{} - } else { - pd = &partialDoc{} - } - - err := json.Unmarshal(doc, pd) - - if err != nil { - return nil, err - } - - err = nil - - var accumulatedCopySize int64 - - for _, op := range p { - switch op.Kind() { - case "add": - err = p.add(&pd, op) - case "remove": - err = p.remove(&pd, op) - case "replace": - err = p.replace(&pd, op) - case "move": - err = p.move(&pd, op) - case "test": - err = p.test(&pd, op) - case "copy": - err = p.copy(&pd, op, &accumulatedCopySize) - default: - err = fmt.Errorf("Unexpected kind: %s", op.Kind()) - } - - if err != nil { - return nil, err - } - } - - if indent != "" { - return json.MarshalIndent(pd, "", indent) - } - - return json.Marshal(pd) -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. - -var ( - rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") -) - -func decodePatchKey(k string) string { - return rfc6901Decoder.Replace(k) -} diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b3..f4e7dbf37b 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... 
- - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851e..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be0..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b1..daea9dd6d6 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e5757549..fa854785d0 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. +1.8.0 2023-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d5..e4ac2a2fff 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. 
You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster. +Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). 
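For comparison, a minimal Go sketch of the first example script above ("watch /" plus "echo data >/file"), written against the exported fsnotify API; the temporary directory and the two-event expectation are illustrative assumptions, not part of the test harness:

    package main

    import (
        "fmt"
        "log"
        "os"
        "path/filepath"

        "github.com/fsnotify/fsnotify"
    )

    func main() {
        dir, err := os.MkdirTemp("", "fsnotify-example")
        if err != nil {
            log.Fatal(err)
        }
        defer os.RemoveAll(dir)

        w, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()

        // Rough equivalent of "watch /": watch the temp directory.
        if err := w.Add(dir); err != nil {
            log.Fatal(err)
        }

        // Rough equivalent of "echo data >/file": create the file and write to it.
        go func() {
            if err := os.WriteFile(filepath.Join(dir, "file"), []byte("data\n"), 0o644); err != nil {
                log.Println(err)
            }
        }()

        // Typically prints "CREATE .../file" followed by "WRITE .../file",
        // mirroring the "create /file" / "write /file" output block above.
        for i := 0; i < 2; i++ {
            select {
            case ev := <-w.Events:
                fmt.Println(ev.Op, ev.Name)
            case err := <-w.Errors:
                log.Println("error:", err)
            }
        }
    }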
+ [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd8..c349c326c7 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. 
-type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
-func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. 
-func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. 
the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e40..36c311694c 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. 
-// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. 
+ // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. + if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. 
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. 
watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. 
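The buffer parsing here follows the usual inotify layout: each event is a fixed-size header, optionally followed by a NUL-padded file name of raw.Len bytes. The same decoding can be sketched outside the library with golang.org/x/sys/unix directly (the /tmp path and the mask are arbitrary example values):

//go:build linux

package main

import (
    "bytes"
    "fmt"
    "log"
    "unsafe"

    "golang.org/x/sys/unix"
)

func main() {
    fd, err := unix.InotifyInit1(unix.IN_CLOEXEC)
    if err != nil {
        log.Fatal(err)
    }
    defer unix.Close(fd)

    // Watch /tmp for creations and deletions.
    if _, err := unix.InotifyAddWatch(fd, "/tmp", unix.IN_CREATE|unix.IN_DELETE); err != nil {
        log.Fatal(err)
    }

    // One blocking read can return several variable-length events.
    buf := make([]byte, 64*(unix.SizeofInotifyEvent+unix.NAME_MAX+1))
    n, err := unix.Read(fd, buf)
    if err != nil {
        log.Fatal(err)
    }

    for offset := 0; offset+unix.SizeofInotifyEvent <= n; {
        // Fixed-size header, followed by raw.Len bytes of NUL-padded name.
        raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
        name := buf[offset+unix.SizeofInotifyEvent : offset+unix.SizeofInotifyEvent+int(raw.Len)]
        fmt.Printf("wd=%d mask=%#x cookie=%d name=%q\n",
            raw.Wd, raw.Mask, raw.Cookie, bytes.TrimRight(name, "\x00"))
        offset += unix.SizeofInotifyEvent + int(raw.Len)
    }
}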
+ if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. + ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a0..d8de5ab76f 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. 
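The cookie handling above pairs IN_MOVED_FROM and IN_MOVED_TO events that share a kernel cookie, so the destination event can report which path it was renamed from. The patch keeps the last ten cookies in a small ring; this simplified sketch uses a map, and the type and names are illustrative only:

package main

import "fmt"

// pendingRenames remembers the source path of recent "moved from" events by
// kernel cookie, so a later "moved to" with the same cookie can be reported
// as a rename of that path.
type pendingRenames struct {
    byCookie map[uint32]string
}

func (p *pendingRenames) movedFrom(cookie uint32, path string) {
    p.byCookie[cookie] = path
}

func (p *pendingRenames) movedTo(cookie uint32) (string, bool) {
    from, ok := p.byCookie[cookie]
    if ok {
        delete(p.byCookie, cookie)
    }
    return from, ok
}

func main() {
    p := &pendingRenames{byCookie: map[uint32]string{}}

    // Hypothetical event stream for: mv /tmp/a /tmp/b
    p.movedFrom(42, "/tmp/a")
    if from, ok := p.movedTo(42); ok {
        fmt.Printf("/tmp/b was renamed from %s\n", from)
    }
}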
For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
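The new watches type above replaces the loose maps that used to live directly on the Watcher struct with one mutex-guarded index that can be queried by descriptor, by path, or by parent directory. A simplified sketch of that shape (field and method names here are invented for illustration, not the patch's):

package main

import (
    "fmt"
    "sync"
)

// index keeps the same information under several keys so that kernel events
// (which carry a descriptor) and API calls (which carry a path) can both be
// resolved without scanning, plus a per-directory member set for "remove
// everything under this directory".
type index struct {
    mu     sync.RWMutex
    byFd   map[int]string
    byPath map[string]int
    byDir  map[string]map[int]struct{}
}

func newIndex() *index {
    return &index{
        byFd:   map[int]string{},
        byPath: map[string]int{},
        byDir:  map[string]map[int]struct{}{},
    }
}

func (ix *index) add(fd int, path, dir string) {
    ix.mu.Lock()
    defer ix.mu.Unlock()
    ix.byFd[fd] = path
    ix.byPath[path] = fd
    if ix.byDir[dir] == nil {
        ix.byDir[dir] = map[int]struct{}{}
    }
    ix.byDir[dir][fd] = struct{}{}
}

func (ix *index) pathOf(fd int) (string, bool) {
    ix.mu.RLock()
    defer ix.mu.RUnlock()
    p, ok := ix.byFd[fd]
    return p, ok
}

func main() {
    ix := newIndex()
    ix.add(7, "/tmp/dir/file", "/tmp/dir")
    fmt.Println(ix.pathOf(7))
}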
-func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. +func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. 
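newKqueue above registers the read end of a pipe (closepipe) with the kqueue so that Close() can wake a blocked Kevent() call by closing the write end. A standalone sketch of that self-pipe trick using golang.org/x/sys/unix (BSD/macOS only; illustrative, not the library's code):

//go:build darwin || freebsd || netbsd || openbsd || dragonfly

package main

import (
    "fmt"
    "log"

    "golang.org/x/sys/unix"
)

func main() {
    kq, err := unix.Kqueue()
    if err != nil {
        log.Fatal(err)
    }
    defer unix.Close(kq)

    // Self-pipe trick: register the read end of a pipe with the kqueue so
    // that closing the write end wakes up a blocked Kevent() call.
    var p [2]int
    if err := unix.Pipe(p[:]); err != nil {
        log.Fatal(err)
    }

    ev := make([]unix.Kevent_t, 1)
    unix.SetKevent(&ev[0], p[0], unix.EVFILT_READ, unix.EV_ADD|unix.EV_ENABLE)
    if _, err := unix.Kevent(kq, ev, nil, nil); err != nil {
        log.Fatal(err)
    }

    go unix.Close(p[1]) // pretend Close() was called from another goroutine

    events := make([]unix.Kevent_t, 10)
    n, err := unix.Kevent(kq, nil, events, nil) // blocks until the pipe wakes it
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("woke up with", n, "event(s)")
}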
-func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) 
+func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). 
// // Returns the real path to the file which was added, with symlinks resolved. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. 
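addWatch refuses sockets and named pipes and, for symlinks, resolves the link target before opening it. Resolving a link outside the library could look roughly like this (a sketch; unlike the vendored code it also makes a relative target absolute, and the /tmp/link path is hypothetical):

package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"
)

// resolveForWatch mimics the "follow symlinks" step: if name is a symlink,
// return the target it points to (made absolute relative to the link's
// directory), otherwise return name unchanged.
func resolveForWatch(name string) (string, error) {
    fi, err := os.Lstat(name)
    if err != nil {
        return "", err
    }
    if fi.Mode()&os.ModeSymlink == 0 {
        return name, nil
    }
    target, err := os.Readlink(name)
    if err != nil {
        return "", err
    }
    if !filepath.IsAbs(target) {
        target = filepath.Join(filepath.Dir(name), target)
    }
    return target, nil
}

func main() {
    // Hypothetical link: /tmp/link -> /tmp/real
    p, err := resolveForWatch("/tmp/link")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("would watch:", p)
}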
for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. 
For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". 
+ e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. +func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. 
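Because kqueue only reports that a watched directory was written, not which entry changed, the dirChange/sendCreateIfNew pair here re-reads the directory and synthesizes Create events for entries that have not been seen before. The same idea in isolation (a sketch; the /tmp directory is a placeholder):

package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"
)

// diffDir re-reads dir, reports entries not present in seen, and marks them
// as seen; this is how a "directory was written" event can be turned into
// per-file Create events.
func diffDir(dir string, seen map[string]bool) ([]string, error) {
    entries, err := os.ReadDir(dir)
    if err != nil {
        return nil, err
    }
    var created []string
    for _, e := range entries {
        p := filepath.Join(dir, e.Name())
        if !seen[p] {
            seen[p] = true
            created = append(created, p)
        }
    }
    return created, nil
}

func main() {
    seen := map[string]bool{}
    if _, err := diffDir("/tmp", seen); err != nil { // prime the seen set
        log.Fatal(err)
    }
    // ... later, after a write notification on /tmp ...
    created, err := diffDir("/tmp", seen)
    if err != nil {
        log.Fatal(err)
    }
    for _, p := range created {
        fmt.Println("Create:", p)
    }
}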
+ path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c015..5eb5dbc66f 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
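The event kinds described in the removed doc comments (Create, Write, Remove, Rename, Chmod) are still what consumers see on the Events channel, and a typical consumer switches on them with Event.Has. A short usage sketch against the public API (the /tmp path is arbitrary):

package main

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    w, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()

    if err := w.Add("/tmp"); err != nil { // hypothetical path
        log.Fatal(err)
    }

    for {
        select {
        case ev, ok := <-w.Events:
            if !ok {
                return
            }
            switch {
            case ev.Has(fsnotify.Create):
                log.Println("created:", ev.Name)
            case ev.Has(fsnotify.Remove), ev.Has(fsnotify.Rename):
                log.Println("gone:", ev.Name)
            case ev.Has(fsnotify.Write):
                log.Println("written:", ev.Name)
            case ev.Has(fsnotify.Chmod):
                log.Println("chmod:", ev.Name)
            }
        case err, ok := <-w.Errors:
            if !ok {
                return
            }
            log.Println("error:", err)
        }
    }
}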
+type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. 
For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d61..c54a630838 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. 
-// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). 
An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. 
When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := 
windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. 
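A minimal sketch of how a caller opts into the larger ReadDirectoryChangesW buffer that the AddWith validation above enforces; the path and the 256 KiB size are illustrative, not taken from the patch:

    package main

    import (
        "log"

        "github.com/fsnotify/fsnotify"
    )

    func main() {
        w, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()

        // WithBufferSize raises the per-watch buffer from the 64K default;
        // it is a no-op on non-Windows platforms, and AddWith rejects sizes
        // below 4096 bytes per the check above.
        err = w.AddWith(`C:\path\to\dir`, fsnotify.WithBufferSize(256*1024))
        if err != nil {
            log.Fatal(err)
        }

        for {
            select {
            case ev, ok := <-w.Events:
                if !ok {
                    return
                }
                log.Println("event:", ev)
            case err, ok := <-w.Errors:
                if !ok {
                    return
                }
                log.Println("error:", err)
            }
        }
    }
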
-func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc499..0760efe916 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug 
messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. 
+ // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. 
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. 
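The doc comment above recommends watching the parent directory and filtering on Event.Name rather than watching a single file. A minimal sketch of that pattern; the directory and file name are illustrative:

    package main

    import (
        "log"
        "path/filepath"

        "github.com/fsnotify/fsnotify"
    )

    func main() {
        w, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()

        // Drain Errors so the watcher never blocks on a full channel.
        go func() {
            for err := range w.Errors {
                log.Println("error:", err)
            }
        }()

        // Watch the parent directory, not the file itself, so the watch
        // survives editors that replace the file atomically.
        if err := w.Add("/tmp/dir"); err != nil {
            log.Fatal(err)
        }

        for ev := range w.Events {
            // Filter on Event.Name for the one file we care about.
            if filepath.Base(ev.Name) == "config.yaml" && ev.Has(fsnotify.Write) {
                log.Println("modified:", ev.Name)
            }
        }
    }

When the stream of events looks wrong, setting FSNOTIFY_DEBUG=1 (documented earlier in this file) prints each raw event to stderr before fsnotify processes it.
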
+func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. 
+// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." { return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 0000000000..b0eab10090 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 0000000000..928319fb09 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + 
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 0000000000..3186b0c349 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 0000000000..f69fdb930f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + 
{"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 0000000000..607e683bd7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 0000000000..35c734be43 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 0000000000..e5b3b6f694 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + 
{"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 0000000000..1dd455bc5a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 0000000000..f1b2e73bd5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 0000000000..52bf4ce53b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + 
{"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 0000000000..547df1df84 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 0000000000..7daa45e19e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. 
+package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 0000000000..30976ce973 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 0000000000..37dfeddc28 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 0000000000..a72c649549 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. +var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae653..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. 
- -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b885..f65e8fe3ed 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78f..a29fc7aab6 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd746..0000000000 --- a/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e968..0000000000 --- a/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f646931a4..0000000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go deleted file mode 100644 index e810e6fea1..0000000000 --- a/vendor/github.com/golang/protobuf/proto/buffer.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "errors" - "fmt" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - WireVarint = 0 - WireFixed32 = 5 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 -) - -// EncodeVarint returns the varint encoded bytes of v. -func EncodeVarint(v uint64) []byte { - return protowire.AppendVarint(nil, v) -} - -// SizeVarint returns the length of the varint encoded bytes of v. -// This is equal to len(EncodeVarint(v)). -func SizeVarint(v uint64) int { - return protowire.SizeVarint(v) -} - -// DecodeVarint parses a varint encoded integer from b, -// returning the integer value and the length of the varint. -// It returns (0, 0) if there is a parse error. -func DecodeVarint(b []byte) (uint64, int) { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, 0 - } - return v, n -} - -// Buffer is a buffer for encoding and decoding the protobuf wire format. -// It may be reused between invocations to reduce memory usage. -type Buffer struct { - buf []byte - idx int - deterministic bool -} - -// NewBuffer allocates a new Buffer initialized with buf, -// where the contents of buf are considered the unread portion of the buffer. -func NewBuffer(buf []byte) *Buffer { - return &Buffer{buf: buf} -} - -// SetDeterministic specifies whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. 
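The legacy Buffer API deleted here, including the SetDeterministic behaviour described above, has a direct counterpart in the google.golang.org/protobuf module that this bump keeps. A hedged sketch of the replacement (package name is illustrative):

    package example

    import (
        "google.golang.org/protobuf/proto"
    )

    // marshalDeterministic mirrors the removed Buffer.SetDeterministic(true):
    // map entries are sorted by key so equal messages serialize to the same
    // bytes within one binary.
    func marshalDeterministic(m proto.Message) ([]byte, error) {
        return proto.MarshalOptions{Deterministic: true}.Marshal(m)
    }
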
-func (b *Buffer) SetDeterministic(deterministic bool) { - b.deterministic = deterministic -} - -// SetBuf sets buf as the internal buffer, -// where the contents of buf are considered the unread portion of the buffer. -func (b *Buffer) SetBuf(buf []byte) { - b.buf = buf - b.idx = 0 -} - -// Reset clears the internal buffer of all written and unread data. -func (b *Buffer) Reset() { - b.buf = b.buf[:0] - b.idx = 0 -} - -// Bytes returns the internal buffer. -func (b *Buffer) Bytes() []byte { - return b.buf -} - -// Unread returns the unread portion of the buffer. -func (b *Buffer) Unread() []byte { - return b.buf[b.idx:] -} - -// Marshal appends the wire-format encoding of m to the buffer. -func (b *Buffer) Marshal(m Message) error { - var err error - b.buf, err = marshalAppend(b.buf, m, b.deterministic) - return err -} - -// Unmarshal parses the wire-format message in the buffer and -// places the decoded results in m. -// It does not reset m before unmarshaling. -func (b *Buffer) Unmarshal(m Message) error { - err := UnmarshalMerge(b.Unread(), m) - b.idx = len(b.buf) - return err -} - -type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } - -func (m *unknownFields) String() string { panic("not implemented") } -func (m *unknownFields) Reset() { panic("not implemented") } -func (m *unknownFields) ProtoMessage() { panic("not implemented") } - -// DebugPrint dumps the encoded bytes of b with a header and footer including s -// to stdout. This is only intended for debugging. -func (*Buffer) DebugPrint(s string, b []byte) { - m := MessageReflect(new(unknownFields)) - m.SetUnknown(b) - b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) - fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) -} - -// EncodeVarint appends an unsigned varint encoding to the buffer. -func (b *Buffer) EncodeVarint(v uint64) error { - b.buf = protowire.AppendVarint(b.buf, v) - return nil -} - -// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. -func (b *Buffer) EncodeZigzag32(v uint64) error { - return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) -} - -// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. -func (b *Buffer) EncodeZigzag64(v uint64) error { - return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) -} - -// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. -func (b *Buffer) EncodeFixed32(v uint64) error { - b.buf = protowire.AppendFixed32(b.buf, uint32(v)) - return nil -} - -// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. -func (b *Buffer) EncodeFixed64(v uint64) error { - b.buf = protowire.AppendFixed64(b.buf, uint64(v)) - return nil -} - -// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. -func (b *Buffer) EncodeRawBytes(v []byte) error { - b.buf = protowire.AppendBytes(b.buf, v) - return nil -} - -// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. -// It does not validate whether v contains valid UTF-8. -func (b *Buffer) EncodeStringBytes(v string) error { - b.buf = protowire.AppendString(b.buf, v) - return nil -} - -// EncodeMessage appends a length-prefixed encoded message to the buffer. -func (b *Buffer) EncodeMessage(m Message) error { - var err error - b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) - b.buf, err = marshalAppend(b.buf, m, b.deterministic) - return err -} - -// DecodeVarint consumes an encoded unsigned varint from the buffer. 
-func (b *Buffer) DecodeVarint() (uint64, error) { - v, n := protowire.ConsumeVarint(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. -func (b *Buffer) DecodeZigzag32() (uint64, error) { - v, err := b.DecodeVarint() - if err != nil { - return 0, err - } - return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil -} - -// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. -func (b *Buffer) DecodeZigzag64() (uint64, error) { - v, err := b.DecodeVarint() - if err != nil { - return 0, err - } - return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil -} - -// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. -func (b *Buffer) DecodeFixed32() (uint64, error) { - v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. -func (b *Buffer) DecodeFixed64() (uint64, error) { - v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. -// If alloc is specified, it returns a copy the raw bytes -// rather than a sub-slice of the buffer. -func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { - v, n := protowire.ConsumeBytes(b.buf[b.idx:]) - if n < 0 { - return nil, protowire.ParseError(n) - } - b.idx += n - if alloc { - v = append([]byte(nil), v...) - } - return v, nil -} - -// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. -// It does not validate whether the raw bytes contain valid UTF-8. -func (b *Buffer) DecodeStringBytes() (string, error) { - v, n := protowire.ConsumeString(b.buf[b.idx:]) - if n < 0 { - return "", protowire.ParseError(n) - } - b.idx += n - return v, nil -} - -// DecodeMessage consumes a length-prefixed message from the buffer. -// It does not reset m before unmarshaling. -func (b *Buffer) DecodeMessage(m Message) error { - v, err := b.DecodeRawBytes(false) - if err != nil { - return err - } - return UnmarshalMerge(v, m) -} - -// DecodeGroup consumes a message group from the buffer. -// It assumes that the start group marker has already been consumed and -// consumes all bytes until (and including the end group marker). -// It does not reset m before unmarshaling. -func (b *Buffer) DecodeGroup(m Message) error { - v, n, err := consumeGroup(b.buf[b.idx:]) - if err != nil { - return err - } - b.idx += n - return UnmarshalMerge(v, m) -} - -// consumeGroup parses b until it finds an end group marker, returning -// the raw bytes of the message (excluding the end group marker) and the -// the total length of the message (including the end group marker). 
-func consumeGroup(b []byte) ([]byte, int, error) { - b0 := b - depth := 1 // assume this follows a start group marker - for { - _, wtyp, tagLen := protowire.ConsumeTag(b) - if tagLen < 0 { - return nil, 0, protowire.ParseError(tagLen) - } - b = b[tagLen:] - - var valLen int - switch wtyp { - case protowire.VarintType: - _, valLen = protowire.ConsumeVarint(b) - case protowire.Fixed32Type: - _, valLen = protowire.ConsumeFixed32(b) - case protowire.Fixed64Type: - _, valLen = protowire.ConsumeFixed64(b) - case protowire.BytesType: - _, valLen = protowire.ConsumeBytes(b) - case protowire.StartGroupType: - depth++ - case protowire.EndGroupType: - depth-- - default: - return nil, 0, errors.New("proto: cannot parse reserved wire type") - } - if valLen < 0 { - return nil, 0, protowire.ParseError(valLen) - } - b = b[valLen:] - - if depth == 0 { - return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go deleted file mode 100644 index d399bf069c..0000000000 --- a/vendor/github.com/golang/protobuf/proto/defaults.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// SetDefaults sets unpopulated scalar fields to their default values. -// Fields within a oneof are not set even if they have a default value. -// SetDefaults is recursively called upon any populated message fields. -func SetDefaults(m Message) { - if m != nil { - setDefaults(MessageReflect(m)) - } -} - -func setDefaults(m protoreflect.Message) { - fds := m.Descriptor().Fields() - for i := 0; i < fds.Len(); i++ { - fd := fds.Get(i) - if !m.Has(fd) { - if fd.HasDefault() && fd.ContainingOneof() == nil { - v := fd.Default() - if fd.Kind() == protoreflect.BytesKind { - v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes - } - m.Set(fd, v) - } - continue - } - } - - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - switch { - // Handle singular message. - case fd.Cardinality() != protoreflect.Repeated: - if fd.Message() != nil { - setDefaults(m.Get(fd).Message()) - } - // Handle list of messages. - case fd.IsList(): - if fd.Message() != nil { - ls := m.Get(fd).List() - for i := 0; i < ls.Len(); i++ { - setDefaults(ls.Get(i).Message()) - } - } - // Handle map of messages. - case fd.IsMap(): - if fd.MapValue().Message() != nil { - ms := m.Get(fd).Map() - ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { - setDefaults(v.Message()) - return true - }) - } - } - return true - }) -} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index e8db57e097..0000000000 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" - - protoV2 "google.golang.org/protobuf/proto" -) - -var ( - // Deprecated: No longer returned. - ErrNil = errors.New("proto: Marshal called with nil") - - // Deprecated: No longer returned. 
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB") - - // Deprecated: No longer returned. - ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -) - -// Deprecated: Do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: Do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: Do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func RegisterMessageSetType(Message, int32, string) {} - -// Deprecated: Do not use. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// Deprecated: Do not use. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// Deprecated: Do not use; this type existed for intenal-use only. -type InternalMessageInfo struct{} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) DiscardUnknown(m Message) { - DiscardUnknown(m) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { - return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Merge(dst, src Message) { - protoV2.Merge(MessageV2(dst), MessageV2(src)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Size(m Message) int { - return protoV2.Size(MessageV2(m)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { - return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) -} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index 2187e877fa..0000000000 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. 
-// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -func DiscardUnknown(m Message) { - if m != nil { - discardUnknown(MessageReflect(m)) - } -} - -func discardUnknown(m protoreflect.Message) { - m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { - switch { - // Handle singular message. - case fd.Cardinality() != protoreflect.Repeated: - if fd.Message() != nil { - discardUnknown(m.Get(fd).Message()) - } - // Handle list of messages. - case fd.IsList(): - if fd.Message() != nil { - ls := m.Get(fd).List() - for i := 0; i < ls.Len(); i++ { - discardUnknown(ls.Get(i).Message()) - } - } - // Handle map of messages. - case fd.IsMap(): - if fd.MapValue().Message() != nil { - ms := m.Get(fd).Map() - ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { - discardUnknown(v.Message()) - return true - }) - } - } - return true - }) - - // Discard unknown fields. - if len(m.GetUnknown()) > 0 { - m.SetUnknown(nil) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index 42fc120c97..0000000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "errors" - "fmt" - "reflect" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoiface" - "google.golang.org/protobuf/runtime/protoimpl" -) - -type ( - // ExtensionDesc represents an extension descriptor and - // is used to interact with an extension field in a message. - // - // Variables of this type are generated in code by protoc-gen-go. - ExtensionDesc = protoimpl.ExtensionInfo - - // ExtensionRange represents a range of message extensions. - // Used in code generated by protoc-gen-go. - ExtensionRange = protoiface.ExtensionRangeV1 - - // Deprecated: Do not use; this is an internal type. - Extension = protoimpl.ExtensionFieldV1 - - // Deprecated: Do not use; this is an internal type. - XXX_InternalExtensions = protoimpl.ExtensionFields -) - -// ErrMissingExtension reports whether the extension was not present. -var ErrMissingExtension = errors.New("proto: missing extension") - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -// HasExtension reports whether the extension field is present in m -// either as an explicitly populated field or as an unknown field. -func HasExtension(m Message, xt *ExtensionDesc) (has bool) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return false - } - - // Check whether any populated known field matches the field number. - xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - has = mr.Has(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - has = int32(fd.Number()) == xt.Field - return !has - }) - } - - // Check whether any unknown field matches the field number. 
- for b := mr.GetUnknown(); !has && len(b) > 0; { - num, _, n := protowire.ConsumeField(b) - has = int32(num) == xt.Field - b = b[n:] - } - return has -} - -// ClearExtension removes the extension field from m -// either as an explicitly populated field or as an unknown field. -func ClearExtension(m Message, xt *ExtensionDesc) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - mr.Clear(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if int32(fd.Number()) == xt.Field { - mr.Clear(fd) - return false - } - return true - }) - } - clearUnknown(mr, fieldNum(xt.Field)) -} - -// ClearAllExtensions clears all extensions from m. -// This includes populated fields and unknown fields in the extension range. -func ClearAllExtensions(m Message) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if fd.IsExtension() { - mr.Clear(fd) - } - return true - }) - clearUnknown(mr, mr.Descriptor().ExtensionRanges()) -} - -// GetExtension retrieves a proto2 extended field from m. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes for the extension field. -func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return nil, errNotExtendable - } - - // Retrieve the unknown fields for this extension field. - var bo protoreflect.RawFields - for bi := mr.GetUnknown(); len(bi) > 0; { - num, _, n := protowire.ConsumeField(bi) - if int32(num) == xt.Field { - bo = append(bo, bi[:n]...) - } - bi = bi[n:] - } - - // For type incomplete descriptors, only retrieve the unknown fields. - if xt.ExtensionType == nil { - return []byte(bo), nil - } - - // If the extension field only exists as unknown fields, unmarshal it. - // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. - xtd := xt.TypeDescriptor() - if !isValidExtension(mr.Descriptor(), xtd) { - return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) - } - if !mr.Has(xtd) && len(bo) > 0 { - m2 := mr.New() - if err := (proto.UnmarshalOptions{ - Resolver: extensionResolver{xt}, - }.Unmarshal(bo, m2.Interface())); err != nil { - return nil, err - } - if m2.Has(xtd) { - mr.Set(xtd, m2.Get(xtd)) - clearUnknown(mr, fieldNum(xt.Field)) - } - } - - // Check whether the message has the extension field set or a default. - var pv protoreflect.Value - switch { - case mr.Has(xtd): - pv = mr.Get(xtd) - case xtd.HasDefault(): - pv = xtd.Default() - default: - return nil, ErrMissingExtension - } - - v := xt.InterfaceOf(pv) - rv := reflect.ValueOf(v) - if isScalarKind(rv.Kind()) { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - return v, nil -} - -// extensionResolver is a custom extension resolver that stores a single -// extension type that takes precedence over the global registry. 
-type extensionResolver struct{ xt protoreflect.ExtensionType } - -func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { - return r.xt, nil - } - return protoregistry.GlobalTypes.FindExtensionByName(field) -} - -func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { - return r.xt, nil - } - return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) -} - -// GetExtensions returns a list of the extensions values present in m, -// corresponding with the provided list of extension descriptors, xts. -// If an extension is missing in m, the corresponding value is nil. -func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return nil, errNotExtendable - } - - vs := make([]interface{}, len(xts)) - for i, xt := range xts { - v, err := GetExtension(m, xt) - if err != nil { - if err == ErrMissingExtension { - continue - } - return vs, err - } - vs[i] = v - } - return vs, nil -} - -// SetExtension sets an extension field in m to the provided value. -func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return errNotExtendable - } - - rv := reflect.ValueOf(v) - if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) - } - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) - } - if isScalarKind(rv.Elem().Kind()) { - v = rv.Elem().Interface() - } - } - - xtd := xt.TypeDescriptor() - if !isValidExtension(mr.Descriptor(), xtd) { - return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) - } - mr.Set(xtd, xt.ValueOf(v)) - clearUnknown(mr, fieldNum(xt.Field)) - return nil -} - -// SetRawExtension inserts b into the unknown fields of m. -// -// Deprecated: Use Message.ProtoReflect.SetUnknown instead. -func SetRawExtension(m Message, fnum int32, b []byte) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - // Verify that the raw field is valid. - for b0 := b; len(b0) > 0; { - num, _, n := protowire.ConsumeField(b0) - if int32(num) != fnum { - panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) - } - b0 = b0[n:] - } - - ClearExtension(m, &ExtensionDesc{Field: fnum}) - mr.SetUnknown(append(mr.GetUnknown(), b...)) -} - -// ExtensionDescs returns a list of extension descriptors found in m, -// containing descriptors for both populated extension fields in m and -// also unknown fields of m that are in the extension range. -// For the later case, an type incomplete descriptor is provided where only -// the ExtensionDesc.Field field is populated. -// The order of the extension descriptors is undefined. -func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return nil, errNotExtendable - } - - // Collect a set of known extension descriptors. 
- extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) - mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - xt := fd.(protoreflect.ExtensionTypeDescriptor) - if xd, ok := xt.Type().(*ExtensionDesc); ok { - extDescs[fd.Number()] = xd - } - } - return true - }) - - // Collect a set of unknown extension descriptors. - extRanges := mr.Descriptor().ExtensionRanges() - for b := mr.GetUnknown(); len(b) > 0; { - num, _, n := protowire.ConsumeField(b) - if extRanges.Has(num) && extDescs[num] == nil { - extDescs[num] = nil - } - b = b[n:] - } - - // Transpose the set of descriptors into a list. - var xts []*ExtensionDesc - for num, xt := range extDescs { - if xt == nil { - xt = &ExtensionDesc{Field: int32(num)} - } - xts = append(xts, xt) - } - return xts, nil -} - -// isValidExtension reports whether xtd is a valid extension descriptor for md. -func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { - return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) -} - -// isScalarKind reports whether k is a protobuf scalar kind (except bytes). -// This function exists for historical reasons since the representation of -// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. -func isScalarKind(k reflect.Kind) bool { - switch k { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - return true - default: - return false - } -} - -// clearUnknown removes unknown fields from m where remover.Has reports true. -func clearUnknown(m protoreflect.Message, remover interface { - Has(protoreflect.FieldNumber) bool -}) { - var bo protoreflect.RawFields - for bi := m.GetUnknown(); len(bi) > 0; { - num, _, n := protowire.ConsumeField(bi) - if !remover.Has(num) { - bo = append(bo, bi[:n]...) - } - bi = bi[n:] - } - if bi := m.GetUnknown(); len(bi) != len(bo) { - m.SetUnknown(bo) - } -} - -type fieldNum protoreflect.FieldNumber - -func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { - return protoreflect.FieldNumber(n1) == n2 -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index dcdc2202fa..0000000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "sync" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// StructProperties represents protocol buffer type information for a -// generated protobuf message in the open-struct API. -// -// Deprecated: Do not use. -type StructProperties struct { - // Prop are the properties for each field. - // - // Fields belonging to a oneof are stored in OneofTypes instead, with a - // single Properties representing the parent oneof held here. - // - // The order of Prop matches the order of fields in the Go struct. - // Struct fields that are not related to protobufs have a "XXX_" prefix - // in the Properties.Name and must be ignored by the user. - Prop []*Properties - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the protobuf field name. 
- OneofTypes map[string]*OneofProperties -} - -// Properties represents the type information for a protobuf message field. -// -// Deprecated: Do not use. -type Properties struct { - // Name is a placeholder name with little meaningful semantic value. - // If the name has an "XXX_" prefix, the entire Properties must be ignored. - Name string - // OrigName is the protobuf field name or oneof name. - OrigName string - // JSONName is the JSON name for the protobuf field. - JSONName string - // Enum is a placeholder name for enums. - // For historical reasons, this is neither the Go name for the enum, - // nor the protobuf name for the enum. - Enum string // Deprecated: Do not use. - // Weak contains the full name of the weakly referenced message. - Weak string - // Wire is a string representation of the wire type. - Wire string - // WireType is the protobuf wire type for the field. - WireType int - // Tag is the protobuf field number. - Tag int - // Required reports whether this is a required field. - Required bool - // Optional reports whether this is a optional field. - Optional bool - // Repeated reports whether this is a repeated field. - Repeated bool - // Packed reports whether this is a packed repeated field of scalars. - Packed bool - // Proto3 reports whether this field operates under the proto3 syntax. - Proto3 bool - // Oneof reports whether this field belongs within a oneof. - Oneof bool - - // Default is the default value in string form. - Default string - // HasDefault reports whether the field has a default value. - HasDefault bool - - // MapKeyProp is the properties for the key field for a map field. - MapKeyProp *Properties - // MapValProp is the properties for the value field for a map field. - MapValProp *Properties -} - -// OneofProperties represents the type information for a protobuf oneof. -// -// Deprecated: Do not use. -type OneofProperties struct { - // Type is a pointer to the generated wrapper type for the field value. - // This is nil for messages that are not in the open-struct API. - Type reflect.Type - // Field is the index into StructProperties.Prop for the containing oneof. - Field int - // Prop is the properties for the field. - Prop *Properties -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," + strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != "" { - s += ",json=" + p.JSONName - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if len(p.Weak) > 0 { - s += ",weak=" + p.Weak - } - if p.Proto3 { - s += ",proto3" - } - if p.Oneof { - s += ",oneof" - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(tag string) { - // For example: "bytes,49,opt,name=foo,def=hello!" 
- for len(tag) > 0 { - i := strings.IndexByte(tag, ',') - if i < 0 { - i = len(tag) - } - switch s := tag[:i]; { - case strings.HasPrefix(s, "name="): - p.OrigName = s[len("name="):] - case strings.HasPrefix(s, "json="): - p.JSONName = s[len("json="):] - case strings.HasPrefix(s, "enum="): - p.Enum = s[len("enum="):] - case strings.HasPrefix(s, "weak="): - p.Weak = s[len("weak="):] - case strings.Trim(s, "0123456789") == "": - n, _ := strconv.ParseUint(s, 10, 32) - p.Tag = int(n) - case s == "opt": - p.Optional = true - case s == "req": - p.Required = true - case s == "rep": - p.Repeated = true - case s == "varint" || s == "zigzag32" || s == "zigzag64": - p.Wire = s - p.WireType = WireVarint - case s == "fixed32": - p.Wire = s - p.WireType = WireFixed32 - case s == "fixed64": - p.Wire = s - p.WireType = WireFixed64 - case s == "bytes": - p.Wire = s - p.WireType = WireBytes - case s == "group": - p.Wire = s - p.WireType = WireStartGroup - case s == "packed": - p.Packed = true - case s == "proto3": - p.Proto3 = true - case s == "oneof": - p.Oneof = true - case strings.HasPrefix(s, "def="): - // The default tag is special in that everything afterwards is the - // default regardless of the presence of commas. - p.HasDefault = true - p.Default, i = tag[len("def="):], len(tag) - } - tag = strings.TrimPrefix(tag[i:], ",") - } -} - -// Init populates the properties from a protocol buffer struct tag. -// -// Deprecated: Do not use. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - - if typ != nil && typ.Kind() == reflect.Map { - p.MapKeyProp = new(Properties) - p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) - p.MapValProp = new(Properties) - p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) - } -} - -var propertiesCache sync.Map // map[reflect.Type]*StructProperties - -// GetProperties returns the list of properties for the type represented by t, -// which must be a generated protocol buffer message in the open-struct API, -// where protobuf message fields are represented by exported Go struct fields. -// -// Deprecated: Use protobuf reflection instead. -func GetProperties(t reflect.Type) *StructProperties { - if p, ok := propertiesCache.Load(t); ok { - return p.(*StructProperties) - } - p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) - return p.(*StructProperties) -} - -func newProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - - var hasOneof bool - prop := new(StructProperties) - - // Construct a list of properties for each field in the struct. - for i := 0; i < t.NumField(); i++ { - p := new(Properties) - f := t.Field(i) - tagField := f.Tag.Get("protobuf") - p.Init(f.Type, f.Name, tagField, &f) - - tagOneof := f.Tag.Get("protobuf_oneof") - if tagOneof != "" { - hasOneof = true - p.OrigName = tagOneof - } - - // Rename unrelated struct fields with the "XXX_" prefix since so much - // user code simply checks for this to exclude special fields. - if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { - p.Name = "XXX_" + p.Name - p.OrigName = "XXX_" + p.OrigName - } else if p.Weak != "" { - p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field - } - - prop.Prop = append(prop.Prop, p) - } - - // Construct a mapping of oneof field names to properties. 
- if hasOneof { - var oneofWrappers []interface{} - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) - } - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) - } - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { - if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { - oneofWrappers = m.ProtoMessageInfo().OneofWrappers - } - } - - prop.OneofTypes = make(map[string]*OneofProperties) - for _, wrapper := range oneofWrappers { - p := &OneofProperties{ - Type: reflect.ValueOf(wrapper).Type(), // *T - Prop: new(Properties), - } - f := p.Type.Elem().Field(0) - p.Prop.Name = f.Name - p.Prop.Parse(f.Tag.Get("protobuf")) - - // Determine the struct field that contains this oneof. - // Each wrapper is assignable to exactly one parent field. - var foundOneof bool - for i := 0; i < t.NumField() && !foundOneof; i++ { - if p.Type.AssignableTo(t.Field(i).Type) { - p.Field = i - foundOneof = true - } - } - if !foundOneof { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - prop.OneofTypes[p.Prop.OrigName] = p - } - } - - return prop -} - -func (sp *StructProperties) Len() int { return len(sp.Prop) } -func (sp *StructProperties) Less(i, j int) bool { return false } -func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go deleted file mode 100644 index 5aee89c323..0000000000 --- a/vendor/github.com/golang/protobuf/proto/proto.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package proto provides functionality for handling protocol buffer messages. -// In particular, it provides marshaling and unmarshaling between a protobuf -// message and the binary wire format. -// -// See https://developers.google.com/protocol-buffers/docs/gotutorial for -// more information. -// -// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. -package proto - -import ( - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" - "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - ProtoPackageIsVersion1 = true - ProtoPackageIsVersion2 = true - ProtoPackageIsVersion3 = true - ProtoPackageIsVersion4 = true -) - -// GeneratedEnum is any enum type generated by protoc-gen-go -// which is a named int32 kind. -// This type exists for documentation purposes. -type GeneratedEnum interface{} - -// GeneratedMessage is any message type generated by protoc-gen-go -// which is a pointer to a named struct kind. -// This type exists for documentation purposes. -type GeneratedMessage interface{} - -// Message is a protocol buffer message. -// -// This is the v1 version of the message interface and is marginally better -// than an empty interface as it lacks any method to programatically interact -// with the contents of the message. -// -// A v2 message is declared in "google.golang.org/protobuf/proto".Message and -// exposes protobuf reflection as a first-class feature of the interface. 
-// -// To convert a v1 message to a v2 message, use the MessageV2 function. -// To convert a v2 message to a v1 message, use the MessageV1 function. -type Message = protoiface.MessageV1 - -// MessageV1 converts either a v1 or v2 message to a v1 message. -// It returns nil if m is nil. -func MessageV1(m GeneratedMessage) protoiface.MessageV1 { - return protoimpl.X.ProtoMessageV1Of(m) -} - -// MessageV2 converts either a v1 or v2 message to a v2 message. -// It returns nil if m is nil. -func MessageV2(m GeneratedMessage) protoV2.Message { - return protoimpl.X.ProtoMessageV2Of(m) -} - -// MessageReflect returns a reflective view for a message. -// It returns nil if m is nil. -func MessageReflect(m Message) protoreflect.Message { - return protoimpl.X.MessageOf(m) -} - -// Marshaler is implemented by messages that can marshal themselves. -// This interface is used by the following functions: Size, Marshal, -// Buffer.Marshal, and Buffer.EncodeMessage. -// -// Deprecated: Do not implement. -type Marshaler interface { - // Marshal formats the encoded bytes of the message. - // It should be deterministic and emit valid protobuf wire data. - // The caller takes ownership of the returned buffer. - Marshal() ([]byte, error) -} - -// Unmarshaler is implemented by messages that can unmarshal themselves. -// This interface is used by the following functions: Unmarshal, UnmarshalMerge, -// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. -// -// Deprecated: Do not implement. -type Unmarshaler interface { - // Unmarshal parses the encoded bytes of the protobuf wire input. - // The provided buffer is only valid for during method call. - // It should not reset the receiver message. - Unmarshal([]byte) error -} - -// Merger is implemented by messages that can merge themselves. -// This interface is used by the following functions: Clone and Merge. -// -// Deprecated: Do not implement. -type Merger interface { - // Merge merges the contents of src into the receiver message. - // It clones all data structures in src such that it aliases no mutable - // memory referenced by src. - Merge(src Message) -} - -// RequiredNotSetError is an error type returned when -// marshaling or unmarshaling a message with missing required fields. -type RequiredNotSetError struct { - err error -} - -func (e *RequiredNotSetError) Error() string { - if e.err != nil { - return e.err.Error() - } - return "proto: required field not set" -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -func checkRequiredNotSet(m protoV2.Message) error { - if err := protoV2.CheckInitialized(m); err != nil { - return &RequiredNotSetError{err: err} - } - return nil -} - -// Clone returns a deep copy of src. -func Clone(src Message) Message { - return MessageV1(protoV2.Clone(MessageV2(src))) -} - -// Merge merges src into dst, which must be messages of the same type. -// -// Populated scalar fields in src are copied to dst, while populated -// singular messages in src are merged into dst by recursively calling Merge. -// The elements of every list field in src is appended to the corresponded -// list fields in dst. The entries of every map field in src is copied into -// the corresponding map field in dst, possibly replacing existing entries. -// The unknown fields of src are appended to the unknown fields of dst. -func Merge(dst, src Message) { - protoV2.Merge(MessageV2(dst), MessageV2(src)) -} - -// Equal reports whether two messages are equal. 
-// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. -// -// Two messages are equal if they are the same protobuf message type, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. -// -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. -func Equal(x, y Message) bool { - return protoV2.Equal(MessageV2(x), MessageV2(y)) -} - -func isMessageSet(md protoreflect.MessageDescriptor) bool { - ms, ok := md.(interface{ IsMessageSet() bool }) - return ok && ms.IsMessageSet() -} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go deleted file mode 100644 index 066b4323b4..0000000000 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - "reflect" - "strings" - "sync" - - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// filePath is the path to the proto source file. -type filePath = string // e.g., "google/protobuf/descriptor.proto" - -// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. -type fileDescGZIP = []byte - -var fileCache sync.Map // map[filePath]fileDescGZIP - -// RegisterFile is called from generated code to register the compressed -// FileDescriptorProto with the file path for a proto source file. -// -// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. -func RegisterFile(s filePath, d fileDescGZIP) { - // Decompress the descriptor. - zr, err := gzip.NewReader(bytes.NewReader(d)) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - b, err := ioutil.ReadAll(zr) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - - // Construct a protoreflect.FileDescriptor from the raw descriptor. - // Note that DescBuilder.Build automatically registers the constructed - // file descriptor with the v2 registry. - protoimpl.DescBuilder{RawDescriptor: b}.Build() - - // Locally cache the raw descriptor form for the file. - fileCache.Store(s, d) -} - -// FileDescriptor returns the compressed FileDescriptorProto given the file path -// for a proto source file. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. -func FileDescriptor(s filePath) fileDescGZIP { - if v, ok := fileCache.Load(s); ok { - return v.(fileDescGZIP) - } - - // Find the descriptor in the v2 registry. - var b []byte - if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) - } - - // Locally cache the raw descriptor form for the file. 
- if len(b) > 0 { - v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) - return v.(fileDescGZIP) - } - return nil -} - -// enumName is the name of an enum. For historical reasons, the enum name is -// neither the full Go name nor the full protobuf name of the enum. -// The name is the dot-separated combination of just the proto package that the -// enum is declared within followed by the Go type name of the generated enum. -type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" - -// enumsByName maps enum values by name to their numeric counterpart. -type enumsByName = map[string]int32 - -// enumsByNumber maps enum values by number to their name counterpart. -type enumsByNumber = map[int32]string - -var enumCache sync.Map // map[enumName]enumsByName -var numFilesCache sync.Map // map[protoreflect.FullName]int - -// RegisterEnum is called from the generated code to register the mapping of -// enum value names to enum numbers for the enum identified by s. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. -func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { - if _, ok := enumCache.Load(s); ok { - panic("proto: duplicate enum registered: " + s) - } - enumCache.Store(s, m) - - // This does not forward registration to the v2 registry since this API - // lacks sufficient information to construct a complete v2 enum descriptor. -} - -// EnumValueMap returns the mapping from enum value names to enum numbers for -// the enum of the given name. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. -func EnumValueMap(s enumName) enumsByName { - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - - // Check whether the cache is stale. If the number of files in the current - // package differs, then it means that some enums may have been recently - // registered upstream that we do not know about. - var protoPkg protoreflect.FullName - if i := strings.LastIndexByte(s, '.'); i >= 0 { - protoPkg = protoreflect.FullName(s[:i]) - } - v, _ := numFilesCache.Load(protoPkg) - numFiles, _ := v.(int) - if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { - return nil // cache is up-to-date; was not found earlier - } - - // Update the enum cache for all enums declared in the given proto package. - numFiles = 0 - protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { - walkEnums(fd, func(ed protoreflect.EnumDescriptor) { - name := protoimpl.X.LegacyEnumName(ed) - if _, ok := enumCache.Load(name); !ok { - m := make(enumsByName) - evs := ed.Values() - for i := evs.Len() - 1; i >= 0; i-- { - ev := evs.Get(i) - m[string(ev.Name())] = int32(ev.Number()) - } - enumCache.LoadOrStore(name, m) - } - }) - numFiles++ - return true - }) - numFilesCache.Store(protoPkg, numFiles) - - // Check cache again for enum map. - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - return nil -} - -// walkEnums recursively walks all enums declared in d. -func walkEnums(d interface { - Enums() protoreflect.EnumDescriptors - Messages() protoreflect.MessageDescriptors -}, f func(protoreflect.EnumDescriptor)) { - eds := d.Enums() - for i := eds.Len() - 1; i >= 0; i-- { - f(eds.Get(i)) - } - mds := d.Messages() - for i := mds.Len() - 1; i >= 0; i-- { - walkEnums(mds.Get(i), f) - } -} - -// messageName is the full name of protobuf message. 
-type messageName = string - -var messageTypeCache sync.Map // map[messageName]reflect.Type - -// RegisterType is called from generated code to register the message Go type -// for a message of the given name. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. -func RegisterType(m Message, s messageName) { - mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) - if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { - panic(err) - } - messageTypeCache.Store(s, reflect.TypeOf(m)) -} - -// RegisterMapType is called from generated code to register the Go map type -// for a protobuf message representing a map entry. -// -// Deprecated: Do not use. -func RegisterMapType(m interface{}, s messageName) { - t := reflect.TypeOf(m) - if t.Kind() != reflect.Map { - panic(fmt.Sprintf("invalid map kind: %v", t)) - } - if _, ok := messageTypeCache.Load(s); ok { - panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) - } - messageTypeCache.Store(s, t) -} - -// MessageType returns the message type for a named message. -// It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. -func MessageType(s messageName) reflect.Type { - if v, ok := messageTypeCache.Load(s); ok { - return v.(reflect.Type) - } - - // Derive the message type from the v2 registry. - var t reflect.Type - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { - t = messageGoType(mt) - } - - // If we could not get a concrete type, it is possible that it is a - // pseudo-message for a map entry. - if t == nil { - d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) - if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { - kt := goTypeForField(md.Fields().ByNumber(1)) - vt := goTypeForField(md.Fields().ByNumber(2)) - t = reflect.MapOf(kt, vt) - } - } - - // Locally cache the message type for the given name. - if t != nil { - v, _ := messageTypeCache.LoadOrStore(s, t) - return v.(reflect.Type) - } - return nil -} - -func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { - switch k := fd.Kind(); k { - case protoreflect.EnumKind: - if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { - return enumGoType(et) - } - return reflect.TypeOf(protoreflect.EnumNumber(0)) - case protoreflect.MessageKind, protoreflect.GroupKind: - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { - return messageGoType(mt) - } - return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() - default: - return reflect.TypeOf(fd.Default().Interface()) - } -} - -func enumGoType(et protoreflect.EnumType) reflect.Type { - return reflect.TypeOf(et.New(0)) -} - -func messageGoType(mt protoreflect.MessageType) reflect.Type { - return reflect.TypeOf(MessageV1(mt.Zero().Interface())) -} - -// MessageName returns the full protobuf name for the given message type. -// -// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. -func MessageName(m Message) messageName { - if m == nil { - return "" - } - if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { - return m.XXX_MessageName() - } - return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) -} - -// RegisterExtension is called from the generated code to register -// the extension descriptor. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. 
-func RegisterExtension(d *ExtensionDesc) { - if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { - panic(err) - } -} - -type extensionsByNumber = map[int32]*ExtensionDesc - -var extensionCache sync.Map // map[messageName]extensionsByNumber - -// RegisteredExtensions returns a map of the registered extensions for the -// provided protobuf message, indexed by the extension field number. -// -// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. -func RegisteredExtensions(m Message) extensionsByNumber { - // Check whether the cache is stale. If the number of extensions for - // the given message differs, then it means that some extensions were - // recently registered upstream that we do not know about. - s := MessageName(m) - v, _ := extensionCache.Load(s) - xs, _ := v.(extensionsByNumber) - if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { - return xs // cache is up-to-date - } - - // Cache is stale, re-compute the extensions map. - xs = make(extensionsByNumber) - protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { - if xd, ok := xt.(*ExtensionDesc); ok { - xs[int32(xt.TypeDescriptor().Number())] = xd - } else { - // TODO: This implies that the protoreflect.ExtensionType is a - // custom type not generated by protoc-gen-go. We could try and - // convert the type to an ExtensionDesc. - } - return true - }) - extensionCache.Store(s, xs) - return xs -} diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go deleted file mode 100644 index 47eb3e4450..0000000000 --- a/vendor/github.com/golang/protobuf/proto/text_decode.go +++ /dev/null @@ -1,801 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" - - "google.golang.org/protobuf/encoding/prototext" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextUnmarshalV2 = false - -// ParseError is returned by UnmarshalText. -type ParseError struct { - Message string - - // Deprecated: Do not use. - Line, Offset int -} - -func (e *ParseError) Error() string { - if wrapTextUnmarshalV2 { - return e.Message - } - if e.Line == 1 { - return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) - } - return fmt.Sprintf("line %d: %v", e.Line, e.Message) -} - -// UnmarshalText parses a proto text formatted string into m. 
-func UnmarshalText(s string, m Message) error { - if u, ok := m.(encoding.TextUnmarshaler); ok { - return u.UnmarshalText([]byte(s)) - } - - m.Reset() - mi := MessageV2(m) - - if wrapTextUnmarshalV2 { - err := prototext.UnmarshalOptions{ - AllowPartial: true, - }.Unmarshal([]byte(s), mi) - if err != nil { - return &ParseError{Message: err.Error()} - } - return checkRequiredNotSet(mi) - } else { - if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { - return err - } - return checkRequiredNotSet(mi) - } -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { - md := m.Descriptor() - fds := md.Fields() - - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - seen := make(map[protoreflect.FieldNumber]bool) - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - if err := p.unmarshalExtensionOrAny(m, seen); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := protoreflect.Name(tok.value) - fd := fds.ByName(name) - switch { - case fd == nil: - gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) - if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { - fd = gd - } - case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: - fd = nil - case fd.IsWeak() && fd.Message().IsPlaceholder(): - fd = nil - } - if fd == nil { - typeName := string(md.FullName()) - if m, ok := m.Interface().(Message); ok { - t := reflect.TypeOf(m) - if t.Kind() == reflect.Ptr { - typeName = t.Elem().String() - } - } - return p.errorf("unknown field name %q in %v", name, typeName) - } - if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) - } - if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { - return p.errorf("non-repeated field %q was repeated", fd.Name()) - } - seen[fd.Number()] = true - - // Consume any colon. - if err := p.checkForColon(fd); err != nil { - return err - } - - // Parse into the field. - v := m.Get(fd) - if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { - v = m.Mutable(fd) - } - if v, err = p.unmarshalValue(v, fd); err != nil { - return err - } - m.Set(fd, v) - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - } - return nil -} - -func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { - name, err := p.consumeExtensionOrAnyName() - if err != nil { - return err - } - - // If it contains a slash, it's an Any type URL. 
- if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { - tok := p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - - mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) - if err != nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) - } - m2 := mt.New() - if err := p.unmarshalMessage(m2, terminator); err != nil { - return err - } - b, err := protoV2.Marshal(m2.Interface()) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) - } - - urlFD := m.Descriptor().Fields().ByName("type_url") - valFD := m.Descriptor().Fields().ByName("value") - if seen[urlFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) - } - if seen[valFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) - } - m.Set(urlFD, protoreflect.ValueOfString(name)) - m.Set(valFD, protoreflect.ValueOfBytes(b)) - seen[urlFD.Number()] = true - seen[valFD.Number()] = true - return nil - } - - xname := protoreflect.FullName(name) - xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) - if xt == nil && isMessageSet(m.Descriptor()) { - xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) - } - if xt == nil { - return p.errorf("unrecognized extension %q", name) - } - fd := xt.TypeDescriptor() - if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { - return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName()) - } - - if err := p.checkForColon(fd); err != nil { - return err - } - - v := m.Get(fd) - if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { - v = m.Mutable(fd) - } - v, err = p.unmarshalValue(v, fd) - if err != nil { - return err - } - m.Set(fd, v) - return p.consumeOptionalSeparator() -} - -func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "" { - return v, p.errorf("unexpected EOF") - } - - switch { - case fd.IsList(): - lv := v.List() - var err error - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - vv := lv.NewElement() - vv, err = p.unmarshalSingularValue(vv, fd) - if err != nil { - return v, err - } - lv.Append(vv) - - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return v, p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return v, nil - } - - // One value of the repeated field. - p.back() - vv := lv.NewElement() - vv, err = p.unmarshalSingularValue(vv, fd) - if err != nil { - return v, err - } - lv.Append(vv) - return v, nil - case fd.IsMap(): - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. 
- var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return v, p.errorf("expected '{' or '<', found %q", tok.value) - } - - keyFD := fd.MapKey() - valFD := fd.MapValue() - - mv := v.Map() - kv := keyFD.Default() - vv := mv.NewValue() - for { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == terminator { - break - } - var err error - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return v, err - } - if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { - return v, err - } - if err := p.consumeOptionalSeparator(); err != nil { - return v, err - } - case "value": - if err := p.checkForColon(valFD); err != nil { - return v, err - } - if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { - return v, err - } - if err := p.consumeOptionalSeparator(); err != nil { - return v, err - } - default: - p.back() - return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - mv.Set(kv.MapKey(), vv) - return v, nil - default: - p.back() - return p.unmarshalSingularValue(v, fd) - } -} - -func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "" { - return v, p.errorf("unexpected EOF") - } - - switch fd.Kind() { - case protoreflect.BoolKind: - switch tok.value { - case "true", "1", "t", "True": - return protoreflect.ValueOfBool(true), nil - case "false", "0", "f", "False": - return protoreflect.ValueOfBool(false), nil - } - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfInt32(int32(x)), nil - } - - // The C++ parser accepts large positive hex numbers that uses - // two's complement arithmetic to represent negative numbers. - // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil - } - } - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(x)), nil - } - - // The C++ parser accepts large positive hex numbers that uses - // two's complement arithmetic to represent negative numbers. - // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil - } - } - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfUint32(uint32(x)), nil - } - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfUint64(uint64(x)), nil - } - case protoreflect.FloatKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". 
- v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 32); err == nil { - return protoreflect.ValueOfFloat32(float32(x)), nil - } - case protoreflect.DoubleKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". - v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 64); err == nil { - return protoreflect.ValueOfFloat64(float64(x)), nil - } - case protoreflect.StringKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfString(tok.unquoted), nil - } - case protoreflect.BytesKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil - } - case protoreflect.EnumKind: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil - } - vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) - if vd != nil { - return protoreflect.ValueOfEnum(vd.Number()), nil - } - case protoreflect.MessageKind, protoreflect.GroupKind: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return v, p.errorf("expected '{' or '<', found %q", tok.value) - } - err := p.unmarshalMessage(v.Message(), terminator) - return v, err - default: - panic(fmt.Sprintf("invalid kind %v", fd.Kind())) - } - return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - if fd.Message() == nil { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -// consumeExtensionOrAnyName consumes an extension name or an Any type URL and -// the following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtensionOrAnyName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in unmarshalMessage to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -var errBadUTF8 = errors.New("proto: bad UTF-8") - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. 
- simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(rune(i)), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go deleted file mode 100644 index a31134eeb3..0000000000 --- a/vendor/github.com/golang/protobuf/proto/text_encode.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "encoding" - "fmt" - "io" - "math" - "sort" - "strings" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextMarshalV2 = false - -// TextMarshaler is a configurable text format marshaler. 
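Both this deleted decoder and the marshaler whose diff begins here were compatibility shims over the v2 implementation; callers migrate to the prototext package. A minimal sketch of the replacement calls, again using durationpb only as a convenient well-known message:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var d durationpb.Duration
	// Replaces proto.UnmarshalText from the deleted text_decode.go.
	if err := prototext.Unmarshal([]byte(`seconds:5 nanos:250000000`), &d); err != nil {
		panic(err)
	}
	// Replaces proto.MarshalTextString / TextMarshaler from text_encode.go.
	fmt.Println(prototext.Format(&d))
}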
-type TextMarshaler struct { - Compact bool // use compact text format (one line) - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes the proto text format of m to w. -func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { - b, err := tm.marshal(m) - if len(b) > 0 { - if _, err := w.Write(b); err != nil { - return err - } - } - return err -} - -// Text returns a proto text formatted string of m. -func (tm *TextMarshaler) Text(m Message) string { - b, _ := tm.marshal(m) - return string(b) -} - -func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return []byte(""), nil - } - - if wrapTextMarshalV2 { - if m, ok := m.(encoding.TextMarshaler); ok { - return m.MarshalText() - } - - opts := prototext.MarshalOptions{ - AllowPartial: true, - EmitUnknown: true, - } - if !tm.Compact { - opts.Indent = " " - } - if !tm.ExpandAny { - opts.Resolver = (*protoregistry.Types)(nil) - } - return opts.Marshal(mr.Interface()) - } else { - w := &textWriter{ - compact: tm.Compact, - expandAny: tm.ExpandAny, - complete: true, - } - - if m, ok := m.(encoding.TextMarshaler); ok { - b, err := m.MarshalText() - if err != nil { - return nil, err - } - w.Write(b) - return w.buf, nil - } - - err := w.writeMessage(mr) - return w.buf, err - } -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// MarshalText writes the proto text format of m to w. -func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } - -// MarshalTextString returns a proto text formatted string of m. -func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } - -// CompactText writes the compact proto text format of m to w. -func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } - -// CompactTextString returns a compact proto text formatted string of m. -func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } - -var ( - newline = []byte("\n") - endBraceNewline = []byte("}\n") - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - compact bool // same as TextMarshaler.Compact - expandAny bool // same as TextMarshaler.ExpandAny - complete bool // whether the current position is a complete line - indent int // indentation level; never negative - buf []byte -} - -func (w *textWriter) Write(p []byte) (n int, _ error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, p...) - w.complete = false - return len(p), nil - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - w.buf = append(w.buf, ' ') - n++ - } - w.buf = append(w.buf, frag...) - n += len(frag) - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - w.buf = append(w.buf, frag...) 
- n += len(frag) - if i+1 < len(frags) { - w.buf = append(w.buf, '\n') - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, c) - w.complete = c == '\n' - return nil -} - -func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - - if fd.Kind() != protoreflect.GroupKind { - w.buf = append(w.buf, fd.Name()...) - w.WriteByte(':') - } else { - // Use message type name for group field name. - w.buf = append(w.buf, fd.Message().Name()...) - } - - if !w.compact { - w.WriteByte(' ') - } -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { - md := m.Descriptor() - fdURL := md.Fields().ByName("type_url") - fdVal := md.Fields().ByName("value") - - url := m.Get(fdURL).String() - mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) - if err != nil { - return false, nil - } - - b := m.Get(fdVal).Bytes() - m2 := mt.New() - if err := proto.Unmarshal(b, m2.Interface()); err != nil { - return false, nil - } - w.Write([]byte("[")) - if requiresQuotes(url) { - w.writeQuotedString(url) - } else { - w.Write([]byte(url)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.indent++ - } - if err := w.writeMessage(m2); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.indent-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (w *textWriter) writeMessage(m protoreflect.Message) error { - md := m.Descriptor() - if w.expandAny && md.FullName() == "google.protobuf.Any" { - if canExpand, err := w.writeProto3Any(m); canExpand { - return err - } - } - - fds := md.Fields() - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - } else { - i++ - } - if fd == nil || !m.Has(fd) { - continue - } - - switch { - case fd.IsList(): - lv := m.Get(fd).List() - for j := 0; j < lv.Len(); j++ { - w.writeName(fd) - v := lv.Get(j) - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - } - case fd.IsMap(): - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := m.Get(fd).Map() - - type entry struct{ key, val protoreflect.Value } - var entries []entry - mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - entries = append(entries, entry{k.Value(), v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - switch kfd.Kind() { - case protoreflect.BoolKind: - return !entries[i].key.Bool() && entries[j].key.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, 
protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return entries[i].key.Int() < entries[j].key.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return entries[i].key.Uint() < entries[j].key.Uint() - case protoreflect.StringKind: - return entries[i].key.String() < entries[j].key.String() - default: - panic("invalid kind") - } - }) - for _, entry := range entries { - w.writeName(fd) - w.WriteByte('<') - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - w.writeName(kfd) - if err := w.writeSingularValue(entry.key, kfd); err != nil { - return err - } - w.WriteByte('\n') - w.writeName(vfd) - if err := w.writeSingularValue(entry.val, vfd); err != nil { - return err - } - w.WriteByte('\n') - w.indent-- - w.WriteByte('>') - w.WriteByte('\n') - } - default: - w.writeName(fd) - if err := w.writeSingularValue(m.Get(fd), fd); err != nil { - return err - } - w.WriteByte('\n') - } - } - - if b := m.GetUnknown(); len(b) > 0 { - w.writeUnknownFields(b) - } - return w.writeExtensions(m) -} - -func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - switch fd.Kind() { - case protoreflect.FloatKind, protoreflect.DoubleKind: - switch vf := v.Float(); { - case math.IsInf(vf, +1): - w.Write(posInf) - case math.IsInf(vf, -1): - w.Write(negInf) - case math.IsNaN(vf): - w.Write(nan) - default: - fmt.Fprint(w, v.Interface()) - } - case protoreflect.StringKind: - // NOTE: This does not validate UTF-8 for historical reasons. - w.writeQuotedString(string(v.String())) - case protoreflect.BytesKind: - w.writeQuotedString(string(v.Bytes())) - case protoreflect.MessageKind, protoreflect.GroupKind: - var bra, ket byte = '<', '>' - if fd.Kind() == protoreflect.GroupKind { - bra, ket = '{', '}' - } - w.WriteByte(bra) - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - m := v.Message() - if m2, ok := m.Interface().(encoding.TextMarshaler); ok { - b, err := m2.MarshalText() - if err != nil { - return err - } - w.Write(b) - } else { - w.writeMessage(m) - } - w.indent-- - w.WriteByte(ket) - case protoreflect.EnumKind: - if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { - fmt.Fprint(w, ev.Name()) - } else { - fmt.Fprint(w, v.Enum()) - } - default: - fmt.Fprint(w, v.Interface()) - } - return nil -} - -// writeQuotedString writes a quoted string in the protocol buffer text format. -func (w *textWriter) writeQuotedString(s string) { - w.WriteByte('"') - for i := 0; i < len(s); i++ { - switch c := s[i]; c { - case '\n': - w.buf = append(w.buf, `\n`...) - case '\r': - w.buf = append(w.buf, `\r`...) - case '\t': - w.buf = append(w.buf, `\t`...) - case '"': - w.buf = append(w.buf, `\"`...) - case '\\': - w.buf = append(w.buf, `\\`...) - default: - if isPrint := c >= 0x20 && c < 0x7f; isPrint { - w.buf = append(w.buf, c) - } else { - w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) 
- } - } - } - w.WriteByte('"') -} - -func (w *textWriter) writeUnknownFields(b []byte) { - if !w.compact { - fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) - } - - for len(b) > 0 { - num, wtyp, n := protowire.ConsumeTag(b) - if n < 0 { - return - } - b = b[n:] - - if wtyp == protowire.EndGroupType { - w.indent-- - w.Write(endBraceNewline) - continue - } - fmt.Fprint(w, num) - if wtyp != protowire.StartGroupType { - w.WriteByte(':') - } - if !w.compact || wtyp == protowire.StartGroupType { - w.WriteByte(' ') - } - switch wtyp { - case protowire.VarintType: - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed32Type: - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed64Type: - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.BytesType: - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprintf(w, "%q", v) - case protowire.StartGroupType: - w.WriteByte('{') - w.indent++ - default: - fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) - } - w.WriteByte('\n') - } -} - -// writeExtensions writes all the extensions in m. -func (w *textWriter) writeExtensions(m protoreflect.Message) error { - md := m.Descriptor() - if md.ExtensionRanges().Len() == 0 { - return nil - } - - type ext struct { - desc protoreflect.FieldDescriptor - val protoreflect.Value - } - var exts []ext - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - exts = append(exts, ext{fd, v}) - } - return true - }) - sort.Slice(exts, func(i, j int) bool { - return exts[i].desc.Number() < exts[j].desc.Number() - }) - - for _, ext := range exts { - // For message set, use the name of the message as the extension name. - name := string(ext.desc.FullName()) - if isMessageSet(ext.desc.ContainingMessage()) { - name = strings.TrimSuffix(name, ".message_set_extension") - } - - if !ext.desc.IsList() { - if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { - return err - } - } else { - lv := ext.val.List() - for i := 0; i < lv.Len(); i++ { - if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { - return err - } - } - } - } - return nil -} - -func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - fmt.Fprintf(w, "[%s]:", name) - if !w.compact { - w.WriteByte(' ') - } - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - for i := 0; i < w.indent*2; i++ { - w.buf = append(w.buf, ' ') - } - w.complete = false -} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go deleted file mode 100644 index d7c28da5a7..0000000000 --- a/vendor/github.com/golang/protobuf/proto/wire.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/runtime/protoiface" -) - -// Size returns the size in bytes of the wire-format encoding of m. 
-func Size(m Message) int { - if m == nil { - return 0 - } - mi := MessageV2(m) - return protoV2.Size(mi) -} - -// Marshal returns the wire-format encoding of m. -func Marshal(m Message) ([]byte, error) { - b, err := marshalAppend(nil, m, false) - if b == nil { - b = zeroBytes - } - return b, err -} - -var zeroBytes = make([]byte, 0, 0) - -func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { - if m == nil { - return nil, ErrNil - } - mi := MessageV2(m) - nbuf, err := protoV2.MarshalOptions{ - Deterministic: deterministic, - AllowPartial: true, - }.MarshalAppend(buf, mi) - if err != nil { - return buf, err - } - if len(buf) == len(nbuf) { - if !mi.ProtoReflect().IsValid() { - return buf, ErrNil - } - } - return nbuf, checkRequiredNotSet(mi) -} - -// Unmarshal parses a wire-format message in b and places the decoded results in m. -// -// Unmarshal resets m before starting to unmarshal, so any existing data in m is always -// removed. Use UnmarshalMerge to preserve and append to existing data. -func Unmarshal(b []byte, m Message) error { - m.Reset() - return UnmarshalMerge(b, m) -} - -// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. -func UnmarshalMerge(b []byte, m Message) error { - mi := MessageV2(m) - out, err := protoV2.UnmarshalOptions{ - AllowPartial: true, - Merge: true, - }.UnmarshalState(protoiface.UnmarshalInput{ - Buf: b, - Message: mi.ProtoReflect(), - }) - if err != nil { - return err - } - if out.Flags&protoiface.UnmarshalInitialized > 0 { - return nil - } - return checkRequiredNotSet(mi) -} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go deleted file mode 100644 index 398e348599..0000000000 --- a/vendor/github.com/golang/protobuf/proto/wrappers.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -// Bool stores v in a new bool value and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int stores v in a new int32 value and returns a pointer to it. -// -// Deprecated: Use Int32 instead. -func Int(v int) *int32 { return Int32(int32(v)) } - -// Int32 stores v in a new int32 value and returns a pointer to it. -func Int32(v int32) *int32 { return &v } - -// Int64 stores v in a new int64 value and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// Uint32 stores v in a new uint32 value and returns a pointer to it. -func Uint32(v uint32) *uint32 { return &v } - -// Uint64 stores v in a new uint64 value and returns a pointer to it. -func Uint64(v uint64) *uint64 { return &v } - -// Float32 stores v in a new float32 value and returns a pointer to it. -func Float32(v float32) *float32 { return &v } - -// Float64 stores v in a new float64 value and returns a pointer to it. -func Float64(v float64) *float64 { return &v } - -// String stores v in a new string value and returns a pointer to it. -func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index fdff3fdb4c..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
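The wire.go and wrappers.go helpers deleted above are thin wrappers over google.golang.org/protobuf; the v2 proto package exposes the same operations, and the same pointer helpers, directly. An illustrative sketch:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := durationpb.New(90 * time.Second)

	b, err := proto.Marshal(d) // replaces the deleted legacy Marshal
	if err != nil {
		panic(err)
	}
	fmt.Println(proto.Size(d), len(b)) // Size reports the wire-format length

	var out durationpb.Duration
	if err := proto.Unmarshal(b, &out); err != nil { // replaces legacy Unmarshal
		panic(err)
	}
	fmt.Println(out.AsDuration())

	// The pointer helpers from the deleted wrappers.go live on in v2.
	p := proto.String("x")
	fmt.Println(*p)
}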
- -package ptypes - -import ( - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - anypb "github.com/golang/protobuf/ptypes/any" -) - -const urlPrefix = "type.googleapis.com/" - -// AnyMessageName returns the message name contained in an anypb.Any message. -// Most type assertions should use the Is function instead. -// -// Deprecated: Call the any.MessageName method instead. -func AnyMessageName(any *anypb.Any) (string, error) { - name, err := anyMessageName(any) - return string(name), err -} -func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - name := protoreflect.FullName(any.TypeUrl) - if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return name, nil -} - -// MarshalAny marshals the given message m into an anypb.Any message. -// -// Deprecated: Call the anypb.New function instead. -func MarshalAny(m proto.Message) (*anypb.Any, error) { - switch dm := m.(type) { - case DynamicAny: - m = dm.Message - case *DynamicAny: - if dm == nil { - return nil, proto.ErrNil - } - m = dm.Message - } - b, err := proto.Marshal(m) - if err != nil { - return nil, err - } - return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil -} - -// Empty returns a new message of the type specified in an anypb.Any message. -// It returns protoregistry.NotFound if the corresponding message type could not -// be resolved in the global registry. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead -// to resolve the message name and create a new instance of it. -func Empty(any *anypb.Any) (proto.Message, error) { - name, err := anyMessageName(any) - if err != nil { - return nil, err - } - mt, err := protoregistry.GlobalTypes.FindMessageByName(name) - if err != nil { - return nil, err - } - return proto.MessageV1(mt.New().Interface()), nil -} - -// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message -// into the provided message m. It returns an error if the target message -// does not match the type in the Any message or if an unmarshal error occurs. -// -// The target message m may be a *DynamicAny message. If the underlying message -// type could not be resolved, then this returns protoregistry.NotFound. -// -// Deprecated: Call the any.UnmarshalTo method instead. -func UnmarshalAny(any *anypb.Any, m proto.Message) error { - if dm, ok := m.(*DynamicAny); ok { - if dm.Message == nil { - var err error - dm.Message, err = Empty(any) - if err != nil { - return err - } - } - m = dm.Message - } - - anyName, err := AnyMessageName(any) - if err != nil { - return err - } - msgName := proto.MessageName(m) - if anyName != msgName { - return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) - } - return proto.Unmarshal(any.Value, m) -} - -// Is reports whether the Any message contains a message of the specified type. -// -// Deprecated: Call the any.MessageIs method instead. 
-func Is(any *anypb.Any, m proto.Message) bool { - if any == nil || m == nil { - return false - } - name := proto.MessageName(m) - if !strings.HasSuffix(any.TypeUrl, name) { - return false - } - return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in an anypb.Any message. -// The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -// -// Deprecated: Use the any.UnmarshalNew method instead to unmarshal -// the any message contents into a new instance of the underlying message. -type DynamicAny struct{ proto.Message } - -func (m DynamicAny) String() string { - if m.Message == nil { - return "" - } - return m.Message.String() -} -func (m DynamicAny) Reset() { - if m.Message == nil { - return - } - m.Message.Reset() -} -func (m DynamicAny) ProtoMessage() { - return -} -func (m DynamicAny) ProtoReflect() protoreflect.Message { - if m.Message == nil { - return nil - } - return dynamicAny{proto.MessageReflect(m.Message)} -} - -type dynamicAny struct{ protoreflect.Message } - -func (m dynamicAny) Type() protoreflect.MessageType { - return dynamicAnyType{m.Message.Type()} -} -func (m dynamicAny) New() protoreflect.Message { - return dynamicAnyType{m.Message.Type()}.New() -} -func (m dynamicAny) Interface() protoreflect.ProtoMessage { - return DynamicAny{proto.MessageV1(m.Message.Interface())} -} - -type dynamicAnyType struct{ protoreflect.MessageType } - -func (t dynamicAnyType) New() protoreflect.Message { - return dynamicAny{t.MessageType.New()} -} -func (t dynamicAnyType) Zero() protoreflect.Message { - return dynamicAny{t.MessageType.Zero()} -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 0ef27d33de..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/any/any.proto - -package any - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/any.proto. 
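Each deprecated ptypes helper above names its v2 replacement in its doc comment; gathered into one illustrative sketch against the anypb API:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := durationpb.New(3 * time.Second)

	a, err := anypb.New(d) // replaces ptypes.MarshalAny
	if err != nil {
		panic(err)
	}
	fmt.Println(a.MessageIs(&durationpb.Duration{})) // replaces ptypes.Is

	var out durationpb.Duration
	if err := a.UnmarshalTo(&out); err != nil { // replaces ptypes.UnmarshalAny
		panic(err)
	}
	fmt.Println(out.AsDuration())

	m, err := a.UnmarshalNew() // replaces the DynamicAny pattern
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", m)
}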
- -type Any = anypb.Any - -var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } -func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { - if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_any_any_proto = out.File - file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index d3c33259d2..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ptypes provides functionality for interacting with well-known types. -// -// Deprecated: Well-known types have specialized functionality directly -// injected into the generated packages for each message type. -// See the deprecation notice for each function for the suggested alternative. -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index b2b55dd851..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ptypes - -import ( - "errors" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" -) - -// Range of google.protobuf.Duration as specified in duration.proto. -// This is about 10,000 years in seconds. -const ( - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// Duration converts a durationpb.Duration to a time.Duration. -// Duration returns an error if dur is invalid or overflows a time.Duration. -// -// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. -func Duration(dur *durationpb.Duration) (time.Duration, error) { - if err := validateDuration(dur); err != nil { - return 0, err - } - d := time.Duration(dur.Seconds) * time.Second - if int64(d/time.Second) != dur.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - if dur.Nanos != 0 { - d += time.Duration(dur.Nanos) * time.Nanosecond - if (d < 0) != (dur.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durationpb.Duration. -// -// Deprecated: Call the durationpb.New function instead. -func DurationProto(d time.Duration) *durationpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durationpb.Duration{ - Seconds: int64(secs), - Nanos: int32(nanos), - } -} - -// validateDuration determines whether the durationpb.Duration is valid -// according to the definition in google/protobuf/duration.proto. -// A valid durpb.Duration may still be too large to fit into a time.Duration -// Note that the range of durationpb.Duration is about 10,000 years, -// while the range of time.Duration is about 290 years. -func validateDuration(dur *durationpb.Duration) error { - if dur == nil { - return errors.New("duration: nil Duration") - } - if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", dur) - } - if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", dur) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index d0079ee3ef..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto - -package duration - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/duration.proto. 
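The durationpb methods that supersede these deleted helpers enforce the same validity rules documented above (seconds within roughly ±10,000 years, nanos in (-1e9, 1e9), matching signs). A short illustrative sketch:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := durationpb.New(1500 * time.Millisecond) // replaces ptypes.DurationProto
	if err := d.CheckValid(); err != nil {       // replaces validateDuration
		panic(err)
	}
	fmt.Println(d.AsDuration()) // replaces ptypes.Duration; prints 1.5s
}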
- -type Duration = durationpb.Duration - -var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } -func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { - if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File - file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 8368a3f70d..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" -) - -// Range of google.protobuf.Duration as specified in timestamp.proto. -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. 
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// Timestamp converts a timestamppb.Timestamp to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return -// value is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -// -// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. -func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -// -// Deprecated: Call the timestamppb.Now function instead. -func TimestampNow() *timestamppb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -// -// Deprecated: Call the timestamppb.New function instead. -func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { - ts := ×tamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. -// For invalid Timestamps, it returns an error message in parentheses. -// -// Deprecated: Call the ts.AsTime method instead, -// followed by a call to the Format method on the time.Time value. -func TimestampString(ts *timestamppb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) -// and has a Nanos field in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes the problem. -// -// Every valid Timestamp can be represented by a time.Time, -// but the converse is not true. 
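The timestamppb methods that supersede these helpers enforce the same range, [0001-01-01, 10000-01-01) with nanos in [0, 1e9). An illustrative sketch:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := timestamppb.Now()                 // replaces ptypes.TimestampNow
	if err := ts.CheckValid(); err != nil { // same checks as validateTimestamp
		panic(err)
	}
	// Replaces ptypes.TimestampString.
	fmt.Println(ts.AsTime().Format(time.RFC3339Nano))
}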
-func validateTimestamp(ts *timestamppb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index a76f807600..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -package timestamp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/timestamp.proto. - -type Timestamp = timestamppb.Timestamp - -var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, - 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } -func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { - if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, - DependencyIndexes: 
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/btree/LICENSE similarity index 100% rename from vendor/github.com/google/gofuzz/LICENSE rename to vendor/github.com/google/btree/LICENSE diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md new file mode 100644 index 0000000000..eab5dbf7ba --- /dev/null +++ b/vendor/github.com/google/btree/README.md @@ -0,0 +1,10 @@ +# BTree implementation for Go + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +The API is based off of the wonderful +http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to +act as a drop-in replacement for gollrb trees. + +See http://godoc.org/github.com/google/btree for documentation. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go new file mode 100644 index 0000000000..6f5184fef7 --- /dev/null +++ b/vendor/github.com/google/btree/btree.go @@ -0,0 +1,893 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.18 +// +build !go1.18 + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. 
+// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +var ( + nilItems = make(items, 16) + nilChildren = make(children, 16) +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList struct { + mu sync.Mutex + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +// freeNode adds the given node to the list, returning true if it was added +// and false if it was discarded. +func (f *FreeList) freeNode(n *node) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + cow: ©OnWriteContext{freelist: f}, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. 
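Everything below keys off the Item interface defined above: Less must be a strict weak ordering, so keys that compare equal replace one another rather than duplicate. A usage sketch with a hypothetical integer key type (illustrative only, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/google/btree"
)

// key satisfies the Item interface above; equal keys replace on insert.
type key int

func (a key) Less(b btree.Item) bool { return a < b.(key) }

func main() {
	tr := btree.New(2) // degree 2 => a 2-3-4 tree, per the New doc above
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(key(i))
	}
	fmt.Println(tr.Len())        // 10
	fmt.Println(tr.Get(key(3)))  // 3
	fmt.Println(tr.Has(key(42))) // false
}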
+func (s *items) removeAt(index int) Item { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. +func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + cow *copyOnWriteContext +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) 
+ n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. 
+ out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1).mutableFor(n.cow) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. 
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start != nil { + index, _ = n.items.find(start) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { + hit = true + continue + } + hit = true + if stop != nil && !n.items[i].Less(stop) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start != nil { + index, found = n.items.find(start) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start != nil && !n.items[i].Less(start) { + if !includeStart || hit || start.Less(n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop != nil && !stop.Less(n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// Used for testing/debugging purposes. +func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct { + freelist *FreeList +} + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. 
Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext) newNode() (n *node) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). +func (c *copyOnWriteContext) freeNode(n *node) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. 
+func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, pivot, nil, true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, pivot, false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, nil, false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. 
+func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTree) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node) reset(c *copyOnWriteContext) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go new file mode 100644 index 0000000000..e44a0f4880 --- /dev/null +++ b/vendor/github.com/google/btree/btree_generic.go @@ -0,0 +1,1083 @@ +// Copyright 2014-2022 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific +// instantiation of that generic for the Item interface, with a backwards- +// compatible API. Before go1.18, generics are not supported, +// and BTree is just an implementation based around the Item interface. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. 
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children. For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+// * Due to the overhead of storing values as interfaces (each
+// value needs to be stored as the value itself, then 2 words for the
+// interface pointing to that value and its type), resulting in higher
+// memory use.
+// * Since interfaces can point to values anywhere in memory, values are
+// most likely not stored in contiguous blocks, resulting in a higher
+// number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement to gollrb.LLRB
+// trees, (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+//
+// There are two implementations; those suffixed with 'G' are generics, usable
+// for any type, and require a passed-in "less" function to define their ordering.
+// Those without this suffix are specific to the 'Item' interface, and use
+// its 'Less' function for ordering.
+package btree
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+ // Less tests whether the current item is less than the given argument.
+ //
+ // This must provide a strict weak ordering.
+ // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+ // hold one of either a or b in the tree).
+ Less(than Item) bool
+}
+
+const (
+ DefaultFreeListSize = 32
+)
+
+// FreeListG represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList, in particular when they're created with Clone.
+// Two Btrees using the same freelist are safe for concurrent write access.
+type FreeListG[T any] struct {
+ mu sync.Mutex
+ freelist []*node[T]
+}
+
+// NewFreeListG creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeListG[T any](size int) *FreeListG[T] { + return &FreeListG[T]{freelist: make([]*node[T], 0, size)} +} + +func (f *FreeListG[T]) newNode() (n *node[T]) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node[T]) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIteratorG[T any] func(item T) bool + +// Ordered represents the set of types for which the '<' operator work. +type Ordered interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string +} + +// Less[T] returns a default LessFunc that uses the '<' operator for types that support it. +func Less[T Ordered]() LessFunc[T] { + return func(a, b T) bool { return a < b } +} + +// NewOrderedG creates a new B-Tree for ordered types. +func NewOrderedG[T Ordered](degree int) *BTreeG[T] { + return NewG[T](degree, Less[T]()) +} + +// NewG creates a new B-Tree with the given degree. +// +// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +// +// The passed-in LessFunc determines how objects of type T are ordered. +func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] { + return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize)) +} + +// NewWithFreeListG creates a new B-Tree that uses the given node free list. +func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] { + if degree <= 1 { + panic("bad degree") + } + return &BTreeG[T]{ + degree: degree, + cow: ©OnWriteContext[T]{freelist: f, less: less}, + } +} + +// items stores items in a node. +type items[T any] []T + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items[T]) insertAt(index int, item T) { + var zero T + *s = append(*s, zero) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items[T]) removeAt(index int) T { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + var zero T + (*s)[len(*s)-1] = zero + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items[T]) pop() (out T) { + index := len(*s) - 1 + out = (*s)[index] + var zero T + (*s)[index] = zero + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items[T]) truncate(index int) { + var toClear items[T] + *s, toClear = (*s)[:index], (*s)[index:] + var zero T + for i := 0; i < len(toClear); i++ { + toClear[i] = zero + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. 
+func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return less(item, s[i]) + }) + if i > 0 && !less(s[i-1], item) { + return i - 1, true + } + return i, false +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node[T any] struct { + items items[T] + children items[*node[T]] + cow *copyOnWriteContext[T] +} + +func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items[T], len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(items[*node[T]], len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node[T]) mutableChild(i int) *node[T] { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node[T]) split(i int) (T, *node[T]) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node[T]) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) { + i, found := n.items.find(item, n.cow.less) + if found { + out := n.items[i] + n.items[i] = item + return out, true + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case n.cow.less(item, inTree): + // no change, we want first split node + case n.cow.less(inTree, item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out, true + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node[T]) get(key T) (_ T, _ bool) { + i, found := n.items.find(key, n.cow.less) + if found { + return n.items[i], true + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return +} + +// min returns the first item in the subtree. +func min[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return + } + return n.items[0], true +} + +// max returns the last item in the subtree. 
+func max[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return + } + return n.items[len(n.items)-1], true +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop(), true + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0), true + } + i = 0 + case removeItem: + i, found = n.items.find(item, n.cow.less) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i), true + } + return + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + var zero T + n.items[i], _ = child.remove(zero, minItems, removeMax) + return out, true + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. 
+func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +type optionalItem[T any] struct { + item T + valid bool +} + +func optional[T any](item T) optionalItem[T] { + return optionalItem[T]{item: item, valid: true} +} +func empty[T any]() optionalItem[T] { + return optionalItem[T]{} +} + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. 
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start.valid { + index, _ = n.items.find(start.item, n.cow.less) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) { + hit = true + continue + } + hit = true + if stop.valid && !n.cow.less(n.items[i], stop.item) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start.valid { + index, found = n.items.find(start.item, n.cow.less) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start.valid && !n.cow.less(n.items[i], start.item) { + if !includeStart || hit || n.cow.less(start.item, n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop.valid && !n.cow.less(stop.item, n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// print is used for testing/debugging purposes. +func (n *node[T]) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTreeG is a generic implementation of a B-Tree. +// +// BTreeG stores items of type T in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTreeG[T any] struct { + degree int + length int + root *node[T] + cow *copyOnWriteContext[T] +} + +// LessFunc[T] determines how to order a type 'T'. It should implement a strict +// ordering, and should return true if within that ordering, 'a' < 'b'. +type LessFunc[T any] func(a, b T) bool + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext[T any] struct { + freelist *FreeListG[T] + less LessFunc[T] +} + +// Clone clones the btree, lazily. 
Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of b is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of b's original nodes would have been modified. Read operations
+// should have no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the original performance characteristics of the original tree.
+func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) {
+ // Create two entirely new copy-on-write contexts.
+ // This operation effectively creates three trees:
+ // the original, shared nodes (old b.cow)
+ // the new b.cow nodes
+ // the new out.cow nodes
+ cow1, cow2 := *t.cow, *t.cow
+ out := *t
+ t.cow = &cow1
+ out.cow = &cow2
+ return &out
+}
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTreeG[T]) maxItems() int {
+ return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTreeG[T]) minItems() int {
+ return t.degree - 1
+}
+
+func (c *copyOnWriteContext[T]) newNode() (n *node[T]) {
+ n = c.freelist.newNode()
+ n.cow = c
+ return
+}
+
+type freeType int
+
+const (
+ ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+ ftStored // node was stored in the freelist for later use
+ ftNotOwned // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context. It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType {
+ if n.cow == c {
+ // clear to allow GC
+ n.items.truncate(0)
+ n.children.truncate(0)
+ n.cow = nil
+ if c.freelist.freeNode(n) {
+ return ftStored
+ } else {
+ return ftFreelistFull
+ }
+ } else {
+ return ftNotOwned
+ }
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned,
+// and the second return value is true. Otherwise, (zeroValue, false) is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) {
+ if t.root == nil {
+ t.root = t.cow.newNode()
+ t.root.items = append(t.root.items, item)
+ t.length++
+ return
+ } else {
+ t.root = t.root.mutableFor(t.cow)
+ if len(t.root.items) >= t.maxItems() {
+ item2, second := t.root.split(t.maxItems() / 2)
+ oldroot := t.root
+ t.root = t.cow.newNode()
+ t.root.items = append(t.root.items, item2)
+ t.root.children = append(t.root.children, oldroot, second)
+ }
+ }
+ out, outb := t.root.insert(item, t.maxItems())
+ if !outb {
+ t.length++
+ }
+ return out, outb
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it. If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) Delete(item T) (T, bool) {
+ return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) DeleteMin() (T, bool) {
+ var zero T
+ return t.deleteItem(zero, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMax() (T, bool) { + var zero T + return t.deleteItem(zero, removeMax) +} + +func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) { + if t.root == nil || len(t.root.items) == 0 { + return + } + t.root = t.root.mutableFor(t.cow) + out, outb := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if outb { + t.length-- + } + return out, outb +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns +// (zeroValue, false) if unable to find that item. 
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) { + if t.root == nil { + return + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Min() (_ T, _ bool) { + return min(t.root) +} + +// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Max() (_ T, _ bool) { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTreeG[T]) Has(key T) bool { + _, ok := t.Get(key) + return ok +} + +// Len returns the number of items currently in the tree. +func (t *BTreeG[T]) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTreeG[T]) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node[T]) reset(c *copyOnWriteContext[T]) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree BTreeG[Item] + +var itemLess LessFunc[Item] = func(a, b Item) bool { + return a.Less(b) +} + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return (*BTree)(NewG[Item](degree, itemLess)) +} + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList FreeListG[Item] + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. 
+func NewFreeList(size int) *FreeList { + return (*FreeList)(NewFreeListG[Item](size)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f))) +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator ItemIteratorG[Item] + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + return (*BTree)((*BTreeG[Item])(t).Clone()) +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + i, _ := (*BTreeG[Item])(t).Delete(item) + return i +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + i, _ := (*BTreeG[Item])(t).DeleteMax() + return i +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + i, _ := (*BTreeG[Item])(t).DeleteMin() + return i +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + i, _ := (*BTreeG[Item])(t).Get(key) + return i +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + i, _ := (*BTreeG[Item])(t).Max() + return i +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + i, _ := (*BTreeG[Item])(t).Min() + return i +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return (*BTreeG[Item])(t).Has(key) +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item) + return i +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator)) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. 
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator)) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator)) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator)) +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return (*BTreeG[Item])(t).Len() +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. 
+func (t *BTree) Clear(addNodesToFreelist bool) { + (*BTreeG[Item])(t).Clear(addNodesToFreelist) +} diff --git a/vendor/github.com/google/gnostic-models/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go index 250c81e8c8..16ae66faa3 100644 --- a/vendor/github.com/google/gnostic-models/compiler/extensions.go +++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go @@ -20,8 +20,8 @@ import ( "os/exec" "strings" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" yaml "gopkg.in/yaml.v3" extensions "github.com/google/gnostic-models/extensions" @@ -33,7 +33,7 @@ type ExtensionHandler struct { } // CallExtension calls a binary extension handler. -func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { +func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *anypb.Any, err error) { if context == nil || context.ExtensionHandlers == nil { return false, nil, nil } @@ -50,7 +50,7 @@ func CallExtension(context *Context, in *yaml.Node, extensionName string) (handl return handled, response, err } -func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { +func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*anypb.Any, error) { if extensionHandlers.Name != "" { yamlData, _ := yaml.Marshal(in) request := &extensions.ExtensionHandlerRequest{ diff --git a/vendor/github.com/google/gnostic-models/extensions/extension.pb.go b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go index a71df8abec..16c40d985f 100644 --- a/vendor/github.com/google/gnostic-models/extensions/extension.pb.go +++ b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 -// protoc v3.19.3 +// protoc-gen-go v1.35.1 +// protoc v4.23.4 // source: extensions/extension.proto package gnostic_extension_v1 @@ -51,11 +51,9 @@ type Version struct { func (x *Version) Reset() { *x = Version{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_extensions_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Version) String() string { @@ -66,7 +64,7 @@ func (*Version) ProtoMessage() {} func (x *Version) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -123,11 +121,9 @@ type ExtensionHandlerRequest struct { func (x *ExtensionHandlerRequest) Reset() { *x = ExtensionHandlerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_extensions_extension_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionHandlerRequest) String() string { @@ -138,7 +134,7 @@ func (*ExtensionHandlerRequest) ProtoMessage() {} func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -191,11 +187,9 @@ type ExtensionHandlerResponse struct { func (x *ExtensionHandlerResponse) Reset() { *x = ExtensionHandlerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_extensions_extension_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionHandlerResponse) String() string { @@ -206,7 +200,7 @@ func (*ExtensionHandlerResponse) ProtoMessage() {} func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -257,11 +251,9 @@ type Wrapper struct { func (x *Wrapper) Reset() { *x = Wrapper{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_extensions_extension_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Wrapper) String() string { @@ -272,7 +264,7 @@ func (*Wrapper) ProtoMessage() {} func (x *Wrapper) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -367,7 +359,7 @@ func file_extensions_extension_proto_rawDescGZIP() []byte { } var file_extensions_extension_proto_msgTypes = 
make([]protoimpl.MessageInfo, 4) -var file_extensions_extension_proto_goTypes = []interface{}{ +var file_extensions_extension_proto_goTypes = []any{ (*Version)(nil), // 0: gnostic.extension.v1.Version (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest (*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse @@ -390,56 +382,6 @@ func file_extensions_extension_proto_init() { if File_extensions_extension_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Version); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionHandlerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionHandlerResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Wrapper); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/google/gnostic-models/extensions/extensions.go b/vendor/github.com/google/gnostic-models/extensions/extensions.go index ec8afd0092..0768163e5a 100644 --- a/vendor/github.com/google/gnostic-models/extensions/extensions.go +++ b/vendor/github.com/google/gnostic-models/extensions/extensions.go @@ -19,8 +19,8 @@ import ( "log" "os" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" ) type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) @@ -54,7 +54,7 @@ func Main(handler extensionHandler) { response.Errors = append(response.Errors, err.Error()) } else if handled { response.Handled = true - response.Value, err = ptypes.MarshalAny(output) + response.Value, err = anypb.New(output) if err != nil { response.Errors = append(response.Errors, err.Error()) } diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go index 65c4c913ce..3b930b3de2 100644 --- a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 -// protoc v3.19.3 +// protoc-gen-go v1.35.1 +// protoc v4.23.4 // source: openapiv2/OpenAPIv2.proto package openapi_v2 @@ -43,6 +43,7 @@ type AdditionalPropertiesItem struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *AdditionalPropertiesItem_Schema // *AdditionalPropertiesItem_Boolean Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` @@ -50,11 +51,9 @@ type AdditionalPropertiesItem struct { func (x *AdditionalPropertiesItem) Reset() { *x = AdditionalPropertiesItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AdditionalPropertiesItem) String() string { @@ -65,7 +64,7 @@ func (*AdditionalPropertiesItem) ProtoMessage() {} func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -128,11 +127,9 @@ type Any struct { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -143,7 +140,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -186,11 +183,9 @@ type ApiKeySecurity struct { func (x *ApiKeySecurity) Reset() { *x = ApiKeySecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ApiKeySecurity) String() string { @@ -201,7 +196,7 @@ func (*ApiKeySecurity) ProtoMessage() {} func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -263,11 +258,9 @@ type BasicAuthenticationSecurity struct { func (x *BasicAuthenticationSecurity) Reset() { *x = BasicAuthenticationSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BasicAuthenticationSecurity) String() string { @@ -278,7 +271,7 @@ func (*BasicAuthenticationSecurity) ProtoMessage() {} func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -333,11 +326,9 @@ type BodyParameter struct { func (x *BodyParameter) Reset() { *x = BodyParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BodyParameter) String() string { @@ -348,7 +339,7 @@ func (*BodyParameter) ProtoMessage() {} func (x *BodyParameter) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -422,11 +413,9 @@ type Contact struct { func (x *Contact) Reset() { *x = Contact{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Contact) String() string { @@ -437,7 +426,7 @@ func (*Contact) ProtoMessage() {} func (x *Contact) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -490,11 +479,9 @@ type Default struct { func (x *Default) Reset() { *x = Default{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Default) String() string { @@ -505,7 +492,7 @@ func (*Default) ProtoMessage() {} func (x *Default) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -538,11 +525,9 @@ type Definitions struct { func (x *Definitions) Reset() { *x = Definitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Definitions) String() string { @@ -553,7 +538,7 @@ func (*Definitions) ProtoMessage() {} func (x *Definitions) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -606,11 +591,9 @@ type Document struct { func (x *Document) Reset() { *x = Document{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Document) String() string { @@ -621,7 +604,7 @@ func (*Document) ProtoMessage() {} func (x *Document) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -758,11 +741,9 @@ type Examples struct { func (x *Examples) Reset() { *x = Examples{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Examples) String() string { @@ -773,7 +754,7 @@ func (*Examples) ProtoMessage() {} func (x *Examples) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -808,11 +789,9 @@ type ExternalDocs struct { func (x *ExternalDocs) Reset() { *x = ExternalDocs{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExternalDocs) String() string { @@ -823,7 +802,7 @@ func (*ExternalDocs) ProtoMessage() {} func (x *ExternalDocs) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -879,11 +858,9 @@ type FileSchema struct { func (x *FileSchema) Reset() { *x = FileSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileSchema) String() string { @@ -894,7 +871,7 @@ func (*FileSchema) ProtoMessage() {} func (x *FileSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1016,11 +993,9 @@ type FormDataParameterSubSchema struct { func (x *FormDataParameterSubSchema) Reset() { *x = FormDataParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FormDataParameterSubSchema) String() string { @@ -1031,7 +1006,7 @@ func (*FormDataParameterSubSchema) ProtoMessage() {} func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1235,11 +1210,9 @@ type Header struct { func (x *Header) Reset() { *x = Header{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Header) String() string { @@ -1250,7 +1223,7 @@ func (*Header) ProtoMessage() {} func (x *Header) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1433,11 +1406,9 @@ type HeaderParameterSubSchema struct { func (x *HeaderParameterSubSchema) Reset() { *x = HeaderParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HeaderParameterSubSchema) String() string { @@ -1448,7 +1419,7 @@ func (*HeaderParameterSubSchema) ProtoMessage() {} func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1627,11 +1598,9 @@ type Headers struct { func (x *Headers) Reset() { *x = Headers{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Headers) String() string { @@ -1642,7 +1611,7 @@ func (*Headers) ProtoMessage() {} func (x *Headers) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1685,11 +1654,9 @@ type Info struct { func (x *Info) Reset() { *x = Info{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Info) String() string { @@ -1700,7 +1667,7 @@ func (*Info) ProtoMessage() {} func (x *Info) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1774,11 +1741,9 @@ type ItemsItem struct { func (x *ItemsItem) Reset() { *x = ItemsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ItemsItem) String() string { @@ -1789,7 +1754,7 @@ func (*ItemsItem) ProtoMessage() {} func (x *ItemsItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1822,11 +1787,9 @@ type JsonReference struct { func (x *JsonReference) Reset() { *x = JsonReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *JsonReference) String() string { @@ -1837,7 +1800,7 @@ func (*JsonReference) ProtoMessage() {} func (x *JsonReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1880,11 +1843,9 @@ type License struct { func (x *License) Reset() { *x = License{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *License) String() string { @@ -1895,7 +1856,7 @@ func (*License) ProtoMessage() {} func (x *License) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1945,11 +1906,9 @@ type NamedAny struct { func (x *NamedAny) Reset() { *x = NamedAny{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedAny) String() string { @@ -1960,7 +1919,7 @@ func (*NamedAny) ProtoMessage() {} func (x *NamedAny) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2003,11 +1962,9 @@ type NamedHeader struct { func (x *NamedHeader) Reset() { *x = NamedHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedHeader) String() string { @@ -2018,7 +1975,7 @@ func (*NamedHeader) ProtoMessage() {} func (x *NamedHeader) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x 
!= nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2061,11 +2018,9 @@ type NamedParameter struct { func (x *NamedParameter) Reset() { *x = NamedParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedParameter) String() string { @@ -2076,7 +2031,7 @@ func (*NamedParameter) ProtoMessage() {} func (x *NamedParameter) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2119,11 +2074,9 @@ type NamedPathItem struct { func (x *NamedPathItem) Reset() { *x = NamedPathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedPathItem) String() string { @@ -2134,7 +2087,7 @@ func (*NamedPathItem) ProtoMessage() {} func (x *NamedPathItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2177,11 +2130,9 @@ type NamedResponse struct { func (x *NamedResponse) Reset() { *x = NamedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedResponse) String() string { @@ -2192,7 +2143,7 @@ func (*NamedResponse) ProtoMessage() {} func (x *NamedResponse) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2235,11 +2186,9 @@ type NamedResponseValue struct { func (x *NamedResponseValue) Reset() { *x = NamedResponseValue{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedResponseValue) String() string { @@ -2250,7 +2199,7 @@ func (*NamedResponseValue) ProtoMessage() {} func (x *NamedResponseValue) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2293,11 +2242,9 @@ type NamedSchema struct { func (x *NamedSchema) Reset() { *x = NamedSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] - 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedSchema) String() string { @@ -2308,7 +2255,7 @@ func (*NamedSchema) ProtoMessage() {} func (x *NamedSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2351,11 +2298,9 @@ type NamedSecurityDefinitionsItem struct { func (x *NamedSecurityDefinitionsItem) Reset() { *x = NamedSecurityDefinitionsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedSecurityDefinitionsItem) String() string { @@ -2366,7 +2311,7 @@ func (*NamedSecurityDefinitionsItem) ProtoMessage() {} func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2409,11 +2354,9 @@ type NamedString struct { func (x *NamedString) Reset() { *x = NamedString{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedString) String() string { @@ -2424,7 +2367,7 @@ func (*NamedString) ProtoMessage() {} func (x *NamedString) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2467,11 +2410,9 @@ type NamedStringArray struct { func (x *NamedStringArray) Reset() { *x = NamedStringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamedStringArray) String() string { @@ -2482,7 +2423,7 @@ func (*NamedStringArray) ProtoMessage() {} func (x *NamedStringArray) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2517,6 +2458,7 @@ type NonBodyParameter struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *NonBodyParameter_HeaderParameterSubSchema // *NonBodyParameter_FormDataParameterSubSchema // *NonBodyParameter_QueryParameterSubSchema @@ -2526,11 +2468,9 @@ type NonBodyParameter struct { func (x *NonBodyParameter) Reset() { *x = NonBodyParameter{} - if protoimpl.UnsafeEnabled { - mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NonBodyParameter) String() string { @@ -2541,7 +2481,7 @@ func (*NonBodyParameter) ProtoMessage() {} func (x *NonBodyParameter) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2635,11 +2575,9 @@ type Oauth2AccessCodeSecurity struct { func (x *Oauth2AccessCodeSecurity) Reset() { *x = Oauth2AccessCodeSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Oauth2AccessCodeSecurity) String() string { @@ -2650,7 +2588,7 @@ func (*Oauth2AccessCodeSecurity) ProtoMessage() {} func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2729,11 +2667,9 @@ type Oauth2ApplicationSecurity struct { func (x *Oauth2ApplicationSecurity) Reset() { *x = Oauth2ApplicationSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Oauth2ApplicationSecurity) String() string { @@ -2744,7 +2680,7 @@ func (*Oauth2ApplicationSecurity) ProtoMessage() {} func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2816,11 +2752,9 @@ type Oauth2ImplicitSecurity struct { func (x *Oauth2ImplicitSecurity) Reset() { *x = Oauth2ImplicitSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Oauth2ImplicitSecurity) String() string { @@ -2831,7 +2765,7 @@ func (*Oauth2ImplicitSecurity) ProtoMessage() {} func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2903,11 +2837,9 @@ type Oauth2PasswordSecurity struct { func (x *Oauth2PasswordSecurity) Reset() { *x = Oauth2PasswordSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Oauth2PasswordSecurity) String() string { @@ -2918,7 +2850,7 @@ func (*Oauth2PasswordSecurity) ProtoMessage() {} func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2985,11 +2917,9 @@ type Oauth2Scopes struct { func (x *Oauth2Scopes) Reset() { *x = Oauth2Scopes{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Oauth2Scopes) String() string { @@ -3000,7 +2930,7 @@ func (*Oauth2Scopes) ProtoMessage() {} func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3051,11 +2981,9 @@ type Operation struct { func (x *Operation) Reset() { *x = Operation{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Operation) String() string { @@ -3066,7 +2994,7 @@ func (*Operation) ProtoMessage() {} func (x *Operation) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3178,6 +3106,7 @@ type Parameter struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *Parameter_BodyParameter // *Parameter_NonBodyParameter Oneof isParameter_Oneof `protobuf_oneof:"oneof"` @@ -3185,11 +3114,9 @@ type Parameter struct { func (x *Parameter) Reset() { *x = Parameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Parameter) String() string { @@ -3200,7 +3127,7 @@ func (*Parameter) ProtoMessage() {} func (x *Parameter) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3263,11 +3190,9 @@ type ParameterDefinitions struct { func (x *ParameterDefinitions) Reset() { *x = ParameterDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParameterDefinitions) String() string { @@ -3278,7 +3203,7 @@ func (*ParameterDefinitions) ProtoMessage() {} func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3306,6 +3231,7 @@ type ParametersItem struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *ParametersItem_Parameter // *ParametersItem_JsonReference Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` @@ -3313,11 +3239,9 @@ type ParametersItem struct { func (x *ParametersItem) Reset() { *x = ParametersItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParametersItem) String() string { @@ -3328,7 +3252,7 @@ func (*ParametersItem) ProtoMessage() {} func (x *ParametersItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3400,11 +3324,9 @@ type PathItem struct { func (x *PathItem) Reset() { *x = PathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PathItem) String() string { @@ -3415,7 +3337,7 @@ func (*PathItem) ProtoMessage() {} func (x *PathItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3535,11 +3457,9 @@ type PathParameterSubSchema struct { func (x *PathParameterSubSchema) Reset() { *x = PathParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PathParameterSubSchema) String() string { @@ -3550,7 +3470,7 @@ func (*PathParameterSubSchema) ProtoMessage() {} func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3731,11 +3651,9 @@ type Paths struct { func (x *Paths) Reset() { *x = Paths{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *Paths) String() string { @@ -3746,7 +3664,7 @@ func (*Paths) ProtoMessage() {} func (x *Paths) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3802,11 +3720,9 @@ type PrimitivesItems struct { func (x *PrimitivesItems) Reset() { *x = PrimitivesItems{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PrimitivesItems) String() string { @@ -3817,7 +3733,7 @@ func (*PrimitivesItems) ProtoMessage() {} func (x *PrimitivesItems) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3968,11 +3884,9 @@ type Properties struct { func (x *Properties) Reset() { *x = Properties{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Properties) String() string { @@ -3983,7 +3897,7 @@ func (*Properties) ProtoMessage() {} func (x *Properties) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4042,11 +3956,9 @@ type QueryParameterSubSchema struct { func (x *QueryParameterSubSchema) Reset() { *x = QueryParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QueryParameterSubSchema) String() string { @@ -4057,7 +3969,7 @@ func (*QueryParameterSubSchema) ProtoMessage() {} func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4247,11 +4159,9 @@ type Response struct { func (x *Response) Reset() { *x = Response{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Response) String() string { @@ -4262,7 +4172,7 @@ func (*Response) ProtoMessage() {} func (x *Response) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4323,11 +4233,9 @@ type ResponseDefinitions struct { func (x *ResponseDefinitions) Reset() { *x = ResponseDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResponseDefinitions) String() string { @@ -4338,7 +4246,7 @@ func (*ResponseDefinitions) ProtoMessage() {} func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4366,6 +4274,7 @@ type ResponseValue struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *ResponseValue_Response // *ResponseValue_JsonReference Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` @@ -4373,11 +4282,9 @@ type ResponseValue struct { func (x *ResponseValue) Reset() { *x = ResponseValue{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResponseValue) String() string { @@ -4388,7 +4295,7 @@ func (*ResponseValue) ProtoMessage() {} func (x *ResponseValue) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4452,11 +4359,9 @@ type Responses struct { func (x *Responses) Reset() { *x = Responses{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Responses) String() string { @@ -4467,7 +4372,7 @@ func (*Responses) ProtoMessage() {} func (x *Responses) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4537,11 +4442,9 @@ type Schema struct { func (x *Schema) Reset() { *x = Schema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Schema) String() string { @@ -4552,7 +4455,7 @@ func (*Schema) ProtoMessage() {} func (x *Schema) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4790,6 +4693,7 @@ 
type SchemaItem struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *SchemaItem_Schema // *SchemaItem_FileSchema Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` @@ -4797,11 +4701,9 @@ type SchemaItem struct { func (x *SchemaItem) Reset() { *x = SchemaItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SchemaItem) String() string { @@ -4812,7 +4714,7 @@ func (*SchemaItem) ProtoMessage() {} func (x *SchemaItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4874,11 +4776,9 @@ type SecurityDefinitions struct { func (x *SecurityDefinitions) Reset() { *x = SecurityDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityDefinitions) String() string { @@ -4889,7 +4789,7 @@ func (*SecurityDefinitions) ProtoMessage() {} func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4917,6 +4817,7 @@ type SecurityDefinitionsItem struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *SecurityDefinitionsItem_BasicAuthenticationSecurity // *SecurityDefinitionsItem_ApiKeySecurity // *SecurityDefinitionsItem_Oauth2ImplicitSecurity @@ -4928,11 +4829,9 @@ type SecurityDefinitionsItem struct { func (x *SecurityDefinitionsItem) Reset() { *x = SecurityDefinitionsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityDefinitionsItem) String() string { @@ -4943,7 +4842,7 @@ func (*SecurityDefinitionsItem) ProtoMessage() {} func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5057,11 +4956,9 @@ type SecurityRequirement struct { func (x *SecurityRequirement) Reset() { *x = SecurityRequirement{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityRequirement) String() string { @@ -5072,7 +4969,7 @@ func (*SecurityRequirement) ProtoMessage() {} func (x 
*SecurityRequirement) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5104,11 +5001,9 @@ type StringArray struct { func (x *StringArray) Reset() { *x = StringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringArray) String() string { @@ -5119,7 +5014,7 @@ func (*StringArray) ProtoMessage() {} func (x *StringArray) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5154,11 +5049,9 @@ type Tag struct { func (x *Tag) Reset() { *x = Tag{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Tag) String() string { @@ -5169,7 +5062,7 @@ func (*Tag) ProtoMessage() {} func (x *Tag) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5222,11 +5115,9 @@ type TypeItem struct { func (x *TypeItem) Reset() { *x = TypeItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TypeItem) String() string { @@ -5237,7 +5128,7 @@ func (*TypeItem) ProtoMessage() {} func (x *TypeItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5270,11 +5161,9 @@ type VendorExtension struct { func (x *VendorExtension) Reset() { *x = VendorExtension{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VendorExtension) String() string { @@ -5285,7 +5174,7 @@ func (*VendorExtension) ProtoMessage() {} func (x *VendorExtension) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5322,11 +5211,9 @@ type Xml struct { func (x *Xml) Reset() { *x = Xml{} - if protoimpl.UnsafeEnabled { - mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Xml) String() string { @@ -5337,7 +5224,7 @@ func (*Xml) ProtoMessage() {} func (x *Xml) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6356,7 +6243,7 @@ func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte { } var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60) -var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{ +var file_openapiv2_OpenAPIv2_proto_goTypes = []any{ (*AdditionalPropertiesItem)(nil), // 0: openapi.v2.AdditionalPropertiesItem (*Any)(nil), // 1: openapi.v2.Any (*ApiKeySecurity)(nil), // 2: openapi.v2.ApiKeySecurity @@ -6565,755 +6452,33 @@ func file_openapiv2_OpenAPIv2_proto_init() { if File_openapiv2_OpenAPIv2_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AdditionalPropertiesItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApiKeySecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BasicAuthenticationSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BodyParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Contact); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Default); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Definitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Document); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Examples); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
[... identical one-per-type Exporter removals follow for file_openapiv2_OpenAPIv2_proto_msgTypes[10] through [59]: ExternalDocs, FileSchema, FormDataParameterSubSchema, Header, HeaderParameterSubSchema, Headers, Info, ItemsItem, JsonReference, License, NamedAny, NamedHeader, NamedParameter, NamedPathItem, NamedResponse, NamedResponseValue, NamedSchema, NamedSecurityDefinitionsItem, NamedString, NamedStringArray, NonBodyParameter, Oauth2AccessCodeSecurity, Oauth2ApplicationSecurity, Oauth2ImplicitSecurity, Oauth2PasswordSecurity, Oauth2Scopes, Operation, Parameter, ParameterDefinitions, ParametersItem, PathItem, PathParameterSubSchema, Paths, PrimitivesItems, Properties, QueryParameterSubSchema, Response, ResponseDefinitions, ResponseValue, Responses, Schema, SchemaItem, SecurityDefinitions, SecurityDefinitionsItem, SecurityRequirement, StringArray, Tag, TypeItem, VendorExtension, Xml ...]
-	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []any{
 		(*AdditionalPropertiesItem_Schema)(nil),
 		(*AdditionalPropertiesItem_Boolean)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []any{
 		(*NonBodyParameter_HeaderParameterSubSchema)(nil),
 		(*NonBodyParameter_FormDataParameterSubSchema)(nil),
 		(*NonBodyParameter_QueryParameterSubSchema)(nil),
 		(*NonBodyParameter_PathParameterSubSchema)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []any{
 		(*Parameter_BodyParameter)(nil),
 		(*Parameter_NonBodyParameter)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []any{
 		(*ParametersItem_Parameter)(nil),
 		(*ParametersItem_JsonReference)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = []any{
 		(*ResponseValue_Response)(nil),
 		(*ResponseValue_JsonReference)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []any{
 		(*SchemaItem_Schema)(nil),
 		(*SchemaItem_FileSchema)(nil),
 	}
-	file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{
+	file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []any{
 		(*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil),
 		(*SecurityDefinitionsItem_ApiKeySecurity)(nil),
 		(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil),
diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
index 945b8d11ff..b9df95a379 100644
--- a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
+++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
@@ -16,8 +16,8 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.27.1
-// 	protoc        v3.19.3
+// 	protoc-gen-go v1.35.1
+// 	protoc        v4.23.4
 // source: openapiv3/OpenAPIv3.proto
 
 package openapi_v3
@@ -43,6 +43,7 @@ type AdditionalPropertiesItem struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*AdditionalPropertiesItem_SchemaOrReference
 	//	*AdditionalPropertiesItem_Boolean
 	Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
@@ -50,11 +51,9 @@ type AdditionalPropertiesItem struct {
 
 func (x *AdditionalPropertiesItem) Reset() {
 	*x = AdditionalPropertiesItem{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *AdditionalPropertiesItem) String() string {
@@ -65,7 +64,7 @@ func (*AdditionalPropertiesItem) ProtoMessage() {}
 
 func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -128,11 +127,9 @@ type Any struct {
 
 func (x *Any) Reset() {
 	*x = Any{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Any) String() string {
@@ -143,7 +140,7 @@ func (*Any) ProtoMessage() {}
 
 func (x *Any) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -178,6 +175,7 @@ type
AnyOrExpression struct {
 	unknownFields protoimpl.UnknownFields
 
 	// Types that are assignable to Oneof:
+	//
 	//	*AnyOrExpression_Any
 	//	*AnyOrExpression_Expression
 	Oneof isAnyOrExpression_Oneof `protobuf_oneof:"oneof"`
@@ -185,11 +183,9 @@ type AnyOrExpression struct {
 
 func (x *AnyOrExpression) Reset() {
 	*x = AnyOrExpression{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *AnyOrExpression) String() string {
@@ -200,7 +196,7 @@ func (*AnyOrExpression) ProtoMessage() {}
 
 func (x *AnyOrExpression) ProtoReflect() protoreflect.Message {
 	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
[... the same three mechanical changes -- Reset() dropping its protoimpl.UnsafeEnabled guard, ProtoReflect() dropping the protoimpl.UnsafeEnabled check, and oneof comment blocks gaining a blank `//` line -- repeat for file_openapiv3_OpenAPIv3_proto_msgTypes[3] through [59]: Callback, CallbackOrReference, CallbacksOrReferences, Components, Contact, DefaultType, Discriminator, Document, Encoding, Encodings, Example, ExampleOrReference, ExamplesOrReferences, Expression, ExternalDocs, Header, HeaderOrReference, HeadersOrReferences, Info, ItemsItem, License, Link, LinkOrReference, LinksOrReferences, MediaType, MediaTypes, NamedAny, NamedCallbackOrReference, NamedEncoding, NamedExampleOrReference, NamedHeaderOrReference, NamedLinkOrReference, NamedMediaType, NamedParameterOrReference, NamedPathItem, NamedRequestBodyOrReference, NamedResponseOrReference, NamedSchemaOrReference, NamedSecuritySchemeOrReference, NamedServerVariable, NamedString, NamedStringArray, OauthFlow, OauthFlows, Object, Operation, Parameter, ParameterOrReference, ParametersOrReferences, PathItem, Paths, Properties, Reference, RequestBodiesOrReferences, RequestBody, RequestBodyOrReference, Response ...]
@@ -4345,11 +4235,9 @@ type ResponseOrReference struct {
 
 func (x *ResponseOrReference) Reset() {
 	*x = ResponseOrReference{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ResponseOrReference) String() string {
@@
-4360,7 +4248,7 @@ func (*ResponseOrReference) ProtoMessage() {} func (x *ResponseOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4425,11 +4313,9 @@ type Responses struct { func (x *Responses) Reset() { *x = Responses{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Responses) String() string { @@ -4440,7 +4326,7 @@ func (*Responses) ProtoMessage() {} func (x *Responses) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4486,11 +4372,9 @@ type ResponsesOrReferences struct { func (x *ResponsesOrReferences) Reset() { *x = ResponsesOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResponsesOrReferences) String() string { @@ -4501,7 +4385,7 @@ func (*ResponsesOrReferences) ProtoMessage() {} func (x *ResponsesOrReferences) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4569,11 +4453,9 @@ type Schema struct { func (x *Schema) Reset() { *x = Schema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Schema) String() string { @@ -4584,7 +4466,7 @@ func (*Schema) ProtoMessage() {} func (x *Schema) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4857,6 +4739,7 @@ type SchemaOrReference struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *SchemaOrReference_Schema // *SchemaOrReference_Reference Oneof isSchemaOrReference_Oneof `protobuf_oneof:"oneof"` @@ -4864,11 +4747,9 @@ type SchemaOrReference struct { func (x *SchemaOrReference) Reset() { *x = SchemaOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SchemaOrReference) String() string { @@ -4879,7 +4760,7 @@ func (*SchemaOrReference) ProtoMessage() {} func (x 
*SchemaOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4941,11 +4822,9 @@ type SchemasOrReferences struct { func (x *SchemasOrReferences) Reset() { *x = SchemasOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SchemasOrReferences) String() string { @@ -4956,7 +4835,7 @@ func (*SchemasOrReferences) ProtoMessage() {} func (x *SchemasOrReferences) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4989,11 +4868,9 @@ type SecurityRequirement struct { func (x *SecurityRequirement) Reset() { *x = SecurityRequirement{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityRequirement) String() string { @@ -5004,7 +4881,7 @@ func (*SecurityRequirement) ProtoMessage() {} func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5045,11 +4922,9 @@ type SecurityScheme struct { func (x *SecurityScheme) Reset() { *x = SecurityScheme{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityScheme) String() string { @@ -5060,7 +4935,7 @@ func (*SecurityScheme) ProtoMessage() {} func (x *SecurityScheme) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5144,6 +5019,7 @@ type SecuritySchemeOrReference struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *SecuritySchemeOrReference_SecurityScheme // *SecuritySchemeOrReference_Reference Oneof isSecuritySchemeOrReference_Oneof `protobuf_oneof:"oneof"` @@ -5151,11 +5027,9 @@ type SecuritySchemeOrReference struct { func (x *SecuritySchemeOrReference) Reset() { *x = SecuritySchemeOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecuritySchemeOrReference) 
String() string { @@ -5166,7 +5040,7 @@ func (*SecuritySchemeOrReference) ProtoMessage() {} func (x *SecuritySchemeOrReference) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5228,11 +5102,9 @@ type SecuritySchemesOrReferences struct { func (x *SecuritySchemesOrReferences) Reset() { *x = SecuritySchemesOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecuritySchemesOrReferences) String() string { @@ -5243,7 +5115,7 @@ func (*SecuritySchemesOrReferences) ProtoMessage() {} func (x *SecuritySchemesOrReferences) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5279,11 +5151,9 @@ type Server struct { func (x *Server) Reset() { *x = Server{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Server) String() string { @@ -5294,7 +5164,7 @@ func (*Server) ProtoMessage() {} func (x *Server) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5351,11 +5221,9 @@ type ServerVariable struct { func (x *ServerVariable) Reset() { *x = ServerVariable{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerVariable) String() string { @@ -5366,7 +5234,7 @@ func (*ServerVariable) ProtoMessage() {} func (x *ServerVariable) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5419,11 +5287,9 @@ type ServerVariables struct { func (x *ServerVariables) Reset() { *x = ServerVariables{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerVariables) String() string { @@ -5434,7 +5300,7 @@ func (*ServerVariables) ProtoMessage() {} func (x *ServerVariables) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5463,6 +5329,7 @@ type SpecificationExtension struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: + // // *SpecificationExtension_Number // *SpecificationExtension_Boolean // *SpecificationExtension_String_ @@ -5471,11 +5338,9 @@ type SpecificationExtension struct { func (x *SpecificationExtension) Reset() { *x = SpecificationExtension{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SpecificationExtension) String() string { @@ -5486,7 +5351,7 @@ func (*SpecificationExtension) ProtoMessage() {} func (x *SpecificationExtension) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5561,11 +5426,9 @@ type StringArray struct { func (x *StringArray) Reset() { *x = StringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringArray) String() string { @@ -5576,7 +5439,7 @@ func (*StringArray) ProtoMessage() {} func (x *StringArray) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5608,11 +5471,9 @@ type Strings struct { func (x *Strings) Reset() { *x = Strings{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Strings) String() string { @@ -5623,7 +5484,7 @@ func (*Strings) ProtoMessage() {} func (x *Strings) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5659,11 +5520,9 @@ type Tag struct { func (x *Tag) Reset() { *x = Tag{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Tag) String() string { @@ -5674,7 +5533,7 @@ func (*Tag) ProtoMessage() {} func (x *Tag) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5733,11 +5592,9 @@ type Xml struct { func 
(x *Xml) Reset() { *x = Xml{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Xml) String() string { @@ -5748,7 +5605,7 @@ func (*Xml) ProtoMessage() {} func (x *Xml) ProtoReflect() protoreflect.Message { mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6781,7 +6638,7 @@ func file_openapiv3_OpenAPIv3_proto_rawDescGZIP() []byte { } var file_openapiv3_OpenAPIv3_proto_msgTypes = make([]protoimpl.MessageInfo, 78) -var file_openapiv3_OpenAPIv3_proto_goTypes = []interface{}{ +var file_openapiv3_OpenAPIv3_proto_goTypes = []any{ (*AdditionalPropertiesItem)(nil), // 0: openapi.v3.AdditionalPropertiesItem (*Any)(nil), // 1: openapi.v3.Any (*AnyOrExpression)(nil), // 2: openapi.v3.AnyOrExpression @@ -7040,994 +6897,56 @@ func file_openapiv3_OpenAPIv3_proto_init() { if File_openapiv3_OpenAPIv3_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_openapiv3_OpenAPIv3_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AdditionalPropertiesItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AnyOrExpression); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Callback); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CallbackOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CallbacksOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Components); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Contact); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DefaultType); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Discriminator); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Document); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Encoding); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Encodings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Example); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExampleOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExamplesOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expression); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExternalDocs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Header); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeadersOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Info); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_openapiv3_OpenAPIv3_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ItemsItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*License); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Link); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LinkOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LinksOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MediaType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MediaTypes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedAny); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedCallbackOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedEncoding); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedExampleOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedHeaderOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedLinkOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*NamedMediaType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedParameterOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedPathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedRequestBodyOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedResponseOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSchemaOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSecuritySchemeOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedServerVariable); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedString); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedStringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OauthFlow); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OauthFlows); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Object); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Operation); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Parameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParameterOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParametersOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Paths); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Properties); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Reference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestBodiesOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestBody); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestBodyOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Response); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Responses); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_openapiv3_OpenAPIv3_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponsesOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Schema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchemaOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchemasOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityRequirement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityScheme); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecuritySchemeOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecuritySchemesOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Server); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerVariable); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerVariables); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SpecificationExtension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[75].Exporter = func(v interface{}, 
i int) interface{} { - switch v := v.(*Strings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Tag); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Xml); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[0].OneofWrappers = []any{ (*AdditionalPropertiesItem_SchemaOrReference)(nil), (*AdditionalPropertiesItem_Boolean)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[2].OneofWrappers = []any{ (*AnyOrExpression_Any)(nil), (*AnyOrExpression_Expression)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[4].OneofWrappers = []any{ (*CallbackOrReference_Callback)(nil), (*CallbackOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[8].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[8].OneofWrappers = []any{ (*DefaultType_Number)(nil), (*DefaultType_Boolean)(nil), (*DefaultType_String_)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[14].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[14].OneofWrappers = []any{ (*ExampleOrReference_Example)(nil), (*ExampleOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[19].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[19].OneofWrappers = []any{ (*HeaderOrReference_Header)(nil), (*HeaderOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[25].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[25].OneofWrappers = []any{ (*LinkOrReference_Link)(nil), (*LinkOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[50].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[50].OneofWrappers = []any{ (*ParameterOrReference_Parameter)(nil), (*ParameterOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[58].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[58].OneofWrappers = []any{ (*RequestBodyOrReference_RequestBody)(nil), (*RequestBodyOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[60].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[60].OneofWrappers = []any{ (*ResponseOrReference_Response)(nil), (*ResponseOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[64].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[64].OneofWrappers = []any{ (*SchemaOrReference_Schema)(nil), (*SchemaOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[68].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[68].OneofWrappers = []any{ (*SecuritySchemeOrReference_SecurityScheme)(nil), (*SecuritySchemeOrReference_Reference)(nil), } - file_openapiv3_OpenAPIv3_proto_msgTypes[73].OneofWrappers = []interface{}{ + file_openapiv3_OpenAPIv3_proto_msgTypes[73].OneofWrappers = []any{ 
(*SpecificationExtension_Number)(nil), (*SpecificationExtension_Boolean)(nil), (*SpecificationExtension_String_)(nil), diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go new file mode 100644 index 0000000000..f9f1bd2654 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go @@ -0,0 +1,182 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.1 +// protoc v4.23.4 +// source: openapiv3/annotations.proto + +package openapi_v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Document)(nil), + Field: 1143, + Name: "openapi.v3.document", + Tag: "bytes,1143,opt,name=document", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1143, + Name: "openapi.v3.operation", + Tag: "bytes,1143,opt,name=operation", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.schema", + Tag: "bytes,1143,opt,name=schema", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.property", + Tag: "bytes,1143,opt,name=property", + Filename: "openapiv3/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional openapi.v3.Document document = 1143; + E_Document = &file_openapiv3_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // optional openapi.v3.Operation operation = 1143; + E_Operation = &file_openapiv3_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional openapi.v3.Schema schema = 1143; + E_Schema = &file_openapiv3_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional openapi.v3.Schema property = 1143; + E_Property = &file_openapiv3_annotations_proto_extTypes[3] +) + +var File_openapiv3_annotations_proto protoreflect.FileDescriptor + +var file_openapiv3_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, + 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x42, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_openapiv3_annotations_proto_goTypes = []any{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + 
(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions + (*Document)(nil), // 4: openapi.v3.Document + (*Operation)(nil), // 5: openapi.v3.Operation + (*Schema)(nil), // 6: openapi.v3.Schema +} +var file_openapiv3_annotations_proto_depIdxs = []int32{ + 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions + 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions + 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions + 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions + 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document + 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation + 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema + 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 4, // [4:8] is the sub-list for extension type_name + 0, // [0:4] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_openapiv3_annotations_proto_init() } +func file_openapiv3_annotations_proto_init() { + if File_openapiv3_annotations_proto != nil { + return + } + file_openapiv3_OpenAPIv3_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv3_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_openapiv3_annotations_proto_goTypes, + DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, + ExtensionInfos: file_openapiv3_annotations_proto_extTypes, + }.Build() + File_openapiv3_annotations_proto = out.File + file_openapiv3_annotations_proto_rawDesc = nil + file_openapiv3_annotations_proto_goTypes = nil + file_openapiv3_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto new file mode 100644 index 0000000000..09ee0aac51 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto @@ -0,0 +1,56 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package openapi.v3; + +import "google/protobuf/descriptor.proto"; +import "openapiv3/OpenAPIv3.proto"; + +// The Go package name. +option go_package = "./openapiv3;openapi_v3"; +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; +// The Java outer classname should be the filename in UpperCamelCase. 
This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "AnnotationsProto"; +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v3"; +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +extend google.protobuf.FileOptions { + Document document = 1143; +} + +extend google.protobuf.MethodOptions { + Operation operation = 1143; +} + +extend google.protobuf.MessageOptions { + Schema schema = 1143; +} + +extend google.protobuf.FieldOptions { + Schema property = 1143; +} diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml deleted file mode 100644 index 061d72ae07..0000000000 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go - -go: - - 1.11.x - - 1.12.x - - 1.13.x - - master - -script: - - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md deleted file mode 100644 index 97c1b34fd5..0000000000 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Go makes it very simple to ensure properly formatted code, so always run - `go fmt` on your code before committing it. You should also run - [golint][] over your code. 
As noted in the [golint readme][], it's not - strictly necessary that your code be completely "lint-free", but this will - help you find common style issues. - - 1. Any significant changes should almost always be accompanied by tests. The - project already has good test coverage, so look at some of the existing - tests if you're unsure how to go about it. [gocov][] and [gocov-html][] - are invaluable tools for seeing which parts of your code aren't being - exercised by your tests. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. - -[forking]: https://help.github.com/articles/fork-a-repo -[golint]: https://github.com/golang/lint -[golint readme]: https://github.com/golang/lint/blob/master/README -[gocov]: https://github.com/axw/gocov -[gocov-html]: https://github.com/matm/gocov-html -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits -[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go deleted file mode 100644 index 761520a8ce..0000000000 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ /dev/null @@ -1,605 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fuzz - -import ( - "fmt" - "math/rand" - "reflect" - "regexp" - "time" - - "github.com/google/gofuzz/bytesource" - "strings" -) - -// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. -type fuzzFuncMap map[reflect.Type]reflect.Value - -// Fuzzer knows how to fill any object with random fields. -type Fuzzer struct { - fuzzFuncs fuzzFuncMap - defaultFuzzFuncs fuzzFuncMap - r *rand.Rand - nilChance float64 - minElements int - maxElements int - maxDepth int - skipFieldPatterns []*regexp.Regexp -} - -// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, -// RandSource, NilChance, or NumElements in any order. -func New() *Fuzzer { - return NewWithSeed(time.Now().UnixNano()) -} - -func NewWithSeed(seed int64) *Fuzzer { - f := &Fuzzer{ - defaultFuzzFuncs: fuzzFuncMap{ - reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), - }, - - fuzzFuncs: fuzzFuncMap{}, - r: rand.New(rand.NewSource(seed)), - nilChance: .2, - minElements: 1, - maxElements: 10, - maxDepth: 100, - } - return f -} - -// NewFromGoFuzz is a helper function that enables using gofuzz (this -// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous -// fuzzing. Essentially, it enables translating the fuzzing bytes from -// go-fuzz to any Go object using this library. -// -// This implementation promises a constant translation from a given slice of -// bytes to the fuzzed objects. 
This promise will remain over future -// versions of Go and of this library. -// -// Note: the returned Fuzzer should not be shared between multiple goroutines, -// as its deterministic output will no longer be available. -// -// Example: use go-fuzz to test the function `MyFunc(int)` in the package -// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content: -// -// // +build gofuzz -// package mypacakge -// import fuzz "github.com/google/gofuzz" -// func Fuzz(data []byte) int { -// var i int -// fuzz.NewFromGoFuzz(data).Fuzz(&i) -// MyFunc(i) -// return 0 -// } -func NewFromGoFuzz(data []byte) *Fuzzer { - return New().RandSource(bytesource.New(data)) -} - -// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. -// -// Each entry in fuzzFuncs must be a function taking two parameters. -// The first parameter must be a pointer or map. It is the variable that -// function will fill with random data. The second parameter must be a -// fuzz.Continue, which will provide a source of randomness and a way -// to automatically continue fuzzing smaller pieces of the first parameter. -// -// These functions are called sensibly, e.g., if you wanted custom string -// fuzzing, the function `func(s *string, c fuzz.Continue)` would get -// called and passed the address of strings. Maps and pointers will always -// be made/new'd for you, ignoring the NilChange option. For slices, it -// doesn't make much sense to pre-create them--Fuzzer doesn't know how -// long you want your slice--so take a pointer to a slice, and make it -// yourself. (If you don't want your map/pointer type pre-made, take a -// pointer to it, and make it yourself.) See the examples for a range of -// custom functions. -func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { - for i := range fuzzFuncs { - v := reflect.ValueOf(fuzzFuncs[i]) - if v.Kind() != reflect.Func { - panic("Need only funcs!") - } - t := v.Type() - if t.NumIn() != 2 || t.NumOut() != 0 { - panic("Need 2 in and 0 out params!") - } - argT := t.In(0) - switch argT.Kind() { - case reflect.Ptr, reflect.Map: - default: - panic("fuzzFunc must take pointer or map type") - } - if t.In(1) != reflect.TypeOf(Continue{}) { - panic("fuzzFunc's second parameter must be type fuzz.Continue") - } - f.fuzzFuncs[argT] = v - } - return f -} - -// RandSource causes f to get values from the given source of randomness. -// Use if you want deterministic fuzzing. -func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { - f.r = rand.New(s) - return f -} - -// NilChance sets the probability of creating a nil pointer, map, or slice to -// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. -func (f *Fuzzer) NilChance(p float64) *Fuzzer { - if p < 0 || p > 1 { - panic("p should be between 0 and 1, inclusive.") - } - f.nilChance = p - return f -} - -// NumElements sets the minimum and maximum number of elements that will be -// added to a non-nil map or slice. -func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { - if atLeast > atMost { - panic("atLeast must be <= atMost") - } - if atLeast < 0 { - panic("atLeast must be >= 0") - } - f.minElements = atLeast - f.maxElements = atMost - return f -} - -func (f *Fuzzer) genElementCount() int { - if f.minElements == f.maxElements { - return f.minElements - } - return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) -} - -func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() >= f.nilChance -} - -// MaxDepth sets the maximum number of recursive fuzz calls that will be made -// before stopping. 
This includes struct members, pointers, and map and slice -// elements. -func (f *Fuzzer) MaxDepth(d int) *Fuzzer { - f.maxDepth = d - return f -} - -// Skip fields which match the supplied pattern. Call this multiple times if needed -// This is useful to skip XXX_ fields generated by protobuf -func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { - f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) - return f -} - -// Fuzz recursively fills all of obj's fields with something random. First -// this tries to find a custom fuzz function (see Funcs). If there is no -// custom function this tests whether the object implements fuzz.Interface and, -// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if -// there is a default fuzz function provided by this package. If all of that -// fails, this will generate random values for all primitive fields and then -// recurse for all non-primitives. -// -// This is safe for cyclic or tree-like structs, up to a limit. Use the -// MaxDepth method to adjust how deep you need it to recurse. -// -// obj must be a pointer. Only exported (public) fields can be set (thanks, -// golang :/ ) Intended for tests, so will panic on bad input or unimplemented -// fields. -func (f *Fuzzer) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, 0) -} - -// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -// Not safe for cyclic or tree-like structs! -// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) -// Intended for tests, so will panic on bad input or unimplemented fields. -func (f *Fuzzer) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, flagNoCustomFuzz) -} - -const ( - // Do not try to find a custom fuzz function. Does not apply recursively. - flagNoCustomFuzz uint64 = 1 << iota -) - -func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { - fc := &fuzzerContext{fuzzer: f} - fc.doFuzz(v, flags) -} - -// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer -// be thread-safe. -type fuzzerContext struct { - fuzzer *Fuzzer - curDepth int -} - -func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { - if fc.curDepth >= fc.fuzzer.maxDepth { - return - } - fc.curDepth++ - defer func() { fc.curDepth-- }() - - if !v.CanSet() { - return - } - - if flags&flagNoCustomFuzz == 0 { - // Check for both pointer and non-pointer custom functions. 
- if v.CanAddr() && fc.tryCustom(v.Addr()) { - return - } - if fc.tryCustom(v) { - return - } - } - - if fn, ok := fillFuncMap[v.Kind()]; ok { - fn(v, fc.fuzzer.r) - return - } - - switch v.Kind() { - case reflect.Map: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.MakeMap(v.Type())) - n := fc.fuzzer.genElementCount() - for i := 0; i < n; i++ { - key := reflect.New(v.Type().Key()).Elem() - fc.doFuzz(key, 0) - val := reflect.New(v.Type().Elem()).Elem() - fc.doFuzz(val, 0) - v.SetMapIndex(key, val) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Ptr: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.New(v.Type().Elem())) - fc.doFuzz(v.Elem(), 0) - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Slice: - if fc.fuzzer.genShouldFill() { - n := fc.fuzzer.genElementCount() - v.Set(reflect.MakeSlice(v.Type(), n, n)) - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Array: - if fc.fuzzer.genShouldFill() { - n := v.Len() - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - skipField := false - fieldName := v.Type().Field(i).Name - for _, pattern := range fc.fuzzer.skipFieldPatterns { - if pattern.MatchString(fieldName) { - skipField = true - break - } - } - if !skipField { - fc.doFuzz(v.Field(i), 0) - } - } - case reflect.Chan: - fallthrough - case reflect.Func: - fallthrough - case reflect.Interface: - fallthrough - default: - panic(fmt.Sprintf("Can't handle %#v", v.Interface())) - } -} - -// tryCustom searches for custom handlers, and returns true iff it finds a match -// and successfully randomizes v. -func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { - // First: see if we have a fuzz function for it. - doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] - if !ok { - // Second: see if it can fuzz itself. - if v.CanInterface() { - intf := v.Interface() - if fuzzable, ok := intf.(Interface); ok { - fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) - return true - } - } - // Finally: see if there is a default fuzz function. - doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] - if !ok { - return false - } - } - - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.New(v.Type().Elem())) - } - case reflect.Map: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.MakeMap(v.Type())) - } - default: - return false - } - - doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ - fc: fc, - Rand: fc.fuzzer.r, - })}) - return true -} - -// Interface represents an object that knows how to fuzz itself. Any time we -// find a type that implements this interface we will delegate the act of -// fuzzing itself. -type Interface interface { - Fuzz(c Continue) -} - -// Continue can be passed to custom fuzzing functions to allow them to use -// the correct source of randomness and to continue fuzzing their members. -type Continue struct { - fc *fuzzerContext - - // For convenience, Continue implements rand.Rand via embedding. - // Use this for generating any randomness if you want your fuzzing - // to be repeatable for a given seed. - *rand.Rand -} - -// Fuzz continues fuzzing obj. obj must be a pointer. 
-func (c Continue) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, 0) -} - -// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -func (c Continue) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, flagNoCustomFuzz) -} - -// RandString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func (c Continue) RandString() string { - return randString(c.Rand) -} - -// RandUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func (c Continue) RandUint64() uint64 { - return randUint64(c.Rand) -} - -// RandBool returns true or false randomly. -func (c Continue) RandBool() bool { - return randBool(c.Rand) -} - -func fuzzInt(v reflect.Value, r *rand.Rand) { - v.SetInt(int64(randUint64(r))) -} - -func fuzzUint(v reflect.Value, r *rand.Rand) { - v.SetUint(randUint64(r)) -} - -func fuzzTime(t *time.Time, c Continue) { - var sec, nsec int64 - // Allow for about 1000 years of random time values, which keeps things - // like JSON parsing reasonably happy. - sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) - c.Fuzz(&nsec) - *t = time.Unix(sec, nsec) -} - -var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ - reflect.Bool: func(v reflect.Value, r *rand.Rand) { - v.SetBool(randBool(r)) - }, - reflect.Int: fuzzInt, - reflect.Int8: fuzzInt, - reflect.Int16: fuzzInt, - reflect.Int32: fuzzInt, - reflect.Int64: fuzzInt, - reflect.Uint: fuzzUint, - reflect.Uint8: fuzzUint, - reflect.Uint16: fuzzUint, - reflect.Uint32: fuzzUint, - reflect.Uint64: fuzzUint, - reflect.Uintptr: fuzzUint, - reflect.Float32: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(float64(r.Float32())) - }, - reflect.Float64: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(r.Float64()) - }, - reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) - }, - reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - v.SetComplex(complex(r.Float64(), r.Float64())) - }, - reflect.String: func(v reflect.Value, r *rand.Rand) { - v.SetString(randString(r)) - }, - reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, -} - -// randBool returns true or false randomly. -func randBool(r *rand.Rand) bool { - return r.Int31()&(1<<30) == 0 -} - -type int63nPicker interface { - Int63n(int64) int64 -} - -// UnicodeRange describes a sequential range of unicode characters. -// Last must be numerically greater than First. -type UnicodeRange struct { - First, Last rune -} - -// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. -// To be useful, each range must have at least one character (First <= Last) and -// there must be at least one range. -type UnicodeRanges []UnicodeRange - -// choose returns a random unicode character from the given range, using the -// given randomness source. -func (ur UnicodeRange) choose(r int63nPicker) rune { - count := int64(ur.Last - ur.First + 1) - return ur.First + rune(r.Int63n(count)) -} - -// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. 
-// Each character is selected from the range ur. If there are no characters -// in the range (cr.Last < cr.First), this will panic. -func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { - ur.check() - return func(s *string, c Continue) { - *s = ur.randString(c.Rand) - } -} - -// check is a function that used to check whether the first of ur(UnicodeRange) -// is greater than the last one. -func (ur UnicodeRange) check() { - if ur.Last < ur.First { - panic("The last encoding must be greater than the first one.") - } -} - -// randString of UnicodeRange makes a random string up to 20 characters long. -// Each character is selected form ur(UnicodeRange). -func (ur UnicodeRange) randString(r *rand.Rand) string { - n := r.Intn(20) - sb := strings.Builder{} - sb.Grow(n) - for i := 0; i < n; i++ { - sb.WriteRune(ur.choose(r)) - } - return sb.String() -} - -// defaultUnicodeRanges sets a default unicode range when user do not set -// CustomStringFuzzFunc() but wants fuzz string. -var defaultUnicodeRanges = UnicodeRanges{ - {' ', '~'}, // ASCII characters - {'\u00a0', '\u02af'}, // Multi-byte encoded characters - {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) -} - -// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. -// Each character is selected from one of the ranges of ur(UnicodeRanges). -// Each range has an equal probability of being chosen. If there are no ranges, -// or a selected range has no characters (.Last < .First), this will panic. -// Do not modify any of the ranges in ur after calling this function. -func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { - // Check unicode ranges slice is empty. - if len(ur) == 0 { - panic("UnicodeRanges is empty.") - } - // if not empty, each range should be checked. - for i := range ur { - ur[i].check() - } - return func(s *string, c Continue) { - *s = ur.randString(c.Rand) - } -} - -// randString of UnicodeRanges makes a random string up to 20 characters long. -// Each character is selected form one of the ranges of ur(UnicodeRanges), -// and each range has an equal probability of being chosen. -func (ur UnicodeRanges) randString(r *rand.Rand) string { - n := r.Intn(20) - sb := strings.Builder{} - sb.Grow(n) - for i := 0; i < n; i++ { - sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) - } - return sb.String() -} - -// randString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func randString(r *rand.Rand) string { - return defaultUnicodeRanges.randString(r) -} - -// randUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. 
-func randUint64(r *rand.Rand) uint64 { - return uint64(r.Uint32())<<32 | uint64(r.Uint32()) -} diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig deleted file mode 100644 index 2940ec92ac..0000000000 --- a/vendor/github.com/gorilla/websocket/.editorconfig +++ /dev/null @@ -1,20 +0,0 @@ -; https://editorconfig.org/ - -root = true - -[*] -insert_final_newline = true -charset = utf-8 -trim_trailing_whitespace = true -indent_style = space -indent_size = 2 - -[{Makefile,go.mod,go.sum,*.go,.gitmodules}] -indent_style = tab -indent_size = 4 - -[*.md] -indent_size = 4 -trim_trailing_whitespace = false - -eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore index 84039fec68..cd3fcd1ef7 100644 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -1 +1,25 @@ -coverage.coverprofile +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.idea/ +*.iml diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml deleted file mode 100644 index 34882139e1..0000000000 --- a/vendor/github.com/gorilla/websocket/.golangci.yml +++ /dev/null @@ -1,3 +0,0 @@ -run: - skip-dirs: - - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 0000000000..1931f40068 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE index bb9d80bc9b..9171c97225 100644 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -1,27 +1,22 @@ -Copyright (c) 2023 The Gorilla Authors. All rights reserved. +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile deleted file mode 100644 index 603a63f50a..0000000000 --- a/vendor/github.com/gorilla/websocket/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') -GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest - -GO_SEC=$(shell which gosec 2> /dev/null || echo '') -GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest - -GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') -GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest - -.PHONY: golangci-lint -golangci-lint: - $(if $(GO_LINT), ,go install $(GO_LINT_URI)) - @echo "##### Running golangci-lint" - golangci-lint run -v - -.PHONY: gosec -gosec: - $(if $(GO_SEC), ,go install $(GO_SEC_URI)) - @echo "##### Running gosec" - gosec -exclude-dir examples ./... - -.PHONY: govulncheck -govulncheck: - $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) - @echo "##### Running govulncheck" - govulncheck ./... - -.PHONY: verify -verify: golangci-lint gosec govulncheck - -.PHONY: test -test: - @echo "##### Running tests" - go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md index 1fd5e9c4e7..ff8bfab0b2 100644 --- a/vendor/github.com/gorilla/websocket/README.md +++ b/vendor/github.com/gorilla/websocket/README.md @@ -1,23 +1,19 @@ -# gorilla/websocket +# Gorilla WebSocket -![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg) -[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket) -[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) -[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. - -![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. ### Documentation * [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) -* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) -* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) -* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) -* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) -* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool) +* [Chat example](https://github.com/gorilla/websocket/tree/main/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/main/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/main/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/main/examples/filewatch) ### Status @@ -33,4 +29,4 @@ package API is stable. The Gorilla WebSocket package passes the server tests in the [Autobahn Test Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn -subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). +subdirectory](https://github.com/gorilla/websocket/tree/main/examples/autobahn). diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go index 815b0ca5c8..00917ea341 100644 --- a/vendor/github.com/gorilla/websocket/client.go +++ b/vendor/github.com/gorilla/websocket/client.go @@ -11,16 +11,12 @@ import ( "errors" "fmt" "io" - "log" - "net" "net/http" "net/http/httptrace" "net/url" "strings" "time" - - "golang.org/x/net/proxy" ) // ErrBadHandshake is returned when the server response to opening handshake is @@ -55,18 +51,34 @@ func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufS // // It is safe to call Dialer's methods concurrently. type Dialer struct { + // The following custom dial functions can be set to establish + // connections to either the backend server or the proxy (if it + // exists). 
The scheme of the dialed entity (either backend or + // proxy) determines which custom dial function is selected: + // either NetDialTLSContext for HTTPS or NetDialContext/NetDial + // for HTTP. Since the "Proxy" function can determine the scheme + // dynamically, it can make sense to set multiple custom dial + // functions simultaneously. + // // NetDial specifies the dial function for creating TCP connections. If - // NetDial is nil, net.Dial is used. + // NetDial is nil, net.Dialer DialContext is used. + // If "Proxy" field is also set, this function dials the proxy--not + // the backend server. NetDial func(network, addr string) (net.Conn, error) // NetDialContext specifies the dial function for creating TCP connections. If // NetDialContext is nil, NetDial is used. + // If "Proxy" field is also set, this function dials the proxy--not + // the backend server. NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If // NetDialTLSContext is nil, NetDialContext is used. // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and // TLSClientConfig is ignored. + // If "Proxy" field is also set, this function dials the proxy (and performs + // the TLS handshake with the proxy, ignoring TLSClientConfig). In this TLS proxy + // dialing case the TLSClientConfig could still be necessary for TLS to the backend server. NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) // Proxy specifies a function to return a proxy for a given @@ -77,7 +89,7 @@ type Dialer struct { // TLSClientConfig specifies the TLS configuration to use with tls.Client. // If nil, the default configuration is used. - // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake + // If NetDialTLSContext is set, Dial assumes the TLS handshake // is done there and TLSClientConfig is ignored. TLSClientConfig *tls.Config @@ -228,7 +240,6 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h k == "Connection" || k == "Sec-Websocket-Key" || k == "Sec-Websocket-Version" || - //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution k == "Sec-Websocket-Extensions" || (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) @@ -249,73 +260,16 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h defer cancel() } - // Get network dial function. - var netDial func(network, add string) (net.Conn, error) - - switch u.Scheme { - case "http": - if d.NetDialContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialContext(ctx, network, addr) - } - } else if d.NetDial != nil { - netDial = d.NetDial - } - case "https": - if d.NetDialTLSContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialTLSContext(ctx, network, addr) - } - } else if d.NetDialContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialContext(ctx, network, addr) - } - } else if d.NetDial != nil { - netDial = d.NetDial - } - default: - return nil, nil, errMalformedURL - } - - if netDial == nil { - netDialer := &net.Dialer{} - netDial = func(network, addr string) (net.Conn, error) { - return netDialer.DialContext(ctx, network, addr) - } - } - - // If needed, wrap the dial function to set the connection deadline. 
- if deadline, ok := ctx.Deadline(); ok { - forwardDial := netDial - netDial = func(network, addr string) (net.Conn, error) { - c, err := forwardDial(network, addr) - if err != nil { - return nil, err - } - err = c.SetDeadline(deadline) - if err != nil { - if err := c.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } - return c, nil - } - } - - // If needed, wrap the dial function to connect through a proxy. + var proxyURL *url.URL if d.Proxy != nil { - proxyURL, err := d.Proxy(req) + proxyURL, err = d.Proxy(req) if err != nil { return nil, nil, err } - if proxyURL != nil { - dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) - if err != nil { - return nil, nil, err - } - netDial = dialer.Dial - } + } + netDial, err := d.netDialFn(ctx, proxyURL, u) + if err != nil { + return nil, nil, err } hostPort, hostNoPort := hostPortNoPort(u) @@ -324,7 +278,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h trace.GetConn(hostPort) } - netConn, err := netDial("tcp", hostPort) + netConn, err := netDial(ctx, "tcp", hostPort) if err != nil { return nil, nil, err } @@ -334,16 +288,20 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h }) } + // Close the network connection when returning an error. The variable + // netConn is set to nil before the success return at the end of the + // function. defer func() { if netConn != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + // It's safe to ignore the error from Close() because this code is + // only executed when returning a more important error to the + // application. + _ = netConn.Close() } }() - if u.Scheme == "https" && d.NetDialTLSContext == nil { - // If NetDialTLSContext is set, assume that the TLS handshake has already been done + // Do TLS handshake over established connection if a proxy exists. + if proxyURL != nil && u.Scheme == "https" { cfg := cloneTLSConfig(d.TLSClientConfig) if cfg.ServerName == "" { @@ -430,15 +388,130 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") if err := netConn.SetDeadline(time.Time{}); err != nil { - return nil, nil, err + return nil, resp, err } - netConn = nil // to avoid close in defer. + + // Success! Set netConn to nil to stop the deferred function above from + // closing the network connection. + netConn = nil + return conn, resp, nil } +// Returns the dial function to establish the connection to either the backend +// server or the proxy (if it exists). If the dialed entity is HTTPS, then the +// returned dial function *also* performs the TLS handshake to the dialed entity. +// NOTE: If a proxy exists, it is possible for a second TLS handshake to be +// necessary over the established connection. +func (d *Dialer) netDialFn(ctx context.Context, proxyURL *url.URL, backendURL *url.URL) (netDialerFunc, error) { + var netDial netDialerFunc + if proxyURL != nil { + netDial = d.netDialFromURL(proxyURL) + } else { + netDial = d.netDialFromURL(backendURL) + } + // If needed, wrap the dial function to set the connection deadline. + if deadline, ok := ctx.Deadline(); ok { + netDial = netDialWithDeadline(netDial, deadline) + } + // Proxy dialing is wrapped to implement CONNECT method and possibly proxy auth. 
+ if proxyURL != nil { + return proxyFromURL(proxyURL, netDial) + } + return netDial, nil +} + +// Returns function to create the connection depending on the Dialer's +// custom dialing functions and the passed URL of entity connecting to. +func (d *Dialer) netDialFromURL(u *url.URL) netDialerFunc { + var netDial netDialerFunc + switch { + case d.NetDialContext != nil: + netDial = d.NetDialContext + case d.NetDial != nil: + netDial = func(ctx context.Context, net, addr string) (net.Conn, error) { + return d.NetDial(net, addr) + } + default: + netDial = (&net.Dialer{}).DialContext + } + // If dialed entity is HTTPS, then either use custom TLS dialing function (if exists) + // or wrap the previously computed "netDial" to use TLS config for handshake. + if u.Scheme == "https" { + if d.NetDialTLSContext != nil { + netDial = d.NetDialTLSContext + } else { + netDial = netDialWithTLSHandshake(netDial, d.TLSClientConfig, u) + } + } + return netDial +} + +// Returns wrapped "netDial" function, performing TLS handshake after connecting. +func netDialWithTLSHandshake(netDial netDialerFunc, tlsConfig *tls.Config, u *url.URL) netDialerFunc { + return func(ctx context.Context, unused, addr string) (net.Conn, error) { + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + // Creates TCP connection to addr using passed "netDial" function. + conn, err := netDial(ctx, "tcp", addr) + if err != nil { + return nil, err + } + cfg := cloneTLSConfig(tlsConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(conn, cfg) + // Do the TLS handshake using TLSConfig over the wrapped connection. + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err = doHandshake(ctx, tlsConn, cfg) + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + if err != nil { + tlsConn.Close() + return nil, err + } + return tlsConn, nil + } +} + +// Returns wrapped "netDial" function, setting passed deadline. 
+func netDialWithDeadline(netDial netDialerFunc, deadline time.Time) netDialerFunc { + return func(ctx context.Context, network, addr string) (net.Conn, error) { + c, err := netDial(ctx, network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } +} + func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { - return &tls.Config{MinVersion: tls.VersionTLS12} + return &tls.Config{} } return cfg.Clone() } + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.HandshakeContext(ctx); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go index 9fed0ef521..fe1079edbc 100644 --- a/vendor/github.com/gorilla/websocket/compression.go +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -8,7 +8,6 @@ import ( "compress/flate" "errors" "io" - "log" "strings" "sync" ) @@ -34,8 +33,10 @@ func decompressNoContextTakeover(r io.Reader) io.ReadCloser { "\x01\x00\x00\xff\xff" fr, _ := flateReaderPool.Get().(io.ReadCloser) - if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { - panic(err) + mr := io.MultiReader(r, strings.NewReader(tail)) + if err := fr.(flate.Resetter).Reset(mr, nil); err != nil { + // Reset never fails, but handle error in case that changes. + fr = flate.NewReader(mr) } return &flateReadWrapper{fr} } @@ -135,9 +136,7 @@ func (r *flateReadWrapper) Read(p []byte) (int, error) { // Preemptively place the reader back in the pool. This helps with // scenarios where the application does not call NextReader() soon after // this final read. - if err := r.Close(); err != nil { - log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) - } + r.Close() } return n, err } diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index 221e6cf798..9562ffd497 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -10,7 +10,6 @@ import ( "encoding/binary" "errors" "io" - "log" "net" "strconv" "strings" @@ -193,13 +192,6 @@ func newMaskKey() [4]byte { return k } -func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok { - err = &netError{msg: e.Error(), timeout: e.Timeout()} - } - return err -} - func isControl(frameType int) bool { return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage } @@ -365,7 +357,6 @@ func (c *Conn) RemoteAddr() net.Addr { // Write methods func (c *Conn) writeFatal(err error) error { - err = hideTempErr(err) c.writeErrMu.Lock() if c.writeErr == nil { c.writeErr = err @@ -379,9 +370,9 @@ func (c *Conn) read(n int) ([]byte, error) { if err == io.EOF { err = errUnexpectedEOF } - if _, err := c.br.Discard(len(p)); err != nil { - return p, err - } + // Discard is guaranteed to succeed because the number of bytes to discard + // is less than or equal to the number of bytes buffered. + _, _ = c.br.Discard(len(p)) return p, err } @@ -447,21 +438,27 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er maskBytes(key, 0, buf[6:]) } - d := 1000 * time.Hour - if !deadline.IsZero() { - d = time.Until(deadline) + if deadline.IsZero() { + // No timeout for zero time. 
+ <-c.mu + } else { + d := time.Until(deadline) if d < 0 { return errWriteTimeout } + select { + case <-c.mu: + default: + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + } } - timer := time.NewTimer(d) - select { - case <-c.mu: - timer.Stop() - case <-timer.C: - return errWriteTimeout - } defer func() { c.mu <- struct{}{} }() c.writeErrMu.Lock() @@ -474,8 +471,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er if err := c.conn.SetWriteDeadline(deadline); err != nil { return c.writeFatal(err) } - _, err = c.conn.Write(buf) - if err != nil { + if _, err = c.conn.Write(buf); err != nil { return c.writeFatal(err) } if messageType == CloseMessage { @@ -490,9 +486,7 @@ func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { // probably better to return an error in this situation, but we cannot // change this without breaking existing applications. if c.writer != nil { - if err := c.writer.Close(); err != nil { - log.Printf("websocket: discarding writer close error: %v", err) - } + c.writer.Close() c.writer = nil } @@ -832,9 +826,7 @@ func (c *Conn) advanceFrame() (int, error) { rsv2 := p[0]&rsv2Bit != 0 rsv3 := p[0]&rsv3Bit != 0 mask := p[1]&maskBit != 0 - if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { - return noFrame, err - } + _ = c.setReadRemaining(int64(p[1] & 0x7f)) // will not fail because argument is >= 0 c.readDecompress = false if rsv1 { @@ -939,9 +931,8 @@ func (c *Conn) advanceFrame() (int, error) { } if c.readLimit > 0 && c.readLength > c.readLimit { - if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { - return noFrame, err - } + // Make a best effort to send a close message describing the problem. + _ = c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) return noFrame, ErrReadLimit } @@ -953,9 +944,7 @@ func (c *Conn) advanceFrame() (int, error) { var payload []byte if c.readRemaining > 0 { payload, err = c.read(int(c.readRemaining)) - if err := c.setReadRemaining(0); err != nil { - return noFrame, err - } + _ = c.setReadRemaining(0) // will not fail because argument is >= 0 if err != nil { return noFrame, err } @@ -1002,9 +991,8 @@ func (c *Conn) handleProtocolError(message string) error { if len(data) > maxControlFramePayloadSize { data = data[:maxControlFramePayloadSize] } - if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil { - return err - } + // Make a best effor to send a close message describing the problem. + _ = c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) return errors.New("websocket: " + message) } @@ -1021,9 +1009,7 @@ func (c *Conn) handleProtocolError(message string) error { func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { // Close previous reader, only relevant for decompression. 
if c.reader != nil { - if err := c.reader.Close(); err != nil { - log.Printf("websocket: discarding reader close error: %v", err) - } + c.reader.Close() c.reader = nil } @@ -1033,7 +1019,7 @@ func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { for c.readErr == nil { frameType, err := c.advanceFrame() if err != nil { - c.readErr = hideTempErr(err) + c.readErr = err break } @@ -1073,15 +1059,13 @@ func (r *messageReader) Read(b []byte) (int, error) { b = b[:c.readRemaining] } n, err := c.br.Read(b) - c.readErr = hideTempErr(err) + c.readErr = err if c.isServer { c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) } rem := c.readRemaining rem -= int64(n) - if err := c.setReadRemaining(rem); err != nil { - return 0, err - } + _ = c.setReadRemaining(rem) // rem is guaranteed to be >= 0 if c.readRemaining > 0 && c.readErr == io.EOF { c.readErr = errUnexpectedEOF } @@ -1096,7 +1080,7 @@ func (r *messageReader) Read(b []byte) (int, error) { frameType, err := c.advanceFrame() switch { case err != nil: - c.readErr = hideTempErr(err) + c.readErr = err case frameType == TextMessage || frameType == BinaryMessage: c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") } @@ -1163,9 +1147,8 @@ func (c *Conn) SetCloseHandler(h func(code int, text string) error) { if h == nil { h = func(code int, text string) error { message := FormatCloseMessage(code, "") - if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil { - return err - } + // Make a best effor to send the close message. + _ = c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) return nil } } @@ -1187,13 +1170,9 @@ func (c *Conn) PingHandler() func(appData string) error { func (c *Conn) SetPingHandler(h func(appData string) error) { if h == nil { h = func(message string) error { - err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) - if err == ErrCloseSent { - return nil - } else if _, ok := err.(net.Error); ok { - return nil - } - return err + // Make a best effort to send the pong message. + _ = c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + return nil } } c.handlePing = h diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go index 67d0968be8..d0742bf2a5 100644 --- a/vendor/github.com/gorilla/websocket/mask.go +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -9,7 +9,6 @@ package websocket import "unsafe" -// #nosec G103 -- (CWE-242) Has been audited const wordSize = int(unsafe.Sizeof(uintptr(0))) func maskBytes(key [4]byte, pos int, b []byte) int { @@ -23,7 +22,6 @@ func maskBytes(key [4]byte, pos int, b []byte) int { } // Mask one byte at a time to word boundary. - //#nosec G103 -- (CWE-242) Has been audited if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { n = wordSize - n for i := range b[:n] { @@ -38,13 +36,11 @@ func maskBytes(key [4]byte, pos int, b []byte) int { for i := range k { k[i] = key[(pos+i)&3] } - //#nosec G103 -- (CWE-242) Has been audited kw := *(*uintptr)(unsafe.Pointer(&k)) // Mask one word at a time. 
n := (len(b) / wordSize) * wordSize for i := 0; i < n; i += wordSize { - //#nosec G103 -- (CWE-242) Has been audited *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go index 80f55d1eac..d716a05884 100644 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -6,9 +6,10 @@ package websocket import ( "bufio" + "bytes" + "context" "encoding/base64" "errors" - "log" "net" "net/http" "net/url" @@ -17,26 +18,40 @@ import ( "golang.org/x/net/proxy" ) -type netDialerFunc func(network, addr string) (net.Conn, error) +type netDialerFunc func(ctx context.Context, network, addr string) (net.Conn, error) func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { - return fn(network, addr) + return fn(context.Background(), network, addr) } -func init() { - proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) { - return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil - }) +func (fn netDialerFunc) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + return fn(ctx, network, addr) +} + +func proxyFromURL(proxyURL *url.URL, forwardDial netDialerFunc) (netDialerFunc, error) { + if proxyURL.Scheme == "http" || proxyURL.Scheme == "https" { + return (&httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDial}).DialContext, nil + } + dialer, err := proxy.FromURL(proxyURL, forwardDial) + if err != nil { + return nil, err + } + if d, ok := dialer.(proxy.ContextDialer); ok { + return d.DialContext, nil + } + return func(ctx context.Context, net, addr string) (net.Conn, error) { + return dialer.Dial(net, addr) + }, nil } type httpProxyDialer struct { proxyURL *url.URL - forwardDial func(network, addr string) (net.Conn, error) + forwardDial netDialerFunc } -func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { +func (hpd *httpProxyDialer) DialContext(ctx context.Context, network string, addr string) (net.Conn, error) { hostPort, _ := hostPortNoPort(hpd.proxyURL) - conn, err := hpd.forwardDial(network, hostPort) + conn, err := hpd.forwardDial(ctx, network, hostPort) if err != nil { return nil, err } @@ -49,7 +64,6 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) connectHeader.Set("Proxy-Authorization", "Basic "+credential) } } - connectReq := &http.Request{ Method: http.MethodConnect, URL: &url.URL{Opaque: addr}, @@ -58,27 +72,31 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) } if err := connectReq.Write(conn); err != nil { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + conn.Close() return nil, err } - // Read response. It's OK to use and discard buffered reader here becaue + // Read response. It's OK to use and discard buffered reader here because // the remote server does not speak until spoken to. br := bufio.NewReader(conn) resp, err := http.ReadResponse(br, connectReq) if err != nil { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + conn.Close() return nil, err } - if resp.StatusCode != 200 { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + // Close the response body to silence false positives from linters. 
Reset + // the buffered reader first to ensure that Close() does not read from + // conn. + // Note: Applications must call resp.Body.Close() on a response returned + // http.ReadResponse to inspect trailers or read another response from the + // buffered reader. The call to resp.Body.Close() does not release + // resources. + br.Reset(bytes.NewReader(nil)) + _ = resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + _ = conn.Close() f := strings.SplitN(resp.Status, " ", 2) return nil, errors.New(f[1]) } diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go index 1e720e1da4..02ea01fdcd 100644 --- a/vendor/github.com/gorilla/websocket/server.go +++ b/vendor/github.com/gorilla/websocket/server.go @@ -6,9 +6,7 @@ package websocket import ( "bufio" - "errors" - "io" - "log" + "net" "net/http" "net/url" "strings" @@ -102,8 +100,8 @@ func checkSameOrigin(r *http.Request) bool { func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { if u.Subprotocols != nil { clientProtocols := Subprotocols(r) - for _, serverProtocol := range u.Subprotocols { - for _, clientProtocol := range clientProtocols { + for _, clientProtocol := range clientProtocols { + for _, serverProtocol := range u.Subprotocols { if clientProtocol == serverProtocol { return clientProtocol } @@ -131,7 +129,8 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + w.Header().Set("Upgrade", "websocket") + return u.returnError(w, r, http.StatusUpgradeRequired, badHandshake+"'websocket' token not found in 'Upgrade' header") } if r.Method != http.MethodGet { @@ -173,30 +172,37 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } } - h, ok := w.(http.Hijacker) - if !ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") - } - var brw *bufio.ReadWriter - netConn, brw, err := h.Hijack() + netConn, brw, err := http.NewResponseController(w).Hijack() if err != nil { - return u.returnError(w, r, http.StatusInternalServerError, err.Error()) - } - - if brw.Reader.Buffered() > 0 { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) + return u.returnError(w, r, http.StatusInternalServerError, + "websocket: hijack: "+err.Error()) + } + + // Close the network connection when returning an error. The variable + // netConn is set to nil before the success return at the end of the + // function. + defer func() { + if netConn != nil { + // It's safe to ignore the error from Close() because this code is + // only executed when returning a more important error to the + // application. + _ = netConn.Close() } - return nil, errors.New("websocket: client sent data before handshake is complete") - } + }() var br *bufio.Reader - if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { - // Reuse hijacked buffered reader as connection reader. + if u.ReadBufferSize == 0 && brw.Reader.Size() > 256 { + // Use hijacked buffered reader as the connection reader. br = brw.Reader + } else if brw.Reader.Buffered() > 0 { + // Wrap the network connection to read buffered data in brw.Reader + // before reading from the network connection. 
This should be rare + // because a client must not send message data before receiving the + // handshake response. + netConn = &brNetConn{br: brw.Reader, Conn: netConn} } - buf := bufioWriterBuffer(netConn, brw.Writer) + buf := brw.Writer.AvailableBuffer() var writeBuf []byte if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { @@ -250,37 +256,30 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } p = append(p, "\r\n"...) - // Clear deadlines set by HTTP server. - if err := netConn.SetDeadline(time.Time{}); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } - if u.HandshakeTimeout > 0 { if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + return nil, err + } + } else { + // Clear deadlines set by HTTP server. + if err := netConn.SetDeadline(time.Time{}); err != nil { return nil, err } } + if _, err = netConn.Write(p); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } return nil, err } if u.HandshakeTimeout > 0 { if err := netConn.SetWriteDeadline(time.Time{}); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } return nil, err } } + // Success! Set netConn to nil to stop the deferred function above from + // closing the network connection. + netConn = nil + return c, nil } @@ -347,43 +346,28 @@ func IsWebSocketUpgrade(r *http.Request) bool { tokenListContainsValue(r.Header, "Upgrade", "websocket") } -// bufioReaderSize size returns the size of a bufio.Reader. -func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { - // This code assumes that peek on a reset reader returns - // bufio.Reader.buf[:0]. - // TODO: Use bufio.Reader.Size() after Go 1.10 - br.Reset(originalReader) - if p, err := br.Peek(0); err == nil { - return cap(p) - } - return 0 +type brNetConn struct { + br *bufio.Reader + net.Conn } -// writeHook is an io.Writer that records the last slice passed to it vio -// io.Writer.Write. -type writeHook struct { - p []byte +func (b *brNetConn) Read(p []byte) (n int, err error) { + if b.br != nil { + // Limit read to buferred data. + if n := b.br.Buffered(); len(p) > n { + p = p[:n] + } + n, err = b.br.Read(p) + if b.br.Buffered() == 0 { + b.br = nil + } + return n, err + } + return b.Conn.Read(p) } -func (wh *writeHook) Write(p []byte) (int, error) { - wh.p = p - return len(p), nil +// NetConn returns the underlying connection that is wrapped by b. +func (b *brNetConn) NetConn() net.Conn { + return b.Conn } -// bufioWriterBuffer grabs the buffer from a bufio.Writer. -func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { - // This code assumes that bufio.Writer.buf[:1] is passed to the - // bufio.Writer's underlying writer. 
- var wh writeHook - bw.Reset(&wh) - if err := bw.WriteByte(0); err != nil { - panic(err) - } - if err := bw.Flush(); err != nil { - log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) - } - - bw.Reset(originalWriter) - - return wh.p[:cap(wh.p)] -} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go deleted file mode 100644 index 7f38645348..0000000000 --- a/vendor/github.com/gorilla/websocket/tls_handshake.go +++ /dev/null @@ -1,18 +0,0 @@ -package websocket - -import ( - "context" - "crypto/tls" -) - -func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.HandshakeContext(ctx); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go index 9b1a629bff..31a5dee646 100644 --- a/vendor/github.com/gorilla/websocket/util.go +++ b/vendor/github.com/gorilla/websocket/util.go @@ -6,7 +6,7 @@ package websocket import ( "crypto/rand" - "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + "crypto/sha1" "encoding/base64" "io" "net/http" @@ -17,7 +17,7 @@ import ( var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") func computeAcceptKey(challengeKey string) string { - h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + h := sha1.New() h.Write([]byte(challengeKey)) h.Write(keyGUID) return base64.StdEncoding.EncodeToString(h.Sum(nil)) diff --git a/vendor/github.com/jaypipes/ghw/README.md b/vendor/github.com/jaypipes/ghw/README.md index bad7c86e10..c0011e493a 100644 --- a/vendor/github.com/jaypipes/ghw/README.md +++ b/vendor/github.com/jaypipes/ghw/README.md @@ -345,8 +345,8 @@ Each `ghw.Partition` struct contains these fields: * `ghw.Partition.IsReadOnly` is a bool indicating the partition is read-only * `ghw.Partition.Disk` is a pointer to the `ghw.Disk` object associated with the partition. -* `ghw.Partition.UUID` is a string containing the partition UUID on Linux, the - partition UUID on MacOS and nothing on Windows. On Linux systems, this is +* `ghw.Partition.UUID` is a string containing the partition UUID on Linux and MacOS, + and the `VolumeSerialNumber` on Windows (e.g. "A8C3D032"). On Linux systems, this is derived from the `ID_PART_ENTRY_UUID` [udev][udev] entry for the partition. [udev]: https://en.wikipedia.org/wiki/Udev diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block.go b/vendor/github.com/jaypipes/ghw/pkg/block/block.go index 5e75eea6f0..088060185f 100644 --- a/vendor/github.com/jaypipes/ghw/pkg/block/block.go +++ b/vendor/github.com/jaypipes/ghw/pkg/block/block.go @@ -265,8 +265,9 @@ type Partition struct { Type string `json:"type"` // IsReadOnly indicates if the partition is marked read-only. IsReadOnly bool `json:"read_only"` - // UUID is the universally-unique identifier (UUID) for the partition. - // This will be volume UUID on Darwin, PartUUID on linux, empty on Windows. + // UUID is a unique identifier for the partition. Note that for Windows + // partitions, this field contains a Volume Serial Number which is not + // in the standard UUID format, e.g. "A8C3D032". UUID string `json:"uuid"` // FilesystemLabel is the label of the filesystem contained on the // partition. On Linux, this is derived from the `ID_FS_NAME` udev entry. 
diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go index 270e19f98d..7c52495d25 100644 --- a/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go +++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go @@ -85,18 +85,19 @@ type win32LogicalDiskToPartition struct { Dependent *string } -const wqlLogicalDisk = "SELECT Caption, CreationClassName, Description, DeviceID, FileSystem, FreeSpace, Name, Size, SystemName FROM Win32_LogicalDisk" +const wqlLogicalDisk = "SELECT Caption, CreationClassName, Description, DeviceID, FileSystem, FreeSpace, Name, Size, SystemName, VolumeSerialNumber FROM Win32_LogicalDisk" type win32LogicalDisk struct { - Caption *string - CreationClassName *string - Description *string - DeviceID *string - FileSystem *string - FreeSpace *uint64 - Name *string - Size *uint64 - SystemName *string + Caption *string + CreationClassName *string + Description *string + DeviceID *string + FileSystem *string + FreeSpace *uint64 + Name *string + Size *uint64 + SystemName *string + VolumeSerialNumber *string } const wqlPhysicalDisk = "SELECT DeviceId, MediaType FROM MSFT_PhysicalDisk" @@ -173,13 +174,14 @@ func (i *Info) load() error { if *logicaldisktodiskpartition.Antecedent == desiredAntecedent && *logicaldisktodiskpartition.Dependent == desiredDependent { // Appending Partition p := &Partition{ + Disk: disk, Name: strings.TrimSpace(*logicaldisk.Caption), Label: strings.TrimSpace(*logicaldisk.Caption), SizeBytes: *logicaldisk.Size, MountPoint: *logicaldisk.DeviceID, Type: *diskpartition.Type, IsReadOnly: toReadOnly(*diskpartition.Access), - UUID: "", + UUID: *logicaldisk.VolumeSerialNumber, } disk.Partitions = append(disk.Partitions, p) break diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go index e5e341c120..f61e98bf68 100644 --- a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go +++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go @@ -20,7 +20,7 @@ import ( ) const ( - validPCIAddress = `\b(0{0,4}:\d{2}:\d{2}.\d:?\w*)` + validPCIAddress = `\b(0{0,4}:[[:xdigit:]]{2}:[[:xdigit:]]{2}\.[[:xdigit:]]:?\w*)` ) var reValidPCIAddress = regexp.MustCompile(validPCIAddress) diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes deleted file mode 100644 index 402433593c..0000000000 --- a/vendor/github.com/klauspost/compress/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -* -text -*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml deleted file mode 100644 index 4528059ca6..0000000000 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ /dev/null @@ -1,123 +0,0 @@ -version: 2 - -before: - hooks: - - ./gen.sh - -builds: - - - id: "s2c" - binary: s2c - main: ./s2/cmd/s2c/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - - - id: "s2d" - binary: s2d - main: ./s2/cmd/s2d/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - - - id: "s2sx" - binary: s2sx - main: ./s2/cmd/_s2sx/main.go - flags: - - -modfile=s2sx.mod 
- - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - -archives: - - - id: s2-binaries - name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - format_overrides: - - goos: windows - format: zip - files: - - unpack/* - - s2/LICENSE - - s2/README.md -checksum: - name_template: 'checksums.txt' -snapshot: - version_template: "{{ .Tag }}-next" -changelog: - sort: asc - filters: - exclude: - - '^doc:' - - '^docs:' - - '^test:' - - '^tests:' - - '^Update\sREADME.md' - -nfpms: - - - file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - vendor: Klaus Post - homepage: https://github.com/klauspost/compress - maintainer: Klaus Post - description: S2 Compression Tool - license: BSD 3-Clause - formats: - - deb - - rpm diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md deleted file mode 100644 index de264c85a5..0000000000 --- a/vendor/github.com/klauspost/compress/README.md +++ /dev/null @@ -1,721 +0,0 @@ -# compress - -This package provides various compression algorithms. - -* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. -* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). -* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. -* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. -* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. -* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. - -[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) -[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) - -# changelog - -* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) - * gzhttp: Add TransportAlwaysDecompress option. 
https://github.com/klauspost/compress/pull/978 - * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 - * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 - * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 - * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 - -* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) - * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 - * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 - * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 - * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 - -* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) - * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 - * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 - -* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) - * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 - * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 - -* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) - * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 - * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 - -* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) - * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 - * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 - * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 - * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 - * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 -https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 - -* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) - * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 - * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 - * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 - * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 - * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 - -* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) - * fse: Fix max header size https://github.com/klauspost/compress/pull/881 - * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 - * gzhttp: Fix missing content type on Close 
https://github.com/klauspost/compress/pull/883 - -* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) - * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 - -* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) - * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 - * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 - * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 - -* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) - * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 - * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 - * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 - * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 - * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 - * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - -
- See changes to v1.16.x - - -* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) - * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 - * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 - -* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) - * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 - * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 - * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 - * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 - -* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) - * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 - * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 - -* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) - * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 - * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 - * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 - * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 - * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 - * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 - -* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) - * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 - * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 - * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 - * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 - * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 - -* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) - * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 - * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 - * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 - * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 - * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 - * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 -
- -
- See changes to v1.15.x - -* Jan 21st, 2023 (v1.15.15) - * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 - * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 - * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 - * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 - -* Jan 3rd, 2023 (v1.15.14) - - * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 - * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 - * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 - * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 - -* Dec 11, 2022 (v1.15.13) - * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 - * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 - -* Oct 26, 2022 (v1.15.12) - - * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 - * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 - -* Sept 26, 2022 (v1.15.11) - - * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 - * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 - * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 - * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 - -* Sept 16, 2022 (v1.15.10) - - * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 - * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 - * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 - * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 - * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 - * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 - * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 - -* July 21, 2022 (v1.15.9) - - * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 - * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 - -* July 13, 2022 (v1.15.8) - - * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 - * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 - * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 - * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 - * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 - * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 - * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 - -* June 29, 2022 (v1.15.7) - - * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 - * zip: Merge upstream https://github.com/klauspost/compress/pull/631 - * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 - * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 - * flate: Faster histograms https://github.com/klauspost/compress/pull/620 - * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 - -* June 3, 2022 (v1.15.6) - * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 - * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 - * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 - * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 - * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 - * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 - * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 - * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 - * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 - * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 - -* May 25, 2022 (v1.15.5) - * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 - * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 - * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 - * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 - * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 - * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 - * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593 - * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 - - -* May 11, 2022 (v1.15.4) - * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) - * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) - * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) - * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) - -* May 5, 2022 (v1.15.3) - * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) - * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) - -* Apr 26, 2022 (v1.15.2) - * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) - * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) - * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) - * Minimum version is Go 1.16, added CI test on 1.18. - -* Mar 11, 2022 (v1.15.1) - * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) - * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) - * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) - * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) - * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) - -* Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) - * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) - -Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. - -Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. - -While the release has been extensively tested, it is recommended to testing when upgrading. - -
- -
- See changes to v1.14.x - -* Feb 22, 2022 (v1.14.4) - * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) - * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) - * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) - -* Feb 17, 2022 (v1.14.3) - * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) - * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) - * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) - -* Jan 25, 2022 (v1.14.2) - * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) - * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) - * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) - * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) - * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) - * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) - -* Jan 11, 2022 (v1.14.1) - * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) - * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) - * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) - * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) - * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) -
- -
- See changes to v1.13.x - -* Aug 30, 2021 (v1.13.5) - * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) - * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) - * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) - * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) - -* Aug 12, 2021 (v1.13.4) - * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). - * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) - -* Aug 3, 2021 (v1.13.3) - * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) - * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) - * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) - * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) - * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) - * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) - -* Jun 14, 2021 (v1.13.1) - * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) - * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) - * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) - * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) - -* Jun 3, 2021 (v1.13.0) - * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. - * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) - * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) -
- - -
- See changes to v1.12.x - -* May 25, 2021 (v1.12.3) - * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) - * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) - * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) - -* Apr 27, 2021 (v1.12.2) - * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) - * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) - * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) - * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) - * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) - * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) - -* Apr 14, 2021 (v1.12.1) - * snappy package removed. Upstream added as dependency. - * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) - * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) - * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) - * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) - * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) - * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) -
- -
- See changes to v1.11.x - -* Mar 26, 2021 (v1.11.13) - * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) - * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) - * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) - * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) - -* Mar 5, 2021 (v1.11.12) - * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). - * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) - -* Mar 1, 2021 (v1.11.9) - * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) - * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) - * s2: Fix binaries. - -* Feb 25, 2021 (v1.11.8) - * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. - * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) - * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) - * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) - * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) - -* Jan 14, 2021 (v1.11.7) - * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) - * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) - * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) - * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) - * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) - -* Jan 7, 2021 (v1.11.6) - * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) - * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) - -* Dec 20, 2020 (v1.11.4) - * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) - * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) - * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) - * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) - * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) - -* Nov 15, 2020 (v1.11.3) - * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) - * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) - -* Oct 11, 2020 (v1.11.2) - * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) - -* Oct 1, 2020 (v1.11.1) - * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) - -* Sept 8, 2020 (v1.11.0) - * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) - * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) - * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) -
- -
- See changes to v1.10.x - -* July 8, 2020 (v1.10.11) - * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) - * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) - -* June 23, 2020 (v1.10.10) - * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) - -* June 16, 2020 (v1.10.9): - * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) - * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) - * Fuzzit tests removed. The service has been purchased and is no longer available. - -* June 5, 2020 (v1.10.8): - * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) - -* June 1, 2020 (v1.10.7): - * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) - * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) - * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) - -* May 21, 2020: (v1.10.6) - * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) - * zstd: Stricter decompression checks. - -* April 12, 2020: (v1.10.5) - * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) - -* Apr 8, 2020: (v1.10.4) - * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) -* Mar 11, 2020: (v1.10.3) - * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) - * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) - * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) - * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) - * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) - -* Feb 27, 2020: (v1.10.2) - * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) - * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) - -* Feb 18, 2020: (v1.10.1) - * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) - * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) - * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) - -* Feb 4, 2020: (v1.10.0) - * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) - * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) - * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) - * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) - -
- -
- See changes prior to v1.10.0 - -* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). -* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) -* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. -* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. -* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) -* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. -* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) -* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features -* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) -* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) -* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. -* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) -* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) -* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) -* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. -* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. -* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) -* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. -* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) -* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. -* Nov 11, 2019: Reduce inflate memory use by 1KB. -* Nov 10, 2019: Less allocations in deflate bit writer. -* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. -* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) -* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) -* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) -* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) - -
- -
- See changes prior to v1.9.0 - -* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) -* Oct 3, 2019: Fix inconsistent results on broken zstd streams. -* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) -* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). -* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). -* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. -* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. -* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. -* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. -* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. -* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. -* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. -* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) -* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) -* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) -* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) -* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. -* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. -* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. -* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. -* June 17, 2019: zstd decompression bugfix. -* June 17, 2019: fix 32 bit builds. -* June 17, 2019: Easier use in modules (less dependencies). -* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. -* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. -* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. -* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! -* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. -* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. -* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
-* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. -* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). -* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. -* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. -* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. -* May 28, 2017: Reduce allocations when resetting decoder. -* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. -* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). -* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. -* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. -* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. -* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. -* Mar 24, 2016: Small speedup for level 1-3. -* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. -* Feb 19, 2016: Handle small payloads faster in level 1-3. -* Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. -* Feb 14, 2016: Snappy: Merge upstream changes. -* Feb 14, 2016: Snappy: Fix aggressive skipping. -* Feb 14, 2016: Snappy: Update benchmark. -* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. -* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. -* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. -* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. -* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. -* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. -* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. -* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. -* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! -* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). -* Nov 20 2015: Small optimization to bit writer on 64 bit systems. -* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). -* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
-* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file -* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. - -
- -# deflate usage - -The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: - -| old import | new import | Documentation -|--------------------|-----------------------------------------|--------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) - -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). - -You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. - -The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). - -Currently there is only minor speedup on decompression (mostly CRC32 calculation). - -Memory usage is typically 1MB for a Writer. stdlib is in the same range. -If you expect to have a lot of concurrently allocated Writers consider using -the stateless compress described below. - -For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). - -To disable all assembly add `-tags=noasm`. This works across all packages. - -# Stateless compression - -This package offers stateless compression as a special option for gzip/deflate. -It will do compression but without maintaining any state between Write calls. - -This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. - -This is only relevant in cases where you expect to run many thousands of compressors concurrently, -but with very little activity. This is *not* intended for regular web servers serving individual requests. - -Because of this, the size of actual Write calls will affect output size. - -In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. - -For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) - -A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: - -```go - // replace 'ioutil.Discard' with your output. - gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) - if err != nil { - return err - } - defer gzw.Close() - - w := bufio.NewWriterSize(gzw, 4096) - defer w.Flush() - - // Write to 'w' -``` - -This will only use up to 4KB in memory when the writer is idle. 
- -Compression is almost always worse than the fastest compression level -and each write will allocate (a little) memory. - -# Performance Update 2018 - -It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences. - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. 
-
-The picture is similar to the web content, but with small differences, since this data is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
-
-The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 and 7 of this package, respectively.
-
-## Highly Compressible File
-
-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data, etc.
-
-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
-
-So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
-
-## Medium-High Compressible
-
-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
-
-We see a similar picture here as in "Web Content". On equal levels, some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
-
-## Medium Compressible
-
-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
-
-The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6, without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
-
-
-## Un-compressible Content
-
-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
-
-
-## Huffman only compression
-
-This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
-
-This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
-
-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
-
-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
-
-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content.
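-
-As a minimal sketch of how this mode is selected (assuming this package's `flate`, whose writer API mirrors the standard library's; the sample input is invented for illustration):
-
-```go
-package main
-
-import (
-	"bytes"
-	"fmt"
-	"log"
-
-	"github.com/klauspost/compress/flate"
-)
-
-func main() {
-	var buf bytes.Buffer
-
-	// HuffmanOnly disables match searching entirely; only the per-byte
-	// Huffman coding step remains, so compression runs in near linear
-	// time at the cost of a worse ratio.
-	fw, err := flate.NewWriter(&buf, flate.HuffmanOnly)
-	if err != nil {
-		log.Fatal(err)
-	}
-	if _, err := fw.Write(bytes.Repeat([]byte("entropy "), 1000)); err != nil {
-		log.Fatal(err)
-	}
-	if err := fw.Close(); err != nil {
-		log.Fatal(err)
-	}
-	fmt.Printf("8000 bytes in, %d bytes out\n", buf.Len())
-}
-```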
-For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and the encode speed is 144MB/s (4.5x level 1). So in this case you trade roughly a 30% size increase for a 4x speedup.
-
-For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-
-This is implemented in Go 1.7 as the "Huffman Only" mode, though it is not exposed for gzip.
-
-# Other packages
-
-Here are other packages of good quality written in pure Go (no cgo wrappers or autoconverted code):
-
-* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
-* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
-* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
-* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
-* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
-* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
-* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
-
-# license
-
-This code is licensed under the same conditions as the original Go code. See the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md
deleted file mode 100644
index ca6685e2b7..0000000000
--- a/vendor/github.com/klauspost/compress/SECURITY.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Security Policy
-
-## Supported Versions
-
-Security updates are applied only to the latest release.
-
-## Vulnerability Definition
-
-A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will slow operation be considered a security vulnerability.
-
-Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
-
-Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt, you are welcome to file a security issue.
-
-It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
-
-Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity, this package may or may not implement a workaround.
-
-## Reporting a Vulnerability
-
-If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
-
-Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible, please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
-
-This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
deleted file mode 100644
index ea5a692d51..0000000000
--- a/vendor/github.com/klauspost/compress/compressible.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package compress
-
-import "math"
-
-// Estimate returns a normalized compressibility estimate of block b.
-// Values close to zero are likely uncompressible.
-// Values above 0.1 are likely to be compressible.
-// Values above 0.5 are very compressible.
-// Very small lengths will return 0.
-func Estimate(b []byte) float64 {
-	if len(b) < 16 {
-		return 0
-	}
-
-	// Correctly predicted order 1
-	hits := 0
-	lastMatch := false
-	var o1 [256]byte
-	var hist [256]int
-	c1 := byte(0)
-	for _, c := range b {
-		if c == o1[c1] {
-			// We only count a hit if there were two correct predictions in a row.
-			if lastMatch {
-				hits++
-			}
-			lastMatch = true
-		} else {
-			lastMatch = false
-		}
-		o1[c1] = c
-		c1 = c
-		hist[c]++
-	}
-
-	// Use x^0.6 to give better spread
-	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
-
-	// Calculate histogram distribution
-	variance := float64(0)
-	avg := float64(len(b)) / 256
-
-	for _, v := range hist {
-		Δ := float64(v) - avg
-		variance += Δ * Δ
-	}
-
-	stddev := math.Sqrt(float64(variance)) / float64(len(b))
-	exp := math.Sqrt(1 / float64(len(b)))
-
-	// Subtract expected stddev
-	stddev -= exp
-	if stddev < 0 {
-		stddev = 0
-	}
-	stddev *= 1 + exp
-
-	// Use x^0.4 to give better spread
-	entropy := math.Pow(stddev, 0.4)
-
-	// 50/50 weight between prediction and histogram distribution
-	return math.Pow((prediction+entropy)/2, 0.9)
-}
-
-// ShannonEntropyBits returns the minimum number of bits required to represent
-// an entropy encoding of the input bytes.
-// https://en.wiktionary.org/wiki/Shannon_entropy
-func ShannonEntropyBits(b []byte) int {
-	if len(b) == 0 {
-		return 0
-	}
-	var hist [256]int
-	for _, c := range b {
-		hist[c]++
-	}
-	shannon := float64(0)
-	invTotal := 1.0 / float64(len(b))
-	for _, v := range hist[:] {
-		if v > 0 {
-			n := float64(v)
-			shannon += math.Ceil(-math.Log2(n*invTotal) * n)
-		}
-	}
-	return int(math.Ceil(shannon))
-}
diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md
deleted file mode 100644
index ea7324da67..0000000000
--- a/vendor/github.com/klauspost/compress/fse/README.md
+++ /dev/null
@@ -1,79 +0,0 @@
-# Finite State Entropy
-
-This package provides Finite State Entropy encoding and decoding.
-
-Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
-encoding provides a fast near-optimal symbol encoding/decoding
-for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
-
-This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
-This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
-but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
-
-* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
-
-## News
-
- * Feb 2018: First implementation released. Consider this beta software for now.
-
-# Usage
-
-This package provides a low-level interface that allows compressing single independent blocks.
-
-Each block is separate, and there are no built-in integrity checks.
-This means that the caller should keep track of block sizes, and also do checksums if needed.
-
-Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
-You must provide input and will receive the output and maybe an error.
-
-These error values can be returned:
-
-| Error               | Description                                                                   |
-|---------------------|-------------------------------------------------------------------------------|
-| `nil`               | Everything ok, output is returned                                             |
-| `ErrIncompressible` | Returned when input is judged to be too hard to compress                      |
-| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated   |
-| `(error)`           | An internal error occurred.                                                   |
-
-As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these.
-
-To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
-that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
-object can be used for both.
-
-Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used; if you are still using it,
-you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
-
-Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
-You must provide the output from the compression stage, at exactly the size you got back. If you receive an error,
-your input was likely corrupted.
-
-It is important to note that a successful decoding does *not* mean your output matches your original input.
-There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
-
-For more detailed usage, see the examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
-
-# Performance
-
-A lot of factors affect speed. Block sizes and the compressibility of the material are the primary factors.
-All compression functions currently run on the calling goroutine, so only one core will be used per block.
-
-The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
-is used to reduce some of the processing, so if all your input is above byte value 64, for instance, it may be
-beneficial to transpose all your input values down by 64.
-
-With moderate block sizes around 64KB, speeds are typically 200MB/s per core for compression, and
-around 300MB/s for decompression.
-
-The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
-
-# Plans
-
-At one point, more internals will be exposed to facilitate more "expert" usage of the components.
-
-A streaming interface is also likely to be implemented, likely compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
-
-# Contributing
-
-Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
-changes will likely not be accepted. If in doubt, open an issue before writing the PR.
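-
-To make the usage described above concrete, a round trip might look like the following sketch. The input data is invented for illustration, and the `Out`-buffer handling follows the re-use caveat from the usage section above.
-
-```go
-package main
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"log"
-
-	"github.com/klauspost/compress/fse"
-)
-
-func main() {
-	in := bytes.Repeat([]byte("abcdabcdabcd0123"), 100)
-
-	var s fse.Scratch
-	comp, err := fse.Compress(in, &s)
-	switch {
-	case errors.Is(err, fse.ErrIncompressible):
-		log.Println("store the block uncompressed")
-		return
-	case errors.Is(err, fse.ErrUseRLE):
-		log.Println("store the block as RLE")
-		return
-	case err != nil:
-		log.Fatal(err)
-	}
-
-	// The compressed bytes live in the scratch's Out buffer; copy them
-	// and detach Out before re-using the same scratch for decompression.
-	comp = append([]byte(nil), comp...)
-	s.Out = nil
-
-	got, err := fse.Decompress(comp, &s)
-	if err != nil {
-		log.Fatal(err)
-	}
-	if !bytes.Equal(got, in) {
-		log.Fatal("round trip mismatch")
-	}
-	fmt.Printf("%d -> %d bytes\n", len(in), len(comp))
-}
-```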
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go deleted file mode 100644 index f65eb3909c..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bitreader.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) uint16 { - if n == 0 || b.bitsRead >= 64 { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.bitsRead >= 64 && b.off == 0 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. 
- b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go deleted file mode 100644 index e82fa3bb7b..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go deleted file mode 100644 index abade2d605..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bytereader.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Uint32 returns a little endian uint32 starting at current offset. 
-func (b byteReader) Uint32() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go deleted file mode 100644 index 074018d8f9..0000000000 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "errors" - "fmt" -) - -// Compress the input bytes. Input must be < 2GB. -// Provide a Scratch buffer to avoid memory allocations. -// Note that the output is also kept in the scratch buffer. -// If input is too hard to compress, ErrIncompressible is returned. -// If input is a single byte value repeated ErrUseRLE is returned. -func Compress(in []byte, s *Scratch) ([]byte, error) { - if len(in) <= 1 { - return nil, ErrIncompressible - } - if len(in) > (2<<30)-1 { - return nil, errors.New("input too big, must be < 2GB") - } - s, err := s.prepare(in) - if err != nil { - return nil, err - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - if maxCount == 0 { - maxCount = s.countSimple(in) - } - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount == len(in) { - // One symbol, use RLE - return nil, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, ErrIncompressible - } - s.optimalTableLog() - err = s.normalizeCount() - if err != nil { - return nil, err - } - err = s.writeCount() - if err != nil { - return nil, err - } - - if false { - err = s.validateNorm() - if err != nil { - return nil, err - } - } - - err = s.buildCTable() - if err != nil { - return nil, err - } - err = s.compress(in) - if err != nil { - return nil, err - } - s.Out = s.bw.out - // Check if we compressed. - if len(s.Out) >= len(in) { - return nil, ErrIncompressible - } - return s.Out, nil -} - -// cState contains the compression state of a stream. -type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + first.deltaFindState - c.state = c.stateTable[lu] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// encode the output symbol provided and write it to the bitstream. 
-func (c *cState) encodeZero(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) - c.bw.flush() -} - -// compress is the main compression loop that will encode the input from the last byte to the first. -func (s *Scratch) compress(src []byte) error { - if len(src) <= 2 { - return errors.New("compress: src too small") - } - tt := s.ct.symbolTT[:256] - s.bw.reset(s.Out) - - // Our two states each encodes every second byte. - // Last byte encoded (first byte decoded) will always be encoded by c1. - var c1, c2 cState - - // Encode so remaining size is divisible by 4. - ip := len(src) - if ip&1 == 1 { - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - c1.encodeZero(tt[src[ip-3]]) - ip -= 3 - } else { - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - ip -= 2 - } - if ip&2 != 0 { - c2.encodeZero(tt[src[ip-1]]) - c1.encodeZero(tt[src[ip-2]]) - ip -= 2 - } - src = src[:ip] - - // Main compression loop. - switch { - case !s.zeroBits && s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush. - // We do not need to check if any output is 0 bits. - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case !s.zeroBits: - // We do not need to check if any output is 0 bits. - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - s.bw.flush32() - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - default: - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - s.bw.flush32() - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - } - - // Flush final state. - // Used to initialize state when decoding. - c2.flush(s.actualTableLog) - c1.flush(s.actualTableLog) - - s.bw.close() - return nil -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. 
-func (s *Scratch) writeCount() error { - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 - - // Write Table Size - bitStream = uint32(tableLog - minTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - ) - if cap(s.Out) < maxHeaderSize { - s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) - } - outP := uint(0) - out := s.Out[:maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return errors.New("internal error: remaining<1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += (bitCount + 7) / 8 - - if charnum > s.symbolLen { - return errors.New("internal error: charnum > s.symbolLen") - } - s.Out = out[:outP] - return nil -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaFindState int32 - deltaNbBits uint32 -} - -// String prints values as a human readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *Scratch) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. 
-func (s *Scratch) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [maxSymbolValue + 2]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. - largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int32(total - 1) - total++ - default: - maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int32(total - v) - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int) { - for _, v := range in { - s.count[v]++ - } - m, symlen := uint32(0), s.symbolLen - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - symlen = uint16(i) + 1 - } - s.symbolLen = symlen - return int(m) -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 - minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > maxTableLog { - tableLog = maxTableLog - } - s.actualTableLog = tableLog -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -func (s *Scratch) normalizeCount() error { - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(s.br.remain()) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(s.br.remain() >> tableLog) - ) - - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - return s.normalizeCount2() - } - s.norm[largest] += stillToDistribute - return nil -} - -// Secondary normalization method. -// To be used when primary method fails. 
-func (s *Scratch) normalizeCount2() error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(s.br.remain()) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// validateNorm validates the normalized histogram table. 
-func (s *Scratch) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 { - if previous0 { - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - n0 += 24 - if b.off < iend-5 { - b.advance(2) - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 16 - bitCount += 16 - } - } - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - for charnum < n0 { - s.norm[charnum&0xff] = 0 - charnum++ - } - - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*(threshold) - 1) - (remaining) - var count int32 - - if (int32(bitStream) & (threshold - 1)) < max { - count = int32(bitStream) & (threshold - 1) - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - count-- // extra accuracy - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - } - bitStream = b.Uint32() >> (bitCount & 31) - } - s.symbolLen = charnum - - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<> 3) - return nil -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -type decSymbol struct { - newState uint16 - symbol uint8 - nbBits uint8 -} - -// allocDtable will allocate decoding tables if they are not big enough. 
-func (s *Scratch) allocDtable() { - tableSize := 1 << s.actualTableLog - if cap(s.decTable) < tableSize { - s.decTable = make([]decSymbol, tableSize) - } - s.decTable = s.decTable[:tableSize] - - if cap(s.ct.tableSymbol) < 256 { - s.ct.tableSymbol = make([]byte, 256) - } - s.ct.tableSymbol = s.ct.tableSymbol[:256] - - if cap(s.ct.stateTable) < 256 { - s.ct.stateTable = make([]uint16, 256) - } - s.ct.stateTable = s.ct.stateTable[:256] -} - -// buildDtable will build the decoding table. -func (s *Scratch) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - s.allocDtable() - symbolNext := s.ct.stateTable[:256] - - // Init, lay down lowprob symbols - s.zeroBits = false - { - largeLimit := int16(1 << (s.actualTableLog - 1)) - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.decTable[highThreshold].symbol = uint8(i) - highThreshold-- - symbolNext[i] = 1 - } else { - if v >= largeLimit { - s.zeroBits = true - } - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.decTable[position].symbol = uint8(ss) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.decTable { - symbol := v.symbol - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.decTable[u].nbBits = nBits - newState := (nextState << nBits) - tableSize - if newState >= tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.decTable[u].newState = newState - } - } - return nil -} - -// decompress will decompress the bitstream. -// If the buffer is over-read an error is returned. -func (s *Scratch) decompress() error { - br := &s.bits - if err := br.init(s.br.unread()); err != nil { - return err - } - - var s1, s2 decoder - // Initialize and decode first state and symbol. - s1.init(br, s.decTable, s.actualTableLog) - s2.init(br, s.decTable, s.actualTableLog) - - // Use temp table to avoid bound checks/append penalty. - var tmp = s.ct.tableSymbol[:256] - var off uint8 - - // Main part - if !s.zeroBits { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.nextFast() - tmp[off+1] = s2.nextFast() - br.fillFast() - tmp[off+2] = s1.nextFast() - tmp[off+3] = s2.nextFast() - off += 4 - // When off is 0, we have overflowed and should write. - if off == 0 { - s.Out = append(s.Out, tmp...) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } else { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.next() - tmp[off+1] = s2.next() - br.fillFast() - tmp[off+2] = s1.next() - tmp[off+3] = s2.next() - off += 4 - if off == 0 { - s.Out = append(s.Out, tmp...) - // When off is 0, we have overflowed and should write. 
- if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } - s.Out = append(s.Out, tmp[:off]...) - - // Final bits, a bit more expensive check - for { - if s1.finished() { - s.Out = append(s.Out, s1.final(), s2.final()) - break - } - br.fill() - s.Out = append(s.Out, s1.next()) - if s2.finished() { - s.Out = append(s.Out, s2.final(), s1.final()) - break - } - s.Out = append(s.Out, s2.next()) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - return br.close() -} - -// decoder keeps track of the current state and updates it from the bitstream. -type decoder struct { - state uint16 - br *bitReader - dt []decSymbol -} - -// init will initialize the decoder and read the first state from the stream. -func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { - d.dt = dt - d.br = in - d.state = in.getBits(tableLog) -} - -// next returns the next symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) next() uint8 { - n := &d.dt[d.state] - lowBits := d.br.getBits(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (d *decoder) finished() bool { - return d.br.finished() && d.dt[d.state].nbBits > 0 -} - -// final returns the current state symbol without decoding the next. -func (d *decoder) final() uint8 { - return d.dt[d.state].symbol -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) nextFast() uint8 { - n := d.dt[d.state] - lowBits := d.br.getBitsFast(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go deleted file mode 100644 index 535cbadfde..0000000000 --- a/vendor/github.com/klauspost/compress/fse/fse.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -// Package fse provides Finite State Entropy encoding and decoding. -// -// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding -// for byte blocks as implemented in zstd. -// -// See https://github.com/klauspost/compress/tree/master/fse for more information. -package fse - -import ( - "errors" - "fmt" - "math/bits" -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = 14 - defaultMemoryUsage = 13 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - defaultTablelog = defaultMemoryUsage - 2 - minTablelog = 5 - maxSymbolValue = 255 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. 
- ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") -) - -// Scratch provides temporary storage for compression and decompression. -type Scratch struct { - // Private - count [maxSymbolValue + 1]uint32 - norm [maxSymbolValue + 1]int16 - br byteReader - bits bitReader - bw bitWriter - ct cTable // Compression tables. - decTable []decSymbol // Decompression table. - maxCount int // count of the most probable symbol - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // DecompressLimit limits the maximum decoded size acceptable. - // If > 0 decompression will stop when approximately this many bytes - // has been decoded. - // If 0, maximum size will be 2GB. - DecompressLimit int - - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - TableLog uint8 -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *Scratch) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = 255 - } - if s.TableLog == 0 { - s.TableLog = defaultTablelog - } - if s.TableLog > maxTableLog { - return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - s.br.init(in) - if s.DecompressLimit == 0 { - // Max size 2GB. - s.DecompressLimit = (2 << 30) - 1 - } - - return s, nil -} - -// tableStep returns the next table index. 
-func tableStep(tableSize uint32) uint32 {
-	return (tableSize >> 1) + (tableSize >> 3) + 3
-}
-
-func highBits(val uint32) (n uint32) {
-	return uint32(bits.Len32(val) - 1)
-}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
deleted file mode 100644
index aff942205f..0000000000
--- a/vendor/github.com/klauspost/compress/gen.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-
-cd s2/cmd/_s2sx/ || exit 1
-go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
deleted file mode 100644
index b3d262958f..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
deleted file mode 100644
index 8b6e5c6638..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# Huff0 entropy compression
-
-This package provides Huff0 encoding and decoding as used in zstd.
-
-[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
-a Huffman codec designed for modern CPUs, featuring out-of-order (OoO) operations on multiple ALUs
-(arithmetic logic units) and achieving extremely fast compression and decompression speeds.
-
-This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
-This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
-but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
-
-* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
-
-## News
-
-This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
-
-This ensures that most functionality is well tested.
-
-# Usage
-
-This package provides a low-level interface that allows compressing single independent blocks.
-
-Each block is separate, and there are no built-in integrity checks.
-This means that the caller should keep track of block sizes, and also do checksums if needed.
-
-Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
-[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
-You must provide input and will receive the output and maybe an error.
-
-These error values can be returned:
-
-| Error               | Description                                                                   |
-|---------------------|-------------------------------------------------------------------------------|
-| `nil`               | Everything ok, output is returned                                             |
-| `ErrIncompressible` | Returned when input is judged to be too hard to compress                      |
-| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated   |
-| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)        |
-| `(error)`           | An internal error occurred.                                                   |
-
-
-As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
-
-To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
-that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
-object can be used for both.
-
-Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used. If you are still
-using the previous output, you must set the `Out` field in the scratch to nil. The same buffer is used
-for compression and decompression output.
-
-The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
-
-## Tables and re-use
-
-Huff0 allows reusing tables from the previous block to save space, when that is expected to give better or faster results.
-
-The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
-that controls this behaviour. See the documentation for details. This can be altered between each block.
-
-Note, however, that this information is *not* stored in the output block, and it is up to the users of the package to
-record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
-based on the boolean reported back from the `CompressXX` call.
-
-If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the
-[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
-
-## Decompressing
-
-The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
-You can supply the complete block to `ReadTable`, and it will return the data part of the block,
-which can be given to the decompressor.
-
-Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
-or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
-
-For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested; it will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
-
-You must provide the output from the compression stage at exactly the size you got back. If you receive an error,
-your input was likely corrupted.
-
-It is important to note that a successful decoding does *not* mean your output matches your original input.
-There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
-
-# Contributing
-
-Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
-changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
deleted file mode 100644
index e36d9742f9..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2018 Klaus Post. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
-
-package huff0
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "io"
-)
-
-// bitReader reads a bitstream in reverse.
-// The last set bit indicates the start of the stream and is used
-// for aligning the input.
-type bitReaderBytes struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderBytes) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderBytes) peekByteFast() uint8 { - got := uint8(b.value >> 56) - return got -} - -func (b *bitReaderBytes) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderBytes) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. -func (b *bitReaderBytes) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderBytes) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderBytes) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -func (b *bitReaderBytes) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderBytes) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReaderShifted reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderShifted struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. 
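-// As with bitReaderBytes, the stream is read back to front: the highest set
-// bit of the final byte is an end-of-stream marker, which is why a zero final
-// byte is rejected as corrupt, and init advances past the padding bits and
-// the marker before real data begins.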
-func (b *bitReaderShifted) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { - return uint16(b.value >> ((64 - n) & 63)) -} - -func (b *bitReaderShifted) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderShifted) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. -func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderShifted) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) - b.bitsRead -= 8 - b.off-- - } -} - -func (b *bitReaderShifted) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderShifted) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go deleted file mode 100644 index 0ebc9aaac7..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// encSymbol will add up to 16 bits. 
value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encSymbol(ct cTable, symbol byte) { - enc := ct[symbol] - b.bitContainer |= uint64(enc.val) << (b.nBits & 63) - if false { - if enc.nBits == 0 { - panic("nbits 0") - } - } - b.nBits += enc.nBits -} - -// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { - encA := ct[av] - encB := ct[bv] - sh := b.nBits & 63 - combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) - b.bitContainer |= combined << sh - if false { - if encA.nBits == 0 { - panic("nbitsA 0") - } - if encB.nBits == 0 { - panic("nbitsB 0") - } - } - b.nBits += encA.nBits + encB.nBits -} - -// encFourSymbols adds up to 32 bits from four symbols. -// It will not check if there is space for them, -// so the caller must ensure that b has been flushed recently. -func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { - bitsA := encA.nBits - bitsB := bitsA + encB.nBits - bitsC := bitsB + encC.nBits - bitsD := bitsC + encD.nBits - combined := uint64(encA.val) | - (uint64(encB.val) << (bitsA & 63)) | - (uint64(encC.val) << (bitsB & 63)) | - (uint64(encD.val) << (bitsC & 63)) - b.bitContainer |= combined << (b.nBits & 63) - b.nBits += bitsD -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go deleted file mode 100644 index 84aa3d12f0..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ /dev/null @@ -1,742 +0,0 @@ -package huff0 - -import ( - "fmt" - "math" - "runtime" - "sync" -) - -// Compress1X will compress the input. -// The output can be decoded using Decompress1X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - return compress(in, s, s.compress1X) -} - -// Compress4X will compress the input. The input is split into 4 independent blocks -// and compressed similar to Compress1X. -// The output can be decoded using Decompress4X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. 
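-// The four streams are written back to back, preceded by a 6-byte jump table
-// holding the compressed lengths of the first three streams as little-endian
-// uint16 values; the fourth length is implied by the remaining input.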
-func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - if false { - // TODO: compress4Xp only slightly faster. - const parallelThreshold = 8 << 10 - if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { - return compress(in, s, s.compress4X) - } - return compress(in, s, s.compress4Xp) - } - return compress(in, s, s.compress4X) -} - -func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { - // Nuke previous table if we cannot reuse anyway. - if s.Reuse == ReusePolicyNone { - s.prevTable = s.prevTable[:0] - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return nil, false, ErrIncompressible - } - // One symbol, use RLE - return nil, false, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, false, ErrIncompressible - } - if s.Reuse == ReusePolicyMust && !canReuse { - // We must reuse, but we can't. - return nil, false, ErrIncompressible - } - if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { - keepTable := s.cTable - keepTL := s.actualTableLog - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - s.cTable = keepTable - s.actualTableLog = keepTL - if err == nil && len(s.Out) < wantSize { - s.OutData = s.Out - return s.Out, true, nil - } - if s.Reuse == ReusePolicyMust { - return nil, false, ErrIncompressible - } - // Do not attempt to re-use later. - s.prevTable = s.prevTable[:0] - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return nil, false, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - if s.Reuse == ReusePolicyAllow && canReuse { - hSize := len(s.Out) - oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) - newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) - if oldSize <= hSize+newSize || hSize+12 >= wantSize { - // Retain cTable even if we re-use. - keepTable := s.cTable - keepTL := s.actualTableLog - - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - - // Restore ctable. - s.cTable = keepTable - s.actualTableLog = keepTL - if err != nil { - return nil, false, err - } - if len(s.Out) >= wantSize { - return nil, false, ErrIncompressible - } - s.OutData = s.Out - return s.Out, true, nil - } - } - - // Use new table - err = s.cTable.write(s) - if err != nil { - s.OutTable = nil - return nil, false, err - } - s.OutTable = s.Out - - // Compress using new table - s.Out, err = compressor(in) - if err != nil { - s.OutTable = nil - return nil, false, err - } - if len(s.Out) >= wantSize { - s.OutTable = nil - return nil, false, ErrIncompressible - } - // Move current table into previous. 
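- // The old prevTable's backing array is handed to cTable (at length zero) so
- // the next block can build its table without allocating.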
- s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] - s.OutData = s.Out[len(s.OutTable):] - return s.Out, false, nil -} - -// EstimateSizes will estimate the data sizes -func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { - s, err = s.prepare(in) - if err != nil { - return 0, 0, 0, err - } - - // Create histogram, if none was provided. - tableSz, dataSz, reuseSz = -1, -1, -1 - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return 0, 0, 0, ErrIncompressible - } - // One symbol, use RLE - return 0, 0, 0, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return 0, 0, 0, ErrIncompressible - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return 0, 0, 0, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - tableSz, err = s.cTable.estTableSize(s) - if err != nil { - return 0, 0, 0, err - } - if canReuse { - reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) - } - dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) - - // Restore - return tableSz, dataSz, reuseSz, nil -} - -func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src), nil -} - -func (s *Scratch) compress1xDo(dst, src []byte) []byte { - var bw = bitWriter{out: dst} - - // N is length divisible by 4. - n := len(src) - n -= n & 3 - cTable := s.cTable[:256] - - // Encode last bytes. - for i := len(src) & 3; i > 0; i-- { - bw.encSymbol(cTable, src[n+i-1]) - } - n -= 4 - if s.actualTableLog <= 8 { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) - } - } else { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.flush32() - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } - bw.close() - return bw.out -} - -var sixZeros [6]byte - -func (s *Scratch) compress4X(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - segmentSize := (len(src) + 3) / 4 - - // Add placeholder for output length - offsetIdx := len(s.Out) - s.Out = append(s.Out, sixZeros[:]...) - - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - idx := len(s.Out) - s.Out = s.compress1xDo(s.Out, toDo) - if len(s.Out)-idx > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - length := len(s.Out) - idx - s.Out[i*2+offsetIdx] = byte(length) - s.Out[i*2+offsetIdx+1] = byte(length >> 8) - } - } - - return s.Out, nil -} - -// compress4Xp will compress 4 streams using separate goroutines. 
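-// Each stream is encoded into its own s.tmpOut buffer and the results are
-// concatenated after all goroutines finish, so they never contend on s.Out.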
-func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - // Add placeholder for output length - s.Out = s.Out[:6] - - segmentSize := (len(src) + 3) / 4 - var wg sync.WaitGroup - wg.Add(4) - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - // Separate goroutine for each block. - go func(i int) { - s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) - wg.Done() - }(i) - } - wg.Wait() - for i := 0; i < 4; i++ { - o := s.tmpOut[i] - if len(o) > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - s.Out[i*2] = byte(len(o)) - s.Out[i*2+1] = byte(len(o) >> 8) - } - - // Write output. - s.Out = append(s.Out, o...) - } - return s.Out, nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { - reuse = true - _ = s.count // Assert that s != nil to speed up the following loop. - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - if len(s.prevTable) > 0 { - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else if s.prevTable[i].nBits == 0 { - reuse = false - } - } - return int(m), reuse - } - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - } - return int(m), false -} - -func (s *Scratch) canUseTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 && c[i].nBits == 0 { - return false - } - } - return true -} - -//lint:ignore U1000 used for debugging -func (s *Scratch) validateTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 { - if c[i].nBits == 0 { - return false - } - if c[i].nBits > s.actualTableLog { - return false - } - } - } - return true -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
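-// It returns the smaller of log2(srcLen)+1 (a short input needs no more
-// precision) and log2(symbolLen-1)+2 (every present symbol needs a
-// distinguishable weight).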
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.srcLen)) + 1 - minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > tableLogMax { - tableLog = tableLogMax - } - s.actualTableLog = tableLog -} - -type cTableEntry struct { - val uint16 - nBits uint8 - // We have 8 bits extra -} - -const huffNodesMask = huffNodesLen - 1 - -func (s *Scratch) buildCTable() error { - s.optimalTableLog() - s.huffSort() - if cap(s.cTable) < maxSymbolValue+1 { - s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) - } else { - s.cTable = s.cTable[:s.symbolLen] - for i := range s.cTable { - s.cTable[i] = cTableEntry{} - } - } - - var startNode = int16(s.symbolLen) - nonNullRank := s.symbolLen - 1 - - nodeNb := startNode - huffNode := s.nodes[1 : huffNodesLen+1] - - // This overlays the slice above, but allows "-1" index lookups. - // Different from reference implementation. - huffNode0 := s.nodes[0 : huffNodesLen+1] - - for huffNode[nonNullRank].count() == 0 { - nonNullRank-- - } - - lowS := int16(nonNullRank) - nodeRoot := nodeNb + lowS - 1 - lowN := nodeNb - huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) - huffNode[lowS].setParent(nodeNb) - huffNode[lowS-1].setParent(nodeNb) - nodeNb++ - lowS -= 2 - for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].setCount(1 << 30) - } - // fake entry, strong barrier - huffNode0[0].setCount(1 << 31) - - // create parents - for nodeNb <= nodeRoot { - var n1, n2 int16 - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n1 = lowS - lowS-- - } else { - n1 = lowN - lowN++ - } - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n2 = lowS - lowS-- - } else { - n2 = lowN - lowN++ - } - - huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) - huffNode0[n1+1].setParent(nodeNb) - huffNode0[n2+1].setParent(nodeNb) - nodeNb++ - } - - // distribute weights (unlimited tree height) - huffNode[nodeRoot].setNbBits(0) - for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - s.actualTableLog = s.setMaxHeight(int(nonNullRank)) - maxNbBits := s.actualTableLog - - // fill result into tree (val, nbBits) - if maxNbBits > tableLogMax { - return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) - } - var nbPerRank [tableLogMax + 1]uint16 - var valPerRank [16]uint16 - for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits()]++ - } - // determine stating value per rank - { - min := uint16(0) - for n := maxNbBits; n > 0; n-- { - // get starting value within each rank - valPerRank[n] = min - min += nbPerRank[n] - min >>= 1 - } - } - - // push nbBits per symbol, symbol order - for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol()].nBits = v.nbBits() - } - - // 
assign value within rank, symbol order - t := s.cTable[:s.symbolLen] - for n, val := range t { - nbits := val.nBits & 15 - v := valPerRank[nbits] - t[n].val = v - valPerRank[nbits] = v + 1 - } - - return nil -} - -// huffSort will sort symbols, decreasing order. -func (s *Scratch) huffSort() { - type rankPos struct { - base uint32 - current uint32 - } - - // Clear nodes - nodes := s.nodes[:huffNodesLen+1] - s.nodes = nodes - nodes = nodes[1 : huffNodesLen+1] - - // Sort into buckets based on length of symbol count. - var rank [32]rankPos - for _, v := range s.count[:s.symbolLen] { - r := highBit32(v+1) & 31 - rank[r].base++ - } - // maxBitLength is log2(BlockSizeMax) + 1 - const maxBitLength = 18 + 1 - for n := maxBitLength; n > 0; n-- { - rank[n-1].base += rank[n].base - } - for n := range rank[:maxBitLength] { - rank[n].current = rank[n].base - } - for n, c := range s.count[:s.symbolLen] { - r := (highBit32(c+1) + 1) & 31 - pos := rank[r].current - rank[r].current++ - prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count() { - nodes[pos&huffNodesMask] = prev - pos-- - prev = nodes[(pos-1)&huffNodesMask] - } - nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) - } -} - -func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { - maxNbBits := s.actualTableLog - huffNode := s.nodes[1 : huffNodesLen+1] - //huffNode = huffNode[: huffNodesLen] - - largestBits := huffNode[lastNonNull].nbBits() - - // early exit : no elt > maxNbBits - if largestBits <= maxNbBits { - return largestBits - } - totalCost := int(0) - baseCost := int(1) << (largestBits - maxNbBits) - n := uint32(lastNonNull) - - for huffNode[n].nbBits() > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) - huffNode[n].setNbBits(maxNbBits) - n-- - } - // n stops at huffNode[n].nbBits <= maxNbBits - - for huffNode[n].nbBits() == maxNbBits { - n-- - } - // n end at index of smallest symbol using < maxNbBits - - // renorm totalCost - totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ - - // repay normalized cost - { - const noSymbol = 0xF0F0F0F0 - var rankLast [tableLogMax + 2]uint32 - - for i := range rankLast[:] { - rankLast[i] = noSymbol - } - - // Get pos of last (smallest) symbol per rank - { - currentNbBits := maxNbBits - for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits() >= currentNbBits { - continue - } - currentNbBits = huffNode[pos].nbBits() // < maxNbBits - rankLast[maxNbBits-currentNbBits] = uint32(pos) - } - } - - for totalCost > 0 { - nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 - - for ; nBitsToDecrease > 1; nBitsToDecrease-- { - highPos := rankLast[nBitsToDecrease] - lowPos := rankLast[nBitsToDecrease-1] - if highPos == noSymbol { - continue - } - if lowPos == noSymbol { - break - } - highTotal := huffNode[highPos].count() - lowTotal := 2 * huffNode[lowPos].count() - if highTotal <= lowTotal { - break - } - } - // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
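- // rankLast[d] holds the position of the smallest-count symbol currently
- // coded with maxNbBits-d bits, so the next code to lengthen is found
- // without rescanning the node list.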
- // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary - // FIXME: try to remove - for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { - nBitsToDecrease++ - } - totalCost -= 1 << (nBitsToDecrease - 1) - if rankLast[nBitsToDecrease-1] == noSymbol { - // this rank is no longer empty - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] - } - huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + - huffNode[rankLast[nBitsToDecrease]].nbBits()) - if rankLast[nBitsToDecrease] == 0 { - /* special case, reached largest symbol */ - rankLast[nBitsToDecrease] = noSymbol - } else { - rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { - rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ - } - } - } - - for totalCost < 0 { /* Sometimes, cost correction overshoot */ - if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits() == maxNbBits { - n-- - } - huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) - rankLast[1] = n + 1 - totalCost++ - continue - } - huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) - rankLast[1]++ - totalCost++ - } - } - return maxNbBits -} - -// A nodeElt is the fields -// -// count uint32 -// parent uint16 -// symbol byte -// nbBits uint8 -// -// in some order, all squashed into an integer so that the compiler -// always loads and stores entire nodeElts instead of separate fields. -type nodeElt uint64 - -func makeNodeElt(count uint32, symbol byte) nodeElt { - return nodeElt(count) | nodeElt(symbol)<<48 -} - -func (e *nodeElt) count() uint32 { return uint32(*e) } -func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } -func (e *nodeElt) symbol() byte { return byte(*e >> 48) } -func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } - -func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } -func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } -func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go deleted file mode 100644 index 0f56b02d74..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ /dev/null @@ -1,1167 +0,0 @@ -package huff0 - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/klauspost/compress/fse" -) - -type dTable struct { - single []dEntrySingle -} - -// single-symbols decoding -type dEntrySingle struct { - entry uint16 -} - -// Uses special code for all tables that are < 8 bits. -const use8BitTables = true - -// ReadTable will read a table from the input. -// The size of the input may be larger than the table definition. -// Any content remaining after the table definition will be returned. -// If no Scratch is provided a new one is allocated. -// The returned Scratch can be used for encoding or decoding input using this table. 
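-// The first byte selects the table encoding: values >= 128 mean the weights
-// are stored directly as 4-bit pairs, lower values give the byte length of an
-// FSE-compressed weight stream.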
-func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(nil) - if err != nil { - return s, nil, err - } - if len(in) <= 1 { - return s, nil, errors.New("input too small for table") - } - iSize := in[0] - in = in[1:] - if iSize >= 128 { - // Uncompressed - oSize := iSize - 127 - iSize = (oSize + 1) / 2 - if int(iSize) > len(in) { - return s, nil, errors.New("input too small for table") - } - for n := uint8(0); n < oSize; n += 2 { - v := in[n/2] - s.huffWeight[n] = v >> 4 - s.huffWeight[n+1] = v & 15 - } - s.symbolLen = uint16(oSize) - in = in[iSize:] - } else { - if len(in) < int(iSize) { - return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) - } - // FSE compressed weights - s.fse.DecompressLimit = 255 - hw := s.huffWeight[:] - s.fse.Out = hw - b, err := fse.Decompress(in[:iSize], s.fse) - s.fse.Out = nil - if err != nil { - return s, nil, fmt.Errorf("fse decompress returned: %w", err) - } - if len(b) > 255 { - return s, nil, errors.New("corrupt input: output table too large") - } - s.symbolLen = uint16(len(b)) - in = in[iSize:] - } - - // collect weight stats - var rankStats [16]uint32 - weightTotal := uint32(0) - for _, v := range s.huffWeight[:s.symbolLen] { - if v > tableLogMax { - return s, nil, errors.New("corrupt input: weight too large") - } - v2 := v & 15 - rankStats[v2]++ - // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. - weightTotal += (1 << v2) >> 1 - } - if weightTotal == 0 { - return s, nil, errors.New("corrupt input: weights zero") - } - - // get last non-null symbol weight (implied, total must be 2^n) - { - tableLog := highBit32(weightTotal) + 1 - if tableLog > tableLogMax { - return s, nil, errors.New("corrupt input: tableLog too big") - } - s.actualTableLog = uint8(tableLog) - // determine last weight - { - total := uint32(1) << tableLog - rest := total - weightTotal - verif := uint32(1) << highBit32(rest) - lastWeight := highBit32(rest) + 1 - if verif != rest { - // last value must be a clean power of 2 - return s, nil, errors.New("corrupt input: last value not power of two") - } - s.huffWeight[s.symbolLen] = uint8(lastWeight) - s.symbolLen++ - rankStats[lastWeight]++ - } - } - - if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { - // by construction : at least 2 elts of rank 1, must be even - return s, nil, errors.New("corrupt input: min elt size, even check failed ") - } - - // TODO: Choose between single/double symbol decoding - - // Calculate starting value for each rank - { - var nextRankStart uint32 - for n := uint8(1); n < s.actualTableLog+1; n++ { - current := nextRankStart - nextRankStart += rankStats[n] << (n - 1) - rankStats[n] = current - } - } - - // fill DTable (always full size) - tSize := 1 << tableLogMax - if len(s.dt.single) != tSize { - s.dt.single = make([]dEntrySingle, tSize) - } - cTable := s.prevTable - if cap(cTable) < maxSymbolValue+1 { - cTable = make([]cTableEntry, 0, maxSymbolValue+1) - } - cTable = cTable[:maxSymbolValue+1] - s.prevTable = cTable[:s.symbolLen] - s.prevTableLog = s.actualTableLog - - for n, w := range s.huffWeight[:s.symbolLen] { - if w == 0 { - cTable[n] = cTableEntry{ - val: 0, - nBits: 0, - } - continue - } - length := (uint32(1) << w) >> 1 - d := dEntrySingle{ - entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), - } - - rank := &rankStats[w] - cTable[n] = cTableEntry{ - val: uint16(*rank >> (w - 1)), - nBits: uint8(d.entry), - } - - single := s.dt.single[*rank : *rank+length] - for i := range single { - 
single[i] = d - } - *rank += length - } - - return s, in, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { - if cap(s.Out) < s.MaxDecodedSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:s.MaxDecodedSize] - s.Out, err = s.Decoder().Decompress1X(s.Out, in) - return s.Out, err -} - -// Decompress4X will decompress a 4X encoded stream. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// The length of the supplied input must match the end of a block exactly. -// The destination size of the uncompressed data must be known and provided. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { - if dstSize > s.MaxDecodedSize { - return nil, ErrMaxDecodedSizeExceeded - } - if cap(s.Out) < dstSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:dstSize] - s.Out, err = s.Decoder().Decompress4X(s.Out, in) - return s.Out, err -} - -// Decoder will return a stateless decoder that can be used by multiple -// decompressors concurrently. -// Before this is called, the table must be initialized with ReadTable. -// The Decoder is still linked to the scratch buffer so that cannot be reused. -// However, it is safe to discard the scratch. -func (s *Scratch) Decoder() *Decoder { - return &Decoder{ - dt: s.dt, - actualTableLog: s.actualTableLog, - bufs: &s.decPool, - } -} - -// Decoder provides stateless decoding. -type Decoder struct { - dt dTable - actualTableLog uint8 - bufs *sync.Pool -} - -func (d *Decoder) buffer() *[4][256]byte { - buf, ok := d.bufs.Get().(*[4][256]byte) - if ok { - return buf - } - return &[4][256]byte{} -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress1X8BitExactly(dst, src) - } - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - switch d.actualTableLog { - case 8: - const shift = 0 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 7: - const shift = 8 - 7 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 6: - const shift = 8 - 6 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 5: - const shift = 8 - 5 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 4: - const shift = 8 - 4 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 3: - const shift = 8 - 3 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 2: - const shift = 8 - 2 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 1: - const shift = 8 - 1 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - default: - d.bufs.Put(bufs) - return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - shift := (8 - d.actualTableLog) & 7 - - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()>>shift] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - const shift = 56 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress4X8bitExactly(dst, src) - } - - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - shift := (56 + (8 - d.actualTableLog)) & 63 - - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - // Decode remaining. 
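- // Fewer than four bytes remain in at least one stream, so the tail is
- // decoded one value at a time with manual refills below.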
- remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[uint8(br.value>>shift)].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const shift = 56 - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - // copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. 
- remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[br.peekByteFast()].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// matches will compare a decoding table to a coding table. -// Errors are written to the writer. -// Nothing will be written if table is ok. -func (s *Scratch) matches(ct cTable, w io.Writer) { - if s == nil || len(s.dt.single) == 0 { - return - } - dt := s.dt.single[:1<<s.actualTableLog] - tablelog := s.actualTableLog - ok := 0 - broken := 0 - for sym, enc := range ct { - errs := 0 - broken++ - if enc.nBits == 0 { - for _, dec := range dt { - if uint8(dec.entry>>8) == byte(sym) { - fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) - errs++ - break - } - } - if errs == 0 { - broken-- - } - continue - } - // Unused bits in input - ub := tablelog - enc.nBits - top := enc.val << ub - // decoder looks at top bits. - dec := dt[top] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 0 { - fmt.Fprintf(w, "%d errors in base, stopping\n", errs) - continue - } - // Ensure that all combinations are covered.
- for i := uint16(0); i < (1 << ub); i++ { - vval := top | i - dec := dt[vval] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 20 { - fmt.Fprintf(w, "%d errors, stopping\n", errs) - break - } - } - if errs == 0 { - ok++ - broken-- - } - } - if broken > 0 { - fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) - } -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go deleted file mode 100644 index ba7e8e6b02..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ /dev/null @@ -1,226 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// This file contains the specialisation of Decoder.Decompress4X -// and Decoder.Decompress1X that use an asm implementation of thir main loops. -package huff0 - -import ( - "errors" - "fmt" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog > 8. -// -//go:noescape -func decompress4x_main_loop_amd64(ctx *decompress4xContext) - -// decompress4x_8b_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog <= 8 which decodes 4 entries -// per loop. -// -//go:noescape -func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) - -// fallback8BitSize is the size where using Go version is faster. -const fallback8BitSize = 800 - -type decompress4xContext struct { - pbr *[4]bitReaderShifted - peekBits uint8 - out *byte - dstEvery int - tbl *dEntrySingle - decoded int - limit *byte -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - - use8BitTables := d.actualTableLog <= 8 - if cap(dst) < fallback8BitSize && use8BitTables { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - var decoded int - - if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { - ctx := decompress4xContext{ - pbr: &br, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - out: &out[0], - dstEvery: dstEvery, - tbl: &single[0], - limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. - } - if use8BitTables { - decompress4x_8b_main_loop_amd64(&ctx) - } else { - decompress4x_main_loop_amd64(&ctx) - } - - decoded = ctx.decoded - out = out[decoded/4:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_amd64(ctx *decompress1xContext) - -// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_bmi2(ctx *decompress1xContext) - -type decompress1xContext struct { - pbr *bitReaderShifted - peekBits uint8 - out *byte - outCap int - tbl *dEntrySingle - decoded int -} - -// Error reported by asm implementations -const error_max_decoded_size_exeeded = -1 - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. 
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:maxDecodedSize] - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - - if maxDecodedSize >= 4 { - ctx := decompress1xContext{ - pbr: &br, - out: &dst[0], - outCap: maxDecodedSize, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - tbl: &d.dt.single[0], - } - - if cpuinfo.HasBMI2() { - decompress1x_main_loop_bmi2(&ctx) - } else { - decompress1x_main_loop_amd64(&ctx) - } - if ctx.decoded == error_max_decoded_size_exeeded { - return nil, ErrMaxDecodedSizeExceeded - } - - dst = dst[:ctx.decoded] - } - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s deleted file mode 100644 index c4c7ab2d1f..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ /dev/null @@ -1,830 +0,0 @@ -// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. - -//go:build amd64 && !appengine && !noasm && gc - -// func decompress4x_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), AX - MOVBQZX 8(AX), DI - MOVQ 16(AX), BX - MOVQ 48(AX), SI - MOVQ 24(AX), R8 - MOVQ 32(AX), R9 - MOVQ (AX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ (R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 24(R10) - ORQ R13, R11 - - // exhausted += (br0.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 48(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 72(R10) - ORQ R13, R11 - - // exhausted += (br1.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 
:= table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 96(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 120(R10) - ORQ R13, R11 - - // exhausted += (br2.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br2.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 144(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 168(R10) - ORQ R13, R11 - - // exhausted += (br3.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVW AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x02, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), CX - MOVBQZX 8(CX), DI - MOVQ 16(CX), BX - MOVQ 48(CX), SI - MOVQ 24(CX), R8 - MOVQ 32(CX), R9 - MOVQ (CX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ (R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 24(R10) - ORQ R14, R11 - - // exhausted 
+= (br0.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 48(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 72(R10) - ORQ R14, R11 - - // exhausted += (br1.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 96(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 120(R10) - ORQ R14, R11 - - // exhausted += (br2.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 
- - // val1 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 144(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 168(R10) - ORQ R14, R11 - - // exhausted += (br3.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVL AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x04, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress1x_main_loop_amd64(ctx *decompress1xContext) -TEXT ·decompress1x_main_loop_amd64(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - 
-bitReader_fillFast_1_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - -bitReader_fillFast_2_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET - -// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) -// Requires: BMI2 -TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_1_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_2_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go deleted file mode 100644 index 908c17de63..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ /dev/null @@ -1,299 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// This file contains a generic implementation of Decoder.Decompress4X. -package huff0 - -import ( - "errors" - "fmt" -) - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. 
-// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. 
- if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - //copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
- - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go deleted file mode 100644 index 77ecd68e0a..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ /dev/null @@ -1,337 +0,0 @@ -// Package huff0 provides fast huffman encoding as used in zstd. -// -// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. -package huff0 - -import ( - "errors" - "fmt" - "math" - "math/bits" - "sync" - - "github.com/klauspost/compress/fse" -) - -const ( - maxSymbolValue = 255 - - // zstandard limits tablelog to 11, see: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description - tableLogMax = 11 - tableLogDefault = 11 - minTablelog = 5 - huffNodesLen = 512 - - // BlockSizeMax is maximum input size for a single block uncompressed. - BlockSizeMax = 1<<18 - 1 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. - ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") - - // ErrTooBig is return if input is too large for a single block. - ErrTooBig = errors.New("input too big") - - // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. - ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") -) - -type ReusePolicy uint8 - -const ( - // ReusePolicyAllow will allow reuse if it produces smaller output. - ReusePolicyAllow ReusePolicy = iota - - // ReusePolicyPrefer will re-use aggressively if possible. - // This will not check if a new table will produce smaller output, - // except if the current table is impossible to use or - // compressed output is bigger than input. - ReusePolicyPrefer - - // ReusePolicyNone will disable re-use of tables. - // This is slightly faster than ReusePolicyAllow but may produce larger output. - ReusePolicyNone - - // ReusePolicyMust must allow reuse and produce smaller output. - ReusePolicyMust -) - -type Scratch struct { - count [maxSymbolValue + 1]uint32 - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // OutTable will contain the table data only, if a new table has been generated. 
- // Slice of the returned data. - OutTable []byte - - // OutData will contain the compressed data. - // Slice of the returned data. - OutData []byte - - // MaxDecodedSize will set the maximum allowed output size. - // This value will automatically be set to BlockSizeMax if not set. - // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. - MaxDecodedSize int - - srcLen int - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - // Must be <= 11 and >= 5. - TableLog uint8 - - // Reuse will specify the reuse policy - Reuse ReusePolicy - - // WantLogLess allows to specify a log 2 reduction that should at least be achieved, - // otherwise the block will be returned as incompressible. - // The reduction should then at least be (input size >> WantLogLess) - // If WantLogLess == 0 any improvement will do. - WantLogLess uint8 - - symbolLen uint16 // Length of active part of the symbol table. - maxCount int // count of the most probable symbol - clearCount bool // clear count - actualTableLog uint8 // Selected tablelog. - prevTableLog uint8 // Tablelog for previous table - prevTable cTable // Table used for previous compression. - cTable cTable // compression table - dt dTable // decompression table - nodes []nodeElt - tmpOut [4][]byte - fse *fse.Scratch - decPool sync.Pool // *[4][256]byte buffers. - huffWeight [maxSymbolValue + 1]byte -} - -// TransferCTable will transfer the previously used compression table. -func (s *Scratch) TransferCTable(src *Scratch) { - if cap(s.prevTable) < len(src.prevTable) { - s.prevTable = make(cTable, 0, maxSymbolValue+1) - } - s.prevTable = s.prevTable[:len(src.prevTable)] - copy(s.prevTable, src.prevTable) - s.prevTableLog = src.prevTableLog -} - -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if len(in) > BlockSizeMax { - return nil, ErrTooBig - } - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = maxSymbolValue - } - if s.TableLog == 0 { - s.TableLog = tableLogDefault - } - if s.TableLog > tableLogMax || s.TableLog < minTablelog { - return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) - } - if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { - s.MaxDecodedSize = BlockSizeMax - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - s.Out = s.Out[:0] - - s.OutTable = nil - s.OutData = nil - if cap(s.nodes) < huffNodesLen+1 { - s.nodes = make([]nodeElt, 0, huffNodesLen+1) - } - s.nodes = s.nodes[:0] - if s.fse == nil { - s.fse = &fse.Scratch{} - } - s.srcLen = len(in) - - return s, nil -} - -type cTable []cTableEntry - -func (c cTable) write(s *Scratch) error { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. 
- hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - s.Out = append(s.Out, uint8(len(b))) - s.Out = append(s.Out, b...) - return nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return ErrIncompressible - } - op := s.Out - // special case, pack weights 4 bits/weight. - op = append(op, 128|(maxSymbolValue-1)) - // be sure it doesn't cause msan issue in final combination - huffWeight[maxSymbolValue] = 0 - for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { - op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) - } - s.Out = op - return nil -} - -func (c cTable) estTableSize(s *Scratch) (sz int, err error) { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - sz += 1 + len(b) - return sz, nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return 0, ErrIncompressible - } - // special case, pack weights 4 bits/weight. - sz += 1 + int(maxSymbolValue/2) - return sz, nil -} - -// estimateSize returns the estimated size in bytes of the input represented in the -// histogram supplied. -func (c cTable) estimateSize(hist []uint32) int { - nbBits := uint32(7) - for i, v := range c[:len(hist)] { - nbBits += uint32(v.nBits) * hist[i] - } - return int(nbBits >> 3) -} - -// minSize returns the minimum possible size considering the shannon limit. 
-func (s *Scratch) minSize(total int) int { - nbBits := float64(7) - fTotal := float64(total) - for _, v := range s.count[:s.symbolLen] { - n := float64(v) - if n > 0 { - nbBits += math.Log2(fTotal/n) * n - } - } - return int(nbBits) >> 3 -} - -func highBit32(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go deleted file mode 100644 index 3954c51219..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package cpuinfo gives runtime info about the current CPU. -// -// This is a very limited module meant for use internally -// in this project. For more versatile solution check -// https://github.com/klauspost/cpuid. -package cpuinfo - -// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. -func HasBMI1() bool { - return hasBMI1 -} - -// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. -func HasBMI2() bool { - return hasBMI2 -} - -// DisableBMI2 will disable BMI2, for testing purposes. -// Call returned function to restore previous state. -func DisableBMI2() func() { - old := hasBMI2 - hasBMI2 = false - return func() { - hasBMI2 = old - } -} - -// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. -func HasBMI() bool { - return HasBMI1() && HasBMI2() -} - -var hasBMI1 bool -var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go deleted file mode 100644 index e802579c4f..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package cpuinfo - -// go:noescape -func x86extensions() (bmi1, bmi2 bool) - -func init() { - hasBMI1, hasBMI2 = x86extensions() -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s deleted file mode 100644 index 4465fbe9e9..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s +++ /dev/null @@ -1,36 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - -TEXT ·x86extensions(SB), NOSPLIT, $0 - // 1. determine max EAX value - XORQ AX, AX - CPUID - - CMPQ AX, $7 - JB unsupported - - // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" - MOVQ $7, AX - MOVQ $0, CX - CPUID - - BTQ $3, BX // bit 3 = BMI1 - SETCS AL - - BTQ $8, BX // bit 8 = BMI2 - SETCS AH - - MOVB AL, bmi1+0(FP) - MOVB AH, bmi2+1(FP) - RET - -unsupported: - XORQ AX, AX - MOVB AL, bmi1+0(FP) - MOVB AL, bmi2+1(FP) - RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE deleted file mode 100644 index 6050c10f4c..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go deleted file mode 100644 index 40796a49d6..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. 
-func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. 
Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go deleted file mode 100644 index 77395a6b8b..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go deleted file mode 100644 index 13c6040a5d..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. 
-const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. 
-func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. 
- w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go deleted file mode 100644 index 2754bac6f1..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). 
The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// EncodeBlockInto exposes encodeBlock but checks dst size. -func EncodeBlockInto(dst, src []byte) (d int) { - if MaxEncodedLen(len(src)) > len(dst) { - return 0 - } - - // encodeBlock breaks on too big blocks, so split. - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return d -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. 
This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go deleted file mode 100644 index 34d01f4aa6..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snapref implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. 
-// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snapref - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. 
- maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod deleted file mode 100644 index 5a4412f907..0000000000 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ /dev/null @@ -1,4 +0,0 @@ -module github.com/klauspost/compress - -go 1.19 - diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md deleted file mode 100644 index 92e2347bbc..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ /dev/null @@ -1,441 +0,0 @@ -# zstd - -[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. -It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. -A high performance compression algorithm is implemented. For now focused on speed. - -This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. - -This package is pure Go and without use of "unsafe". - -The `zstd` package is provided as open source software using a Go standard license. - -Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. - -For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). - -## Installation - -Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. - -[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) - -## Compressor - -### Status: - -STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively -used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. - -There may still be specific combinations of data types/size/settings that could lead to edge cases, -so as always, testing is recommended. - -For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. - -* The "Fastest" compression ratio is roughly equivalent to zstd level 1. -* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). -* The "Better" compression ratio is roughly equivalent to zstd level 7. -* The "Best" compression ratio is roughly equivalent to zstd level 11. - -In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. -The compression ratio compared to stdlib is around level 3, but usually 3x as fast. 
- - -### Usage - -An Encoder can be used for either compressing a stream via the -`io.WriteCloser` interface supported by the Encoder or as multiple independent -tasks via the `EncodeAll` function. -Smaller encodes are encouraged to use the EncodeAll function. -Use `NewWriter` to create a new instance that can be used for both. - -To create a writer with default options, do like this: - -```Go -// Compress input to output. -func Compress(in io.Reader, out io.Writer) error { - enc, err := zstd.NewWriter(out) - if err != nil { - return err - } - _, err = io.Copy(enc, in) - if err != nil { - enc.Close() - return err - } - return enc.Close() -} -``` - -Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. -Even if your encode fails, you should still call `Close()` to release any resources that may be held up. - -The above is fine for big encodes. However, whenever possible try to *reuse* the writer. - -To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. -This will allow the encoder to reuse all resources and avoid wasteful allocations. - -Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part -of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change -in the future. So if you want to limit concurrency for future updates, specify the concurrency -you would like. - -If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` -which will compress input as each block is completed, blocking on writes until each has completed. - -You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined -compression settings can be specified. - -#### Future Compatibility Guarantees - -This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change. - -The goal will be to keep the default efficiency at the default zstd (level 3). -However the encoding should never be assumed to remain the same, -and you should not use hashes of compressed output for similarity checks. - -The Encoder can be assumed to produce the same output from the exact same code version. -However, the may be modes in the future that break this, -although they will not be enabled without an explicit option. - -This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. - -Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), -[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) -and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). - -#### Blocks - -For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. - -`EncodeAll` will encode all input in src and append it to dst. -This function can be called concurrently. -Each call will only run on a same goroutine as the caller. - -Encoded blocks can be concatenated and the result will be the combined input stream. -Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. 
- -Especially when encoding blocks you should take special care to reuse the encoder. -This will effectively make it run without allocations after a warmup period. -To make it run completely without allocations, supply a destination buffer with space for all content. - -```Go -import "github.com/klauspost/compress/zstd" - -// Create a writer that caches compressors. -// For this operation type we supply a nil Reader. -var encoder, _ = zstd.NewWriter(nil) - -// Compress a buffer. -// If you have a destination buffer, the allocation in the call can also be eliminated. -func Compress(src []byte) []byte { - return encoder.EncodeAll(src, make([]byte, 0, len(src))) -} -``` - -You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` -option when creating the writer. - -Using the Encoder for both a stream and individual blocks concurrently is safe. - -### Performance - -I have collected some speed examples to compare speed and compression against other compressors. - -* `file` is the input file. -* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. -* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". -* `insize`/`outsize` is the input/output size. -* `millis` is the number of milliseconds used for compression. -* `mb/s` is megabytes (2^20 bytes) per second. - -``` -Silesia Corpus: -http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip - -This package: -file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73821326 634 318.47 -silesia.tar zskp 2 211947520 67655404 1508 133.96 -silesia.tar zskp 3 211947520 64746933 3000 67.37 -silesia.tar zskp 4 211947520 60073508 16926 11.94 - -cgo zstd: -silesia.tar zstd 1 211947520 73605392 543 371.56 -silesia.tar zstd 3 211947520 66793289 864 233.68 -silesia.tar zstd 6 211947520 62916450 1913 105.66 -silesia.tar zstd 9 211947520 60212393 5063 39.92 - -gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1498 134.87 -silesia.tar gzkp 1 211947520 80088272 1009 200.31 - -GOB stream of binary data. Highly compressible. -https://files.klauspost.com/compress/gob-stream.7z - -file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 233948096 3230 564.34 -gob-stream zskp 2 1911399616 203997694 4997 364.73 -gob-stream zskp 3 1911399616 173526523 13435 135.68 -gob-stream zskp 4 1911399616 162195235 47559 38.33 - -gob-stream zstd 1 1911399616 249810424 2637 691.26 -gob-stream zstd 3 1911399616 208192146 3490 522.31 -gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream zstd 9 1911399616 177620386 16175 112.70 - -gob-stream gzstd 1 1911399616 357382013 9046 201.49 -gob-stream gzkp 1 1911399616 359136669 4885 373.08 - -The test data for the Large Text Compression Benchmark is the first -10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. 
-http://mattmahoney.net/dc/textdata.html - -file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343833605 3687 258.64 -enwik9 zskp 2 1000000000 317001237 7672 124.29 -enwik9 zskp 3 1000000000 291915823 15923 59.89 -enwik9 zskp 4 1000000000 261710291 77697 12.27 - -enwik9 zstd 1 1000000000 358072021 3110 306.65 -enwik9 zstd 3 1000000000 313734672 4784 199.35 -enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 zstd 9 1000000000 278348700 28549 33.40 - -enwik9 gzstd 1 1000000000 382578136 8608 110.78 -enwik9 gzkp 1 1000000000 382781160 5628 169.45 - -Highly compressible JSON file. -https://files.klauspost.com/compress/github-june-2days-2019.json.zst - -file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 -github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 -github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 -github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 - -github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 -github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 -github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 -github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 - -github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 -github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 - -VM Image, Linux mint with a few installed applications: -https://files.klauspost.com/compress/rawstudio-mint14.7z - -file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 -rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 -rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 -rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 - -rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 -rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 -rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 -rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 - -rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 -rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 - -CSV data: -https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst - -file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 -nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 -nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 -nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 - -nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 -nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 -nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 -nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 - -nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 -nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 -``` - -## Decompressor - -Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. - -This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), -kindly supplied by [fuzzit.dev](https://fuzzit.dev/). -The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, -or run it past its limits with ANY input provided. - -### Usage - -The package has been designed for two main usages, big streams of data and smaller in-memory buffers. 
-There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. - -For streaming use a simple setup could look like this: - -```Go -import "github.com/klauspost/compress/zstd" - -func Decompress(in io.Reader, out io.Writer) error { - d, err := zstd.NewReader(in) - if err != nil { - return err - } - defer d.Close() - - // Copy content... - _, err = io.Copy(out, d) - return err -} -``` - -It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, -when running with default settings. -Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. - -Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. -However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data -as it is being requested only. - -For decoding buffers, it could look something like this: - -```Go -import "github.com/klauspost/compress/zstd" - -// Create a reader that caches decompressors. -// For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) - -// Decompress a buffer. We don't supply a destination buffer, -// so it will be allocated by the decoder. -func Decompress(src []byte) ([]byte, error) { - return decoder.DecodeAll(src, nil) -} -``` - -Both of these cases should provide the functionality needed. -The decoder can be used for *concurrent* decompression of multiple buffers. -By default 4 decompressors will be created. - -It will only allow a certain number of concurrent operations to run. -To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. -It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. - -### Dictionaries - -Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. - -Dictionaries are added individually to Decoders. -Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. -To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. -Several dictionaries can be added at once. - -The dictionary will be used automatically for the data that specifies them. -A re-used Decoder will still contain the dictionaries registered. - -When registering multiple dictionaries with the same ID, the last one will be used. - -It is possible to use dictionaries when compressing data. - -To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used -and it will likely be used even if it doesn't improve compression. - -The used dictionary must be used to decompress the content. - -For any real gains, the dictionary should be built with similar data. -If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. -Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. -For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). - -For now there is a fixed startup performance penalty for compressing content with dictionaries. -This will likely be improved over time. Just be aware to test performance when implementing. - -### Allocation-less operation - -The decoder has been designed to operate without allocations after a warmup. 
- -This means that you should *store* the decoder for best performance. -To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. -A decoder can safely be re-used even if the previous stream failed. - -To release the resources, you must call the `Close()` function on a decoder. -After this it can *no longer be reused*, but all running goroutines will be stopped. -So you *must* use this if you will no longer need the Reader. - -For decompressing smaller buffers a single decoder can be used. -When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. -In this case no unneeded allocations should be made. - -### Concurrency - -The buffer decoder does everything on the same goroutine and does nothing concurrently. -It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. - -The stream decoder will create goroutines that: - -1) Reads input and splits the input into blocks. -2) Decompression of literals. -3) Decompression of sequences. -4) Reconstruction of output stream. - -So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. - -The concurrency level will, for streams, determine how many blocks ahead the compression will start. - -Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. - -In practice this means that concurrency is often limited to utilizing about 3 cores effectively. - -### Benchmarks - -The first two are streaming decodes and the last are smaller inputs. - -Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. - -``` -BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op -BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op - -Concurrent blocks, performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op -``` - -This reflects the performance around May 2022, but this may be out of date. - -## Zstd inside ZIP files - -It is possible to use zstandard to compress individual files inside zip archives. 
-While this isn't widely supported it can be useful for internal files. - -To support the compression and decompression of these files you must register a compressor and decompressor. - -It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT -use the global registration functions. The main reason for this is that 2 registrations from -different packages will result in a panic. - -It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip -files concurrently, and using a single instance will allow reusing some resources. - -See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for -how to compress and decompress files inside zip archives. - -# Contributions - -Contributions are always welcome. -For new features/fixes, remember to add tests and for performance enhancements include benchmarks. - -For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). - -This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go deleted file mode 100644 index 25ca983941..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math/bits" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - value uint64 // Maybe use [16]byte, but shifting is awkward. - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) int { - if n == 0 /*|| b.bitsRead >= 64 */ { - return 0 - } - return int(b.get32BitsFast(n)) -} - -// get32BitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) get32BitsFast(n uint8) uint32 { - const regMask = 64 - 1 - v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. 
-func (b *bitReader) fillFastStart() { - v := b.in[len(b.in)-8:] - b.in = b.in[:len(b.in)-8] - b.value = binary.LittleEndian.Uint64(v) - b.bitsRead = 0 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if len(b.in) >= 4 { - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - return - } - - b.bitsRead -= uint8(8 * len(b.in)) - for len(b.in) > 0 { - b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) - b.in = b.in[:len(b.in)-1] - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return len(b.in) == 0 && b.bitsRead >= 64 -} - -// overread returns true if more bits have been requested than is on the stream. -func (b *bitReader) overread() bool { - return b.bitsRead > 64 -} - -// remain returns the number of bits remaining. -func (b *bitReader) remain() uint { - return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if !b.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go deleted file mode 100644 index 1952f175b0..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package zstd - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits32NC will add up to 31 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits32NC(value uint32, bits uint8) { - b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits64NC will add up to 64 bits. -// There must be space for 32 bits. 
-func (b *bitWriter) addBits64NC(value uint64, bits uint8) { - if bits <= 31 { - b.addBits32Clean(uint32(value), bits) - return - } - b.addBits32Clean(uint32(value), 32) - b.flush32() - b.addBits32Clean(uint32(value>>32), bits-32) -} - -// addBits32Clean will add up to 32 bits. -// It will not check if there is space for them. -// The input must not contain more bits than specified. -func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go deleted file mode 100644 index 9c28840c3b..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "hash/crc32" - "io" - "os" - "path/filepath" - "sync" - - "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type blockType uint8 - -//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex - -const ( - blockTypeRaw blockType = iota - blockTypeRLE - blockTypeCompressed - blockTypeReserved -) - -type literalsBlockType uint8 - -const ( - literalsBlockRaw literalsBlockType = iota - literalsBlockRLE - literalsBlockCompressed - literalsBlockTreeless -) - -const ( - // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) - maxCompressedBlockSize = 128 << 10 - - compressedBlockOverAlloc = 16 - maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc - - // Maximum possible block size (all Raw+Uncompressed). - maxBlockSize = (1 << 21) - 1 - - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff - - // We support slightly less than the reference decoder to be able to - // use ints on 32 bit archs. 
- maxOffsetBits = 30 -) - -var ( - huffDecoderPool = sync.Pool{New: func() interface{} { - return &huff0.Scratch{} - }} - - fseDecoderPool = sync.Pool{New: func() interface{} { - return &fseDecoder{} - }} -) - -type blockDec struct { - // Raw source data of the block. - data []byte - dataStorage []byte - - // Destination of the decoded data. - dst []byte - - // Buffer for literals data. - literalBuf []byte - - // Window size of the block. - WindowSize uint64 - - err error - - // Check against this crc, if hasCRC is true. - checkCRC uint32 - hasCRC bool - - // Frame to use for singlethreaded decoding. - // Should not be used by the decoder itself since parent may be another frame. - localFrame *frameDec - - sequence []seqVals - - async struct { - newHist *history - literals []byte - seqData []byte - seqSize int // Size of uncompressed sequences - fcs uint64 - } - - // Block is RLE, this is the size. - RLESize uint32 - - Type blockType - - // Is this the last block of a frame? - Last bool - - // Use less memory - lowMem bool -} - -func (b *blockDec) String() string { - if b == nil { - return "" - } - return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) -} - -func newBlockDec(lowMem bool) *blockDec { - b := blockDec{ - lowMem: lowMem, - } - return &b -} - -// reset will reset the block. -// Input must be a start of a block and will be at the end of the block when returned. -func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { - b.WindowSize = windowSize - tmp, err := br.readSmall(3) - if err != nil { - println("Reading block header:", err) - return err - } - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - b.Last = bh&1 != 0 - b.Type = blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - maxSize := maxCompressedBlockSizeAlloc - switch b.Type { - case blockTypeReserved: - return ErrReservedBlockType - case blockTypeRLE: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - b.RLESize = uint32(cSize) - if b.lowMem { - maxSize = cSize - } - cSize = 1 - case blockTypeCompressed: - if debugDecoder { - println("Data size on stream:", cSize) - } - b.RLESize = 0 - maxSize = maxCompressedBlockSizeAlloc - if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + compressedBlockOverAlloc - } - if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debugDecoder { - printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrCompressedSizeTooBig - } - // Empty compressed blocks must at least be 2 bytes - // for Literals_Block_Type and one for Sequences_Section_Header. - if cSize < 2 { - return ErrBlockTooSmall - } - case blockTypeRaw: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - - b.RLESize = 0 - // We do not need a destination for raw blocks. - maxSize = -1 - default: - panic("Invalid block type") - } - - // Read block data. - if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { - // byteBuf doesn't need a destination buffer. 
- if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) - } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) - } - } - b.data, err = br.readBig(cSize, b.dataStorage) - if err != nil { - if debugDecoder { - println("Reading block:", err, "(", cSize, ")", len(b.data)) - printf("%T", br) - } - return err - } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } - return nil -} - -// sendEOF will make the decoder send EOF on this frame. -func (b *blockDec) sendErr(err error) { - b.Last = true - b.Type = blockTypeReserved - b.err = err -} - -// Close will release resources. -// Closed blockDec cannot be reset. -func (b *blockDec) Close() { -} - -// decodeBuf -func (b *blockDec) decodeBuf(hist *history) error { - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxCompressedBlockSize) - } - } - b.dst = b.dst[:b.RLESize] - v := b.data[0] - for i := range b.dst { - b.dst[i] = v - } - hist.appendKeep(b.dst) - return nil - case blockTypeRaw: - hist.appendKeep(b.data) - return nil - case blockTypeCompressed: - saved := b.dst - // Append directly to history - if hist.ignoreBuffer == 0 { - b.dst = hist.b - hist.b = nil - } else { - b.dst = b.dst[:0] - } - err := b.decodeCompressed(hist) - if debugDecoder { - println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) - } - if hist.ignoreBuffer == 0 { - hist.b = b.dst - b.dst = saved - } else { - hist.appendKeep(b.dst) - } - return err - case blockTypeReserved: - // Used for returning errors. - return b.err - default: - panic("Invalid block type") - } -} - -func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { - // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header - if len(in) < 2 { - return in, ErrBlockTooSmall - } - - litType := literalsBlockType(in[0] & 3) - var litRegenSize int - var litCompSize int - sizeFormat := (in[0] >> 2) & 3 - var fourStreams bool - var literals []byte - switch litType { - case literalsBlockRaw, literalsBlockRLE: - switch sizeFormat { - case 0, 2: - // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. - litRegenSize = int(in[0] >> 3) - in = in[1:] - case 1: - // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) - in = in[2:] - case 3: - // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. - if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) - in = in[3:] - } - case literalsBlockCompressed, literalsBlockTreeless: - switch sizeFormat { - case 0, 1: - // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
- if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) - litRegenSize = int(n & 1023) - litCompSize = int(n >> 10) - fourStreams = sizeFormat == 1 - in = in[3:] - case 2: - fourStreams = true - if len(in) < 4 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) - litRegenSize = int(n & 16383) - litCompSize = int(n >> 14) - in = in[4:] - case 3: - fourStreams = true - if len(in) < 5 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) - litRegenSize = int(n & 262143) - litCompSize = int(n >> 18) - in = in[5:] - } - } - if debugDecoder { - println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) - } - if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { - return in, ErrWindowSizeExceeded - } - - switch litType { - case literalsBlockRaw: - if len(in) < litRegenSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return in, ErrBlockTooSmall - } - literals = in[:litRegenSize] - in = in[litRegenSize:] - //printf("Found %d uncompressed literals\n", litRegenSize) - case literalsBlockRLE: - if len(in) < 1 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return in, ErrBlockTooSmall - } - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - literals = b.literalBuf[:litRegenSize] - v := in[0] - for i := range literals { - literals[i] = v - } - in = in[1:] - if debugDecoder { - printf("Found %d RLE compressed literals\n", litRegenSize) - } - case literalsBlockTreeless: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - // Store compressed literals, so we defer decoding until we get history. - literals = in[:litCompSize] - in = in[litCompSize:] - if debugDecoder { - printf("Found %d compressed literals\n", litCompSize) - } - huff := hist.huffTree - if huff == nil { - return in, errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - var err error - // Use our out buffer. 
- huff.MaxDecodedSize = litRegenSize - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return in, err - } - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - - case literalsBlockCompressed: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - literals = in[:litCompSize] - in = in[litCompSize:] - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - huff := hist.huffTree - if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { - huff = huffDecoderPool.Get().(*huff0.Scratch) - if huff == nil { - huff = &huff0.Scratch{} - } - } - var err error - if debugDecoder { - println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) - } - huff, literals, err = huff0.ReadTable(literals, huff) - if err != nil { - println("reading huffman table:", err) - return in, err - } - hist.huffTree = huff - huff.MaxDecodedSize = litRegenSize - // Use our out buffer. - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - if err != nil { - println("decoding compressed literals:", err) - return in, err - } - // Make sure we don't leak our literals buffer - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - // Re-cap to get extra size. - literals = b.literalBuf[:len(literals)] - if debugDecoder { - printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) - } - } - hist.decoders.literals = literals - return in, nil -} - -// decodeCompressed will start decompressing a block. -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - in, err := b.decodeLiterals(in, hist) - if err != nil { - return err - } - err = b.prepareSequences(in, hist) - if err != nil { - return err - } - if hist.decoders.nSeqs == 0 { - b.dst = append(b.dst, hist.decoders.literals...) 
- return nil - } - before := len(hist.decoders.out) - err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) - if err != nil { - return err - } - if hist.decoders.maxSyncLen > 0 { - hist.decoders.maxSyncLen += uint64(before) - hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) - } - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - return nil -} - -func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { - if debugDecoder { - printf("prepareSequences: %d byte(s) input\n", len(in)) - } - // Decode Sequences - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section - if len(in) < 1 { - return ErrBlockTooSmall - } - var nSeqs int - seqHeader := in[0] - switch { - case seqHeader < 128: - nSeqs = int(seqHeader) - in = in[1:] - case seqHeader < 255: - if len(in) < 2 { - return ErrBlockTooSmall - } - nSeqs = int(seqHeader-128)<<8 | int(in[1]) - in = in[2:] - case seqHeader == 255: - if len(in) < 3 { - return ErrBlockTooSmall - } - nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) - in = in[3:] - } - if nSeqs == 0 && len(in) != 0 { - // When no sequences, there should not be any more data... - if debugDecoder { - printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) - } - return ErrUnexpectedBlockSize - } - - var seqs = &hist.decoders - seqs.nSeqs = nSeqs - if nSeqs > 0 { - if len(in) < 1 { - return ErrBlockTooSmall - } - br := byteReader{b: in, off: 0} - compMode := br.Uint8() - br.advance(1) - if debugDecoder { - printf("Compression modes: 0b%b", compMode) - } - if compMode&3 != 0 { - return errors.New("corrupt block: reserved bits not zero") - } - for i := uint(0); i < 3; i++ { - mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debugDecoder { - println("Table", tableIndex(i), "is", mode) - } - var seq *sequenceDec - switch tableIndex(i) { - case tableLiteralLengths: - seq = &seqs.litLengths - case tableOffsets: - seq = &seqs.offsets - case tableMatchLengths: - seq = &seqs.matchLengths - default: - panic("unknown table") - } - switch mode { - case compModePredefined: - if seq.fse != nil && !seq.fse.preDefined { - fseDecoderPool.Put(seq.fse) - } - seq.fse = &fsePredef[i] - case compModeRLE: - if br.remain() < 1 { - return ErrBlockTooSmall - } - v := br.Uint8() - br.advance(1) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - symb, err := decSymbolValue(v, symbolTableX[i]) - if err != nil { - printf("RLE Transform table (%v) error: %v", tableIndex(i), err) - return err - } - seq.fse.setRLE(symb) - if debugDecoder { - printf("RLE set to 0x%x, code: %v", symb, v) - } - case compModeFSE: - if debugDecoder { - println("Reading table for", tableIndex(i)) - } - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) - if err != nil { - println("Read table error:", err) - return err - } - err = seq.fse.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder { - println("Read table ok", "symbolLen:", seq.fse.symbolLen) - } - case compModeRepeat: - seq.repeat = true - } - if br.overread() { - return io.ErrUnexpectedEOF - } - } - in = br.unread() - } - if debugDecoder { - println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") - } - - if nSeqs == 0 { - if len(b.sequence) > 0 { - b.sequence = b.sequence[:0] - } - return nil 
- } - br := seqs.br - if br == nil { - br = &bitReader{} - } - if err := br.init(in); err != nil { - return err - } - - if err := seqs.initialize(br, hist, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - // Extract blocks... - if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } - - return nil -} - -func (b *blockDec) decodeSequences(hist *history) error { - if cap(b.sequence) < hist.decoders.nSeqs { - if b.lowMem { - b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) - } else { - b.sequence = make([]seqVals, 0, 0x7F00+0xffff) - } - } - b.sequence = b.sequence[:hist.decoders.nSeqs] - if hist.decoders.nSeqs == 0 { - hist.decoders.seqSize = len(hist.decoders.literals) - return nil - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.prevOffset = hist.recentOffsets - - err := hist.decoders.decode(b.sequence) - hist.recentOffsets = hist.decoders.prevOffset - return err -} - -func (b *blockDec) executeSequences(hist *history) error { - hbytes := hist.b - if len(hbytes) > hist.windowSize { - hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history anymore. - if hist.dict != nil { - hist.dict.content = nil - } - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.out = b.dst[:0] - err := hist.decoders.execute(b.sequence, hbytes) - if err != nil { - return err - } - return b.updateHistory(hist) -} - -func (b *blockDec) updateHistory(hist *history) error { - if len(b.data) > maxCompressedBlockSize { - return fmt.Errorf("compressed block size too large (%d)", len(b.data)) - } - // Set output and release references. - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - - if b.Last { - // if last block we don't care about history. - println("Last block, no history returned") - hist.b = hist.b[:0] - return nil - } else { - hist.append(b.dst) - if debugDecoder { - println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) - } - } - hist.decoders.out, hist.decoders.literals = nil, nil - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go deleted file mode 100644 index 32a7f401d5..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ /dev/null @@ -1,909 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
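Background note on the sequences handling in the removed blockdec.go above and the blockenc.go that follows: the number of sequences in a block is carried in a 1-3 byte header, parsed in prepareSequences and written back out in blockEnc.encode. The sketch below illustrates only that byte layout; it is not part of the vendored code, and decodeNumSequences is a hypothetical helper name.

package main

import (
	"fmt"
	"io"
)

// decodeNumSequences mirrors the sequence-count header layout parsed by
// prepareSequences: 1 byte for up to 127 sequences, 2 bytes up to 0x7EFF,
// and 3 bytes (first byte 255) beyond that. Hypothetical helper.
func decodeNumSequences(in []byte) (nSeqs, hdrLen int, err error) {
	if len(in) < 1 {
		return 0, 0, io.ErrUnexpectedEOF
	}
	switch b0 := in[0]; {
	case b0 < 128:
		return int(b0), 1, nil
	case b0 < 255:
		if len(in) < 2 {
			return 0, 0, io.ErrUnexpectedEOF
		}
		return int(b0-128)<<8 | int(in[1]), 2, nil
	default: // b0 == 255
		if len(in) < 3 {
			return 0, 0, io.ErrUnexpectedEOF
		}
		return 0x7F00 + int(in[1]) + int(in[2])<<8, 3, nil
	}
}

func main() {
	n, l, err := decodeNumSequences([]byte{200, 17})
	fmt.Println(n, l, err) // 18449 2 <nil>
}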
- -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/huff0" -) - -type blockEnc struct { - size int - literals []byte - sequences []seq - coders seqCoders - litEnc *huff0.Scratch - dictLitEnc *huff0.Scratch - wr bitWriter - - extraLits int - output []byte - recentOffsets [3]uint32 - prevRecentOffsets [3]uint32 - - last bool - lowMem bool -} - -// init should be used once the block has been created. -// If called more than once, the effect is the same as calling reset. -func (b *blockEnc) init() { - if b.lowMem { - // 1K literals - if cap(b.literals) < 1<<10 { - b.literals = make([]byte, 0, 1<<10) - } - const defSeqs = 20 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - // 1K - if cap(b.output) < 1<<10 { - b.output = make([]byte, 0, 1<<10) - } - } else { - if cap(b.literals) < maxCompressedBlockSize { - b.literals = make([]byte, 0, maxCompressedBlockSize) - } - const defSeqs = 2000 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) - } - } - - if b.coders.mlEnc == nil { - b.coders.mlEnc = &fseEncoder{} - b.coders.mlPrev = &fseEncoder{} - b.coders.ofEnc = &fseEncoder{} - b.coders.ofPrev = &fseEncoder{} - b.coders.llEnc = &fseEncoder{} - b.coders.llPrev = &fseEncoder{} - } - b.litEnc = &huff0.Scratch{WantLogLess: 4} - b.reset(nil) -} - -// initNewEncode can be used to reset offsets and encoders to the initial state. -func (b *blockEnc) initNewEncode() { - b.recentOffsets = [3]uint32{1, 4, 8} - b.litEnc.Reuse = huff0.ReusePolicyNone - b.coders.setPrev(nil, nil, nil) -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) reset(prev *blockEnc) { - b.extraLits = 0 - b.literals = b.literals[:0] - b.size = 0 - b.sequences = b.sequences[:0] - b.output = b.output[:0] - b.last = false - if prev != nil { - b.recentOffsets = prev.prevRecentOffsets - } - b.dictLitEnc = nil -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) swapEncoders(prev *blockEnc) { - b.coders.swap(&prev.coders) - b.litEnc, prev.litEnc = prev.litEnc, b.litEnc -} - -// blockHeader contains the information for a block header. -type blockHeader uint32 - -// setLast sets the 'last' indicator on a block. -func (h *blockHeader) setLast(b bool) { - if b { - *h = *h | 1 - } else { - const mask = (1 << 24) - 2 - *h = *h & mask - } -} - -// setSize will store the compressed size of a block. -func (h *blockHeader) setSize(v uint32) { - const mask = 7 - *h = (*h)&mask | blockHeader(v<<3) -} - -// setType sets the block type. -func (h *blockHeader) setType(t blockType) { - const mask = 1 | (((1 << 24) - 1) ^ 7) - *h = (*h & mask) | blockHeader(t<<1) -} - -// appendTo will append the block header to a slice. -func (h blockHeader) appendTo(b []byte) []byte { - return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) -} - -// String returns a string representation of the block. -func (h blockHeader) String() string { - return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) -} - -// literalsHeader contains literals header information. 
-type literalsHeader uint64 - -// setType can be used to set the type of literal block. -func (h *literalsHeader) setType(t literalsBlockType) { - const mask = math.MaxUint64 - 3 - *h = (*h & mask) | literalsHeader(t) -} - -// setSize can be used to set a single size, for uncompressed and RLE content. -func (h *literalsHeader) setSize(regenLen int) { - inBits := bits.Len32(uint32(regenLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case inBits < 5: - lh |= (uint64(regenLen) << 3) | (1 << 60) - if debugEncoder { - got := int(lh>>3) & 0xff - if got != regenLen { - panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) - } - } - case inBits < 12: - lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) - case inBits < 20: - lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) - default: - panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) - } - *h = literalsHeader(lh) -} - -// setSizes will set the size of a compressed literals section and the input length. -func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { - compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case compBits <= 10 && inBits <= 10: - if !single { - lh |= 1 << 2 - } - lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debugEncoder { - const mmask = (1 << 24) - 1 - n := (lh >> 4) & mmask - if int(n&1023) != inLen { - panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) - } - if int(n>>10) != compLen { - panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) - } - } - case compBits <= 14 && inBits <= 14: - lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - case compBits <= 18 && inBits <= 18: - lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - default: - panic("internal error: block too big") - } - *h = literalsHeader(lh) -} - -// appendTo will append the literals header to a byte slice. -func (h literalsHeader) appendTo(b []byte) []byte { - size := uint8(h >> 60) - switch size { - case 1: - b = append(b, uint8(h)) - case 2: - b = append(b, uint8(h), uint8(h>>8)) - case 3: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) - case 4: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) - case 5: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) - default: - panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) - } - return b -} - -// size returns the output size with currently set values. -func (h literalsHeader) size() int { - return int(h >> 60) -} - -func (h literalsHeader) String() string { - return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) pushOffsets() { - b.prevRecentOffsets = b.recentOffsets -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) popOffsets() { - b.recentOffsets = b.prevRecentOffsets -} - -// matchOffset will adjust recent offsets and return the adjusted one, -// if it matches a previous offset. 
-func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. - if true { - if lits > 0 { - switch offset { - case b.recentOffsets[0]: - offset = 1 - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } else { - switch offset { - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 1 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[0] - 1: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } - } else { - offset += 3 - } - return offset -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRaw(a []byte) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(a))) - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output[:0]) - b.output = append(b.output, a...) - if debugEncoder { - println("Adding RAW block, length", len(a), "last:", b.last) - } -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(src))) - bh.setType(blockTypeRaw) - dst = bh.appendTo(dst) - dst = append(dst, src...) - if debugEncoder { - println("Adding RAW block, length", len(src), "last:", b.last) - } - return dst -} - -// encodeLits can be used if the block is only litLen. -func (b *blockEnc) encodeLits(lits []byte, raw bool) error { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(lits))) - - // Don't compress extremely small blocks - if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - } - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(lits) >= 1024 { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 16 { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(lits, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - if err == nil && len(out)+5 > len(lits) { - // If we are close, we may still be worse or equal to raw. 
- var lh literalsHeader - lh.setSizes(len(out), len(lits), single) - if len(out)+lh.size() >= len(lits) { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - case huff0.ErrUseRLE: - if debugEncoder { - println("Adding RLE block, length", len(lits)) - } - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits[0]) - return nil - case nil: - default: - return err - } - // Compressed... - // Now, allow reuse - b.litEnc.Reuse = huff0.ReusePolicyAllow - bh.setType(blockTypeCompressed) - var lh literalsHeader - if reUsed { - if debugEncoder { - println("Reused tree, compressed to", len(out)) - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - } - // Set sizes - lh.setSizes(len(out), len(lits), single) - bh.setSize(uint32(len(out) + lh.size() + 1)) - - // Write block headers. - b.output = bh.appendTo(b.output) - b.output = lh.appendTo(b.output) - // Add compressed data. - b.output = append(b.output, out...) - // No sequences. - b.output = append(b.output, 0) - return nil -} - -// encodeRLE will encode an RLE block. -func (b *blockEnc) encodeRLE(val byte, length uint32) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(length) - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, val) -} - -// fuzzFseEncoder can be used to fuzz the FSE encoder. -func fuzzFseEncoder(data []byte) int { - if len(data) > maxSequences || len(data) < 2 { - return 0 - } - enc := fseEncoder{} - hist := enc.Histogram() - maxSym := uint8(0) - for i, v := range data { - v = v & 63 - data[i] = v - hist[v]++ - if v > maxSym { - maxSym = v - } - } - if maxSym == 0 { - // All 0 - return 0 - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) - if cnt == len(data) { - // RLE - return 0 - } - enc.HistogramFinished(maxSym, cnt) - err := enc.normalizeCount(len(data)) - if err != nil { - return 0 - } - _, err = enc.writeCount(nil) - if err != nil { - panic(err) - } - return 1 -} - -// encode will encode the block and append the output in b.output. -// Previous offset codes must be pushed if more blocks are expected. -func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { - if len(b.sequences) == 0 { - return b.encodeLits(b.literals, rawAllLits) - } - if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { - // Check common RLE cases. - seq := b.sequences[0] - if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { - // Offset == 1 and 0 or 1 literals. - b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) - return nil - } - } - - // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 6) - if saved < 16 { - if org == nil { - return errIncompressible - } - b.popOffsets() - return b.encodeLits(org, rawAllLits) - } - - var bh blockHeader - var lh literalsHeader - bh.setLast(b.last) - bh.setType(blockTypeCompressed) - // Store offset of the block header. Needed when we know the size. 
- bhOffset := len(b.output) - b.output = bh.appendTo(b.output) - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(b.literals) >= 1024 && !raw { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 16 && !raw { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - if err == nil && len(out)+5 > len(b.literals) { - // If we are close, we may still be worse or equal to raw. - var lh literalsHeader - lh.setSize(len(b.literals)) - szRaw := lh.size() - lh.setSizes(len(out), len(b.literals), single) - szComp := lh.size() - if len(out)+szComp >= len(b.literals)+szRaw { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - lh.setType(literalsBlockRaw) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals...) - if debugEncoder { - println("Adding literals RAW, length", len(b.literals)) - } - case huff0.ErrUseRLE: - lh.setType(literalsBlockRLE) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals[0]) - if debugEncoder { - println("Adding literals RLE") - } - case nil: - // Compressed litLen... - if reUsed { - if debugEncoder { - println("reused tree") - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("new tree, size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - if debugEncoder { - _, _, err := huff0.ReadTable(out, nil) - if err != nil { - panic(err) - } - } - } - lh.setSizes(len(out), len(b.literals), single) - if debugEncoder { - printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) - println("Adding literal header:", lh) - } - b.output = lh.appendTo(b.output) - b.output = append(b.output, out...) - b.litEnc.Reuse = huff0.ReusePolicyAllow - if debugEncoder { - println("Adding literals compressed") - } - default: - if debugEncoder { - println("Adding literals ERROR:", err) - } - return err - } - // Sequence compression - - // Write the number of sequences - switch { - case len(b.sequences) < 128: - b.output = append(b.output, uint8(len(b.sequences))) - case len(b.sequences) < 0x7f00: // TODO: this could be wrong - n := len(b.sequences) - b.output = append(b.output, 128+uint8(n>>8), uint8(n)) - default: - n := len(b.sequences) - 0x7f00 - b.output = append(b.output, 255, uint8(n), uint8(n>>8)) - } - if debugEncoder { - println("Encoding", len(b.sequences), "sequences") - } - b.genCodes() - llEnc := b.coders.llEnc - ofEnc := b.coders.ofEnc - mlEnc := b.coders.mlEnc - err = llEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = ofEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = mlEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - - // Choose the best compression mode for each type. - // Will evaluate the new vs predefined and previous. - chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { - // See if predefined/previous is better - hist := cur.count[:cur.symbolLen] - nSize := cur.approxSize(hist) + cur.maxHeaderSize() - predefSize := preDef.approxSize(hist) - prevSize := prev.approxSize(hist) - - // Add a small penalty for new encoders. - // Don't bother with extremely small (<2 byte gains). 
- nSize = nSize + (nSize+2*8*16)>>4 - switch { - case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debugEncoder { - println("Using predefined", predefSize>>3, "<=", nSize>>3) - } - return preDef, compModePredefined - case prevSize <= nSize: - if debugEncoder { - println("Using previous", prevSize>>3, "<=", nSize>>3) - } - return prev, compModeRepeat - default: - if debugEncoder { - println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") - println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) - } - return cur, compModeFSE - } - } - - // Write compression mode - var mode uint8 - if llEnc.useRLE { - mode |= uint8(compModeRLE) << 6 - llEnc.setRLE(b.sequences[0].llCode) - if debugEncoder { - println("llEnc.useRLE") - } - } else { - var m seqCompMode - llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) - mode |= uint8(m) << 6 - } - if ofEnc.useRLE { - mode |= uint8(compModeRLE) << 4 - ofEnc.setRLE(b.sequences[0].ofCode) - if debugEncoder { - println("ofEnc.useRLE") - } - } else { - var m seqCompMode - ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) - mode |= uint8(m) << 4 - } - - if mlEnc.useRLE { - mode |= uint8(compModeRLE) << 2 - mlEnc.setRLE(b.sequences[0].mlCode) - if debugEncoder { - println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) - } - } else { - var m seqCompMode - mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) - mode |= uint8(m) << 2 - } - b.output = append(b.output, mode) - if debugEncoder { - printf("Compression modes: 0b%b", mode) - } - b.output, err = llEnc.writeCount(b.output) - if err != nil { - return err - } - start := len(b.output) - b.output, err = ofEnc.writeCount(b.output) - if err != nil { - return err - } - if false { - println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) - for i, v := range ofEnc.norm[:ofEnc.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) - } - } - b.output, err = mlEnc.writeCount(b.output) - if err != nil { - return err - } - - // Maybe in block? - wr := &b.wr - wr.reset(b.output) - - var ll, of, ml cState - - // Current sequence - seq := len(b.sequences) - 1 - s := b.sequences[seq] - llEnc.setBits(llBitsTable[:]) - mlEnc.setBits(mlBitsTable[:]) - ofEnc.setBits(nil) - - llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] - - // We have 3 bounds checks here (and in the loop). - // Since we are iterating backwards it is kinda hard to avoid. - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - ll.init(wr, &llEnc.ct, llB) - of.init(wr, &ofEnc.ct, ofB) - wr.flush32() - ml.init(wr, &mlEnc.ct, mlB) - - // Each of these lookups also generates a bounds check. - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - if debugSequences { - println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) - } - seq-- - // Store sequences in reverse... 
- for seq >= 0 {
- s = b.sequences[seq]
-
- ofB := ofTT[s.ofCode]
- wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
- //of.encode(ofB)
- nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
- dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
- wr.addBits16NC(of.state, uint8(nbBitsOut))
- of.state = of.stateTable[dstState]
-
- // Accumulate extra bits.
- outBits := ofB.outBits & 31
- extraBits := uint64(s.offset & bitMask32[outBits])
- extraBitsN := outBits
-
- mlB := mlTT[s.mlCode]
- //ml.encode(mlB)
- nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
- dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
- wr.addBits16NC(ml.state, uint8(nbBitsOut))
- ml.state = ml.stateTable[dstState]
-
- outBits = mlB.outBits & 31
- extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
- extraBitsN += outBits
-
- llB := llTT[s.llCode]
- //ll.encode(llB)
- nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
- dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
- wr.addBits16NC(ll.state, uint8(nbBitsOut))
- ll.state = ll.stateTable[dstState]
-
- outBits = llB.outBits & 31
- extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
- extraBitsN += outBits
-
- wr.flush32()
- wr.addBits64NC(extraBits, extraBitsN)
-
- if debugSequences {
- println("Encoded seq", seq, s)
- }
-
- seq--
- }
- ml.flush(mlEnc.actualTableLog)
- of.flush(ofEnc.actualTableLog)
- ll.flush(llEnc.actualTableLog)
- wr.close()
- b.output = wr.out
-
- // Maybe even add a bigger margin.
- if len(b.output)-3-bhOffset >= b.size {
- // Discard and encode as raw block.
- b.output = b.encodeRawTo(b.output[:bhOffset], org)
- b.popOffsets()
- b.litEnc.Reuse = huff0.ReusePolicyNone
- return nil
- }
-
- // Size is output minus block header.
- bh.setSize(uint32(len(b.output)-bhOffset) - 3)
- if debugEncoder {
- println("Rewriting block header", bh)
- }
- _ = bh.appendTo(b.output[bhOffset:bhOffset])
- b.coders.setPrev(llEnc, mlEnc, ofEnc)
- return nil
-}
-
-var errIncompressible = errors.New("incompressible")
-
-func (b *blockEnc) genCodes() {
- if len(b.sequences) == 0 {
- // nothing to do
- return
- }
- if len(b.sequences) > math.MaxUint16 {
- panic("can only encode up to 64K sequences")
- }
- // No bounds checks after here:
- llH := b.coders.llEnc.Histogram()
- ofH := b.coders.ofEnc.Histogram()
- mlH := b.coders.mlEnc.Histogram()
- for i := range llH {
- llH[i] = 0
- }
- for i := range ofH {
- ofH[i] = 0
- }
- for i := range mlH {
- mlH[i] = 0
- }
-
- var llMax, ofMax, mlMax uint8
- for i := range b.sequences {
- seq := &b.sequences[i]
- v := llCode(seq.litLen)
- seq.llCode = v
- llH[v]++
- if v > llMax {
- llMax = v
- }
-
- v = ofCode(seq.offset)
- seq.ofCode = v
- ofH[v]++
- if v > ofMax {
- ofMax = v
- }
-
- v = mlCode(seq.matchLen)
- seq.mlCode = v
- mlH[v]++
- if v > mlMax {
- mlMax = v
- if debugAsserts && mlMax > maxMatchLengthSymbol {
- panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
- }
- }
- }
- maxCount := func(a []uint32) int {
- var max uint32
- for _, v := range a {
- if v > max {
- max = v
- }
- }
- return int(max)
- }
- if debugAsserts && mlMax > maxMatchLengthSymbol {
- panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
- }
- if debugAsserts && ofMax > maxOffsetBits {
- panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
- }
- if debugAsserts && llMax > maxLiteralLengthSymbol {
- panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
- }
-
- b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
- b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
- b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go deleted file mode 100644 index 01a01e486e..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by "stringer
-type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. - -package zstd - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[blockTypeRaw-0] - _ = x[blockTypeRLE-1] - _ = x[blockTypeCompressed-2] - _ = x[blockTypeReserved-3] -} - -const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" - -var _blockType_index = [...]uint8{0, 12, 24, 43, 60} - -func (i blockType) String() string { - if i >= blockType(len(_blockType_index)-1) { - return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[literalsBlockRaw-0] - _ = x[literalsBlockRLE-1] - _ = x[literalsBlockCompressed-2] - _ = x[literalsBlockTreeless-3] -} - -const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" - -var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} - -func (i literalsBlockType) String() string { - if i >= literalsBlockType(len(_literalsBlockType_index)-1) { - return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[compModePredefined-0] - _ = x[compModeRLE-1] - _ = x[compModeFSE-2] - _ = x[compModeRepeat-3] -} - -const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" - -var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} - -func (i seqCompMode) String() string { - if i >= seqCompMode(len(_seqCompMode_index)-1) { - return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[tableLiteralLengths-0] - _ = x[tableOffsets-1] - _ = x[tableMatchLengths-2] -} - -const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" - -var _tableIndex_index = [...]uint8{0, 19, 31, 48} - -func (i tableIndex) String() string { - if i >= tableIndex(len(_tableIndex_index)-1) { - return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go deleted file mode 100644 index 55a388553d..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "io" -) - -type byteBuffer interface { - // Read up to 8 bytes. - // Returns io.ErrUnexpectedEOF if this cannot be satisfied. - readSmall(n int) ([]byte, error) - - // Read >8 bytes. - // MAY use the destination slice. 
- readBig(n int, dst []byte) ([]byte, error) - - // Read a single byte. - readByte() (byte, error) - - // Skip n bytes. - skipN(n int64) error -} - -// in-memory buffer -type byteBuf []byte - -func (b *byteBuf) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readByte() (byte, error) { - bb := *b - if len(bb) < 1 { - return 0, io.ErrUnexpectedEOF - } - r := bb[0] - *b = bb[1:] - return r, nil -} - -func (b *byteBuf) skipN(n int64) error { - bb := *b - if n < 0 { - return fmt.Errorf("negative skip (%d) requested", n) - } - if int64(len(bb)) < n { - return io.ErrUnexpectedEOF - } - *b = bb[n:] - return nil -} - -// wrapper around a reader. -type readerWrapper struct { - r io.Reader - tmp [8]byte -} - -func (r *readerWrapper) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - n2, err := io.ReadFull(r.r, r.tmp[:n]) - // We only really care about the actual bytes read. - if err != nil { - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - if debugDecoder { - println("readSmall: got", n2, "want", n, "err", err) - } - return nil, err - } - return r.tmp[:n], nil -} - -func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { - if cap(dst) < n { - dst = make([]byte, n) - } - n2, err := io.ReadFull(r.r, dst[:n]) - if err == io.EOF && n > 0 { - err = io.ErrUnexpectedEOF - } - return dst[:n2], err -} - -func (r *readerWrapper) readByte() (byte, error) { - n2, err := io.ReadFull(r.r, r.tmp[:1]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, err - } - if n2 != 1 { - return 0, io.ErrUnexpectedEOF - } - return r.tmp[0], nil -} - -func (r *readerWrapper) skipN(n int64) error { - n2, err := io.CopyN(io.Discard, r.r, n) - if n2 != n { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go deleted file mode 100644 index 0e59a242d8..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// overread returns whether we have advanced too far. -func (b *byteReader) overread() bool { - return b.off > len(b.b) -} - -// Int32 returns a little endian int32 starting at current offset. 
-func (b byteReader) Int32() int32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := int32(b2[3]) - v2 := int32(b2[2]) - v1 := int32(b2[1]) - v0 := int32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint8 returns the next byte -func (b *byteReader) Uint8() uint8 { - v := b.b[b.off] - return v -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - if r := b.remain(); r < 4 { - // Very rare - v := uint32(0) - for i := 1; i <= r; i++ { - v = (v << 8) | uint32(b.b[len(b.b)-i]) - } - return v - } - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint32NC returns a little endian uint32 starting at current offset. -// The caller must be sure if there are at least 4 bytes left. -func (b byteReader) Uint32NC() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go deleted file mode 100644 index 6a5a2988b6..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2020+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "errors" - "io" -) - -// HeaderMaxSize is the maximum size of a Frame and Block Header. -// If less is sent to Header.Decode it *may* still contain enough information. -const HeaderMaxSize = 14 + 3 - -// Header contains information about the first frame and block within that. -type Header struct { - // SingleSegment specifies whether the data is to be decompressed into a - // single contiguous memory segment. - // It implies that WindowSize is invalid and that FrameContentSize is valid. - SingleSegment bool - - // WindowSize is the window of data to keep while decoding. - // Will only be set if SingleSegment is false. - WindowSize uint64 - - // Dictionary ID. - // If 0, no dictionary. - DictionaryID uint32 - - // HasFCS specifies whether FrameContentSize has a valid value. - HasFCS bool - - // FrameContentSize is the expected uncompressed size of the entire frame. - FrameContentSize uint64 - - // Skippable will be true if the frame is meant to be skipped. - // This implies that FirstBlock.OK is false. - Skippable bool - - // SkippableID is the user-specific ID for the skippable frame. - // Valid values are between 0 to 15, inclusive. - SkippableID int - - // SkippableSize is the length of the user data to skip following - // the header. - SkippableSize uint32 - - // HeaderSize is the raw size of the frame header. - // - // For normal frames, it includes the size of the magic number and - // the size of the header (per section 3.1.1.1). - // It does not include the size for any data blocks (section 3.1.1.2) nor - // the size for the trailing content checksum. - // - // For skippable frames, this counts the size of the magic number - // along with the size of the size field of the payload. 
- // It does not include the size of the skippable payload itself. - // The total frame size is the HeaderSize plus the SkippableSize. - HeaderSize int - - // First block information. - FirstBlock struct { - // OK will be set if first block could be decoded. - OK bool - - // Is this the last block of a frame? - Last bool - - // Is the data compressed? - // If true CompressedSize will be populated. - // Unfortunately DecompressedSize cannot be determined - // without decoding the blocks. - Compressed bool - - // DecompressedSize is the expected decompressed size of the block. - // Will be 0 if it cannot be determined. - DecompressedSize int - - // CompressedSize of the data in the block. - // Does not include the block header. - // Will be equal to DecompressedSize if not Compressed. - CompressedSize int - } - - // If set there is a checksum present for the block content. - // The checksum field at the end is always 4 bytes long. - HasCheckSum bool -} - -// Decode the header from the beginning of the stream. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. -func (h *Header) Decode(in []byte) error { - _, err := h.DecodeAndStrip(in) - return err -} - -// DecodeAndStrip will decode the header from the beginning of the stream -// and on success return the remaining bytes. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
-func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { - *h = Header{} - if len(in) < 4 { - return nil, io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - b, in := in[:4], in[4:] - if string(b) != frameMagic { - if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return nil, ErrMagicMismatch - } - if len(in) < 4 { - return nil, io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - h.Skippable = true - h.SkippableID = int(b[0] & 0xf) - h.SkippableSize = binary.LittleEndian.Uint32(in) - return in[4:], nil - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - if len(in) < 1 { - return nil, io.ErrUnexpectedEOF - } - fhd, in := in[0], in[1:] - h.HeaderSize++ - h.SingleSegment = fhd&(1<<5) != 0 - h.HasCheckSum = fhd&(1<<2) != 0 - if fhd&(1<<3) != 0 { - return nil, errors.New("reserved bit set on frame header") - } - - if !h.SingleSegment { - if len(in) < 1 { - return nil, io.ErrUnexpectedEOF - } - var wd byte - wd, in = in[0], in[1:] - h.HeaderSize++ - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - h.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - if len(in) < int(size) { - return nil, io.ErrUnexpectedEOF - } - b, in = in[:size], in[size:] - h.HeaderSize += int(size) - switch len(b) { - case 1: - h.DictionaryID = uint32(b[0]) - case 2: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if h.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - - if fcsSize > 0 { - h.HasFCS = true - if len(in) < fcsSize { - return nil, io.ErrUnexpectedEOF - } - b, in = in[:fcsSize], in[fcsSize:] - h.HeaderSize += int(fcsSize) - switch len(b) { - case 1: - h.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - } - - // Frame Header done, we will not fail from now on. - if len(in) < 3 { - return in, nil - } - tmp := in[:3] - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - h.FirstBlock.Last = bh&1 != 0 - blockType := blockType((bh >> 1) & 3) - // find size. 
- cSize := int(bh >> 3) - switch blockType { - case blockTypeReserved: - return in, nil - case blockTypeRLE: - h.FirstBlock.Compressed = true - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = 1 - case blockTypeCompressed: - h.FirstBlock.Compressed = true - h.FirstBlock.CompressedSize = cSize - case blockTypeRaw: - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = cSize - default: - panic("Invalid block type") - } - - h.FirstBlock.OK = true - return in, nil -} - -// AppendTo will append the encoded header to the dst slice. -// There is no error checking performed on the header values. -func (h *Header) AppendTo(dst []byte) ([]byte, error) { - if h.Skippable { - magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} - magic[0] |= byte(h.SkippableID & 0xf) - dst = append(dst, magic[:]...) - f := h.SkippableSize - return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil - } - f := frameHeader{ - ContentSize: h.FrameContentSize, - WindowSize: uint32(h.WindowSize), - SingleSegment: h.SingleSegment, - Checksum: h.HasCheckSum, - DictID: h.DictionaryID, - } - return f.appendTo(dst), nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go deleted file mode 100644 index bbca17234a..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ /dev/null @@ -1,948 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "context" - "encoding/binary" - "io" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Decoder provides decoding of zstandard streams. -// The decoder has been designed to operate without allocations after a warmup. -// This means that you should store the decoder for best performance. -// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. -// A decoder can safely be re-used even if the previous stream failed. -// To release the resources, you must call the Close() function on a decoder. -type Decoder struct { - o decoderOptions - - // Unreferenced decoders, ready for use. - decoders chan *blockDec - - // Current read position used for Reader functionality. - current decoderState - - // sync stream decoding - syncStream struct { - decodedFrame uint64 - br readerWrapper - enabled bool - inFrame bool - dstBuf []byte - } - - frame *frameDec - - // Custom dictionaries. - dicts map[uint32]*dict - - // streamWg is the waitgroup for all streams - streamWg sync.WaitGroup -} - -// decoderState is used for maintaining state when the decoder -// is used for streaming. -type decoderState struct { - // current block being written to stream. - decodeOutput - - // output in order to be written to stream. - output chan decodeOutput - - // cancel remaining output. - cancel context.CancelFunc - - // crc of current frame - crc *xxhash.Digest - - flushed bool -} - -var ( - // Check the interfaces we want to support. - _ = io.WriterTo(&Decoder{}) - _ = io.Reader(&Decoder{}) -) - -// NewReader creates a new decoder. -// A nil Reader can be provided in which case Reset can be used to start a decode. -// -// A Decoder can be used in two modes: -// -// 1) As a stream, or -// 2) For stateless decoding using DecodeAll. -// -// Only a single stream can be decoded concurrently, but the same decoder -// can run multiple concurrent stateless decodes. 
It is even possible to -// use stateless decodes while a stream is being decoded. -// -// The Reset function can be used to initiate a new stream, which will considerably -// reduce the allocations normally caused by NewReader. -func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { - initPredefined() - var d Decoder - d.o.setDefault() - for _, o := range opts { - err := o(&d.o) - if err != nil { - return nil, err - } - } - d.current.crc = xxhash.New() - d.current.flushed = true - - if r == nil { - d.current.err = ErrDecoderNilInput - } - - // Transfer option dicts. - d.dicts = make(map[uint32]*dict, len(d.o.dicts)) - for _, dc := range d.o.dicts { - d.dicts[dc.id] = dc - } - d.o.dicts = nil - - // Create decoders - d.decoders = make(chan *blockDec, d.o.concurrent) - for i := 0; i < d.o.concurrent; i++ { - dec := newBlockDec(d.o.lowMem) - dec.localFrame = newFrameDec(d.o) - d.decoders <- dec - } - - if r == nil { - return &d, nil - } - return &d, d.Reset(r) -} - -// Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. -// When the stream is done, io.EOF will be returned. -func (d *Decoder) Read(p []byte) (int, error) { - var n int - for { - if len(d.current.b) > 0 { - filled := copy(p, d.current.b) - p = p[filled:] - d.current.b = d.current.b[filled:] - n += filled - } - if len(p) == 0 { - break - } - if len(d.current.b) == 0 { - // We have an error and no more data - if d.current.err != nil { - break - } - if !d.nextBlock(n == 0) { - return n, d.current.err - } - } - } - if len(d.current.b) > 0 { - if debugDecoder { - println("returning", n, "still bytes left:", len(d.current.b)) - } - // Only return error at end of block - return n, nil - } - if d.current.err != nil { - d.drainOutput() - } - if debugDecoder { - println("returning", n, d.current.err, len(d.decoders)) - } - return n, d.current.err -} - -// Reset will reset the decoder the supplied stream after the current has finished processing. -// Note that this functionality cannot be used after Close has been called. -// Reset can be called with a nil reader to release references to the previous reader. -// After being called with a nil reader, no other operations than Reset or DecodeAll or Close -// should be used. -func (d *Decoder) Reset(r io.Reader) error { - if d.current.err == ErrDecoderClosed { - return d.current.err - } - - d.drainOutput() - - d.syncStream.br.r = nil - if r == nil { - d.current.err = ErrDecoderNilInput - if len(d.current.b) > 0 { - d.current.b = d.current.b[:0] - } - d.current.flushed = true - return nil - } - - // If bytes buffer and < 5MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { - bb2 := bb - if debugDecoder { - println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) - } - b := bb2.Bytes() - var dst []byte - if cap(d.syncStream.dstBuf) > 0 { - dst = d.syncStream.dstBuf[:0] - } - - dst, err := d.DecodeAll(b, dst) - if err == nil { - err = io.EOF - } - // Save output buffer - d.syncStream.dstBuf = dst - d.current.b = dst - d.current.err = err - d.current.flushed = true - if debugDecoder { - println("sync decode to", len(dst), "bytes, err:", err) - } - return nil - } - // Remove current block. - d.stashDecoder() - d.current.decodeOutput = decodeOutput{} - d.current.err = nil - d.current.flushed = false - d.current.d = nil - d.syncStream.dstBuf = nil - - // Ensure no-one else is still running... 
- d.streamWg.Wait() - if d.frame == nil { - d.frame = newFrameDec(d.o) - } - - if d.o.concurrent == 1 { - return d.startSyncDecoder(r) - } - - d.current.output = make(chan decodeOutput, d.o.concurrent) - ctx, cancel := context.WithCancel(context.Background()) - d.current.cancel = cancel - d.streamWg.Add(1) - go d.startStreamDecoder(ctx, r, d.current.output) - - return nil -} - -// drainOutput will drain the output until errEndOfStream is sent. -func (d *Decoder) drainOutput() { - if d.current.cancel != nil { - if debugDecoder { - println("cancelling current") - } - d.current.cancel() - d.current.cancel = nil - } - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) - } - d.decoders <- d.current.d - d.current.d = nil - d.current.b = nil - } - if d.current.output == nil || d.current.flushed { - println("current already flushed") - return - } - for v := range d.current.output { - if v.d != nil { - if debugDecoder { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - } - d.current.output = nil - d.current.flushed = true -} - -// WriteTo writes data to w until there's no more data to write or when an error occurs. -// The return value n is the number of bytes written. -// Any error encountered during the write is also returned. -func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - var n int64 - for { - if len(d.current.b) > 0 { - n2, err2 := w.Write(d.current.b) - n += int64(n2) - if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { - d.current.err = err2 - } else if n2 != len(d.current.b) { - d.current.err = io.ErrShortWrite - } - } - if d.current.err != nil { - break - } - d.nextBlock(true) - } - err := d.current.err - if err != nil { - d.drainOutput() - } - if err == io.EOF { - err = nil - } - return n, err -} - -// DecodeAll allows stateless decoding of a blob of bytes. -// Output will be appended to dst, so if the destination size is known -// you can pre-allocate the destination slice to avoid allocations. -// DecodeAll can be used concurrently. -// The Decoder concurrency limits will be respected. -func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.decoders == nil { - return dst, ErrDecoderClosed - } - - // Grab a block decoder and frame decoder. 
- block := <-d.decoders - frame := block.localFrame - initialSize := len(dst) - defer func() { - if debugDecoder { - printf("re-adding decoder: %p", block) - } - frame.rawInput = nil - frame.bBuf = nil - if frame.history.decoders.br != nil { - frame.history.decoders.br.in = nil - } - d.decoders <- block - }() - frame.bBuf = input - - for { - frame.history.reset() - err := frame.reset(&frame.bBuf) - if err != nil { - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") - } - return dst, nil - } - return dst, err - } - if err = d.setDict(frame); err != nil { - return nil, err - } - if frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) - } - return dst, ErrWindowSizeExceeded - } - if frame.FrameContentSize != fcsUnknown { - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - - if cap(dst) == 0 && !d.o.limitToCap { - // Allocate len(input) * 2 by default if nothing is provided - // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } - if uint64(size) > d.o.maxDecodedSize { - size = int(d.o.maxDecodedSize) - } - dst = make([]byte, 0, size) - } - - dst, err = frame.runDecoder(dst, block) - if err != nil { - return dst, err - } - if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { - return dst, ErrDecoderSizeExceeded - } - if len(frame.bBuf) == 0 { - if debugDecoder { - println("frame dbuf empty") - } - break - } - } - return dst, nil -} - -// nextBlock returns the next block. -// If an error occurs d.err will be set. -// Optionally the function can block for new output. -// If non-blocking mode is used the returned boolean will be false -// if no data was available without blocking. -func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.err != nil { - // Keep error state. - return false - } - d.current.b = d.current.b[:0] - - // SYNC: - if d.syncStream.enabled { - if !blocking { - return false - } - ok = d.nextBlockSync() - if !ok { - d.stashDecoder() - } - return ok - } - - //ASYNC: - d.stashDecoder() - if blocking { - d.current.decodeOutput, ok = <-d.current.output - } else { - select { - case d.current.decodeOutput, ok = <-d.current.output: - default: - return false - } - } - if !ok { - // This should not happen, so signal error state... 
- d.current.err = io.ErrUnexpectedEOF - return false - } - next := d.current.decodeOutput - if next.d != nil && next.d.async.newHist != nil { - d.current.crc.Reset() - } - if debugDecoder { - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) - println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) - } - - if d.o.ignoreChecksum { - return true - } - - if len(next.b) > 0 { - d.current.crc.Write(next.b) - } - if next.err == nil && next.d != nil && next.d.hasCRC { - got := uint32(d.current.crc.Sum64()) - if got != next.d.checkCRC { - if debugDecoder { - printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) - } - d.current.err = ErrCRCMismatch - } else { - if debugDecoder { - printf("CRC ok %08x\n", got) - } - } - } - - return true -} - -func (d *Decoder) nextBlockSync() (ok bool) { - if d.current.d == nil { - d.current.d = <-d.decoders - } - for len(d.current.b) == 0 { - if !d.syncStream.inFrame { - d.frame.history.reset() - d.current.err = d.frame.reset(&d.syncStream.br) - if d.current.err == nil { - d.current.err = d.setDict(d.frame) - } - if d.current.err != nil { - return false - } - if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { - d.current.err = ErrDecoderSizeExceeded - return false - } - - d.syncStream.decodedFrame = 0 - d.syncStream.inFrame = true - } - d.current.err = d.frame.next(d.current.d) - if d.current.err != nil { - return false - } - d.frame.history.ensureBlock() - if debugDecoder { - println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) - } - histBefore := len(d.frame.history.b) - d.current.err = d.current.d.decodeBuf(&d.frame.history) - - if d.current.err != nil { - println("error after:", d.current.err) - return false - } - d.current.b = d.frame.history.b[histBefore:] - if debugDecoder { - println("history after:", len(d.frame.history.b)) - } - - // Check frame size (before CRC) - d.syncStream.decodedFrame += uint64(len(d.current.b)) - if d.syncStream.decodedFrame > d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeExceeded - return false - } - - // Check FCS - if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeMismatch - return false - } - - // Update/Check CRC - if d.frame.HasCheckSum { - if !d.o.ignoreChecksum { - d.frame.crc.Write(d.current.b) - } - if d.current.d.Last { - if !d.o.ignoreChecksum { - d.current.err = d.frame.checkCRC() - } else { - d.current.err = d.frame.consumeCRC() - } - if d.current.err != nil { - println("CRC error:", d.current.err) - return false - } - } - } - d.syncStream.inFrame = !d.current.d.Last - } - return true -} - -func (d *Decoder) stashDecoder() { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } -} - -// Close will release all resources. -// It is NOT possible to reuse the decoder after this. 
-func (d *Decoder) Close() { - if d.current.err == ErrDecoderClosed { - return - } - d.drainOutput() - if d.current.cancel != nil { - d.current.cancel() - d.streamWg.Wait() - d.current.cancel = nil - } - if d.decoders != nil { - close(d.decoders) - for dec := range d.decoders { - dec.Close() - } - d.decoders = nil - } - if d.current.d != nil { - d.current.d.Close() - d.current.d = nil - } - d.current.err = ErrDecoderClosed -} - -// IOReadCloser returns the decoder as an io.ReadCloser for convenience. -// Any changes to the decoder will be reflected, so the returned ReadCloser -// can be reused along with the decoder. -// io.WriterTo is also supported by the returned ReadCloser. -func (d *Decoder) IOReadCloser() io.ReadCloser { - return closeWrapper{d: d} -} - -// closeWrapper wraps a function call as a closer. -type closeWrapper struct { - d *Decoder -} - -// WriteTo forwards WriteTo calls to the decoder. -func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { - return c.d.WriteTo(w) -} - -// Read forwards read calls to the decoder. -func (c closeWrapper) Read(p []byte) (n int, err error) { - return c.d.Read(p) -} - -// Close closes the decoder. -func (c closeWrapper) Close() error { - c.d.Close() - return nil -} - -type decodeOutput struct { - d *blockDec - b []byte - err error -} - -func (d *Decoder) startSyncDecoder(r io.Reader) error { - d.frame.history.reset() - d.syncStream.br = readerWrapper{r: r} - d.syncStream.inFrame = false - d.syncStream.enabled = true - d.syncStream.decodedFrame = 0 - return nil -} - -// Create Decoder: -// ASYNC: -// Spawn 3 go routines. -// 0: Read frames and decode block literals. -// 1: Decode sequences. -// 2: Execute sequences, send to output. -func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { - defer d.streamWg.Done() - br := readerWrapper{r: r} - - var seqDecode = make(chan *blockDec, d.o.concurrent) - var seqExecute = make(chan *blockDec, d.o.concurrent) - - // Async 1: Decode sequences... - go func() { - var hist history - var hasErr bool - - for block := range seqDecode { - if hasErr { - if block != nil { - seqExecute <- block - } - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 1: new history, recent:", block.async.newHist.recentOffsets) - } - hist.reset() - hist.decoders = block.async.newHist.decoders - hist.recentOffsets = block.async.newHist.recentOffsets - hist.windowSize = block.async.newHist.windowSize - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqExecute <- block - continue - } - - hist.decoders.literals = block.async.literals - block.err = block.prepareSequences(block.async.seqData, &hist) - if debugDecoder && block.err != nil { - println("prepareSequences returned:", block.err) - } - hasErr = block.err != nil - if block.err == nil { - block.err = block.decodeSequences(&hist) - if debugDecoder && block.err != nil { - println("decodeSequences returned:", block.err) - } - hasErr = block.err != nil - // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] - block.async.seqSize = hist.decoders.seqSize - } - seqExecute <- block - } - close(seqExecute) - hist.reset() - }() - - var wg sync.WaitGroup - wg.Add(1) - - // Async 3: Execute sequences... 
- frameHistCache := d.frame.history.b - go func() { - var hist history - var decodedFrame uint64 - var fcs uint64 - var hasErr bool - for block := range seqExecute { - out := decodeOutput{err: block.err, d: block} - if block.err != nil || hasErr { - hasErr = true - output <- out - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 2: new history") - } - hist.reset() - hist.windowSize = block.async.newHist.windowSize - hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - - if cap(hist.b) < hist.allocFrameBuffer { - if cap(frameHistCache) >= hist.allocFrameBuffer { - hist.b = frameHistCache - } else { - hist.b = make([]byte, 0, hist.allocFrameBuffer) - println("Alloc history sized", hist.allocFrameBuffer) - } - } - hist.b = hist.b[:0] - fcs = block.async.fcs - decodedFrame = 0 - } - do := decodeOutput{err: block.err, d: block} - switch block.Type { - case blockTypeRLE: - if debugDecoder { - println("add rle block length:", block.RLESize) - } - - if cap(block.dst) < int(block.RLESize) { - if block.lowMem { - block.dst = make([]byte, block.RLESize) - } else { - block.dst = make([]byte, maxCompressedBlockSize) - } - } - block.dst = block.dst[:block.RLESize] - v := block.data[0] - for i := range block.dst { - block.dst[i] = v - } - hist.append(block.dst) - do.b = block.dst - case blockTypeRaw: - if debugDecoder { - println("add raw block length:", len(block.data)) - } - hist.append(block.data) - do.b = block.data - case blockTypeCompressed: - if debugDecoder { - println("execute with history length:", len(hist.b), "window:", hist.windowSize) - } - hist.decoders.seqSize = block.async.seqSize - hist.decoders.literals = block.async.literals - do.err = block.executeSequences(&hist) - hasErr = do.err != nil - if debugDecoder && hasErr { - println("executeSequences returned:", do.err) - } - do.b = block.dst - } - if !hasErr { - decodedFrame += uint64(len(do.b)) - if decodedFrame > fcs { - println("fcs exceeded", block.Last, fcs, decodedFrame) - do.err = ErrFrameSizeExceeded - hasErr = true - } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { - do.err = ErrFrameSizeMismatch - hasErr = true - } else { - if debugDecoder { - println("fcs ok", block.Last, fcs, decodedFrame) - } - } - } - output <- do - } - close(output) - frameHistCache = hist.b - wg.Done() - if debugDecoder { - println("decoder goroutines finished") - } - hist.reset() - }() - - var hist history -decodeStream: - for { - var hasErr bool - hist.reset() - decodeBlock := func(block *blockDec) { - if hasErr { - if block != nil { - seqDecode <- block - } - return - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqDecode <- block - return - } - - remain, err := block.decodeLiterals(block.data, &hist) - block.err = err - hasErr = block.err != nil - if err == nil { - block.async.literals = hist.decoders.literals - block.async.seqData = remain - } else if debugDecoder { - println("decodeLiterals error:", err) - } - seqDecode <- block - } - frame := d.frame - if debugDecoder { - println("New frame...") - } - var historySent bool - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) - } - if err == nil { - err = d.setDict(frame) - } - if err == nil && d.frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) - } - - err = ErrDecoderSizeExceeded - } - if err != nil { - select { - case <-ctx.Done(): - case dec := <-d.decoders: - dec.sendErr(err) - decodeBlock(dec) - } - break decodeStream - } - - // Go through all blocks of the frame. - for { - var dec *blockDec - select { - case <-ctx.Done(): - break decodeStream - case dec = <-d.decoders: - // Once we have a decoder, we MUST return it. - } - err := frame.next(dec) - if !historySent { - h := frame.history - if debugDecoder { - println("Alloc History:", h.allocFrameBuffer) - } - hist.reset() - if h.dict != nil { - hist.setDict(h.dict) - } - dec.async.newHist = &h - dec.async.fcs = frame.FrameContentSize - historySent = true - } else { - dec.async.newHist = nil - } - if debugDecoder && err != nil { - println("next block returned error:", err) - } - dec.err = err - dec.hasCRC = false - if dec.Last && frame.HasCheckSum && err == nil { - crc, err := frame.rawInput.readSmall(4) - if len(crc) < 4 { - if err == nil { - err = io.ErrUnexpectedEOF - - } - println("CRC missing?", err) - dec.err = err - } else { - dec.checkCRC = binary.LittleEndian.Uint32(crc) - dec.hasCRC = true - if debugDecoder { - printf("found crc to check: %08x\n", dec.checkCRC) - } - } - } - err = dec.err - last := dec.Last - decodeBlock(dec) - if err != nil { - break decodeStream - } - if last { - break - } - } - } - close(seqDecode) - wg.Wait() - hist.reset() - d.frame.history.b = frameHistCache -} - -func (d *Decoder) setDict(frame *frameDec) (err error) { - dict, ok := d.dicts[frame.DictionaryID] - if ok { - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(dict) - } else if frame.DictionaryID != 0 { - // A zero or missing dictionary id is ambiguous: - // either dictionary zero, or no dictionary. In particular, - // zstd --patch-from uses this id for the source file, - // so only return an error if the dictionary id is not zero. - err = ErrUnknownDictionary - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go deleted file mode 100644 index 774c5f00fe..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math/bits" - "runtime" -) - -// DOption is an option for creating a decoder. -type DOption func(*decoderOptions) error - -// options retains accumulated state of multiple options. -type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []*dict - ignoreChecksum bool - limitToCap bool - decodeBufsBelow int -} - -func (o *decoderOptions) setDefault() { - *o = decoderOptions{ - // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, - decodeBufsBelow: 128 << 10, - } - if o.concurrent > 4 { - o.concurrent = 4 - } - o.maxDecodedSize = 64 << 30 -} - -// WithDecoderLowmem will set whether to use a lower amount of memory, -// but possibly have to allocate more while running. -func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } -} - -// WithDecoderConcurrency sets the number of created decoders. 
-// When decoding block with DecodeAll, this will limit the number -// of possible concurrently running decodes. -// When decoding streams, this will limit the number of -// inflight blocks. -// When decoding streams and setting maximum to 1, -// no async decoding will be done. -// When a value of 0 is provided GOMAXPROCS will be used. -// By default this will be set to 4 or GOMAXPROCS, whatever is lower. -func WithDecoderConcurrency(n int) DOption { - return func(o *decoderOptions) error { - if n < 0 { - return errors.New("concurrency must be at least 1") - } - if n == 0 { - o.concurrent = runtime.GOMAXPROCS(0) - } else { - o.concurrent = n - } - return nil - } -} - -// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory -// non-streaming operations or maximum window size for streaming operations. -// This can be used to control memory usage of potentially hostile content. -// Maximum is 1 << 63 bytes. Default is 64GiB. -func WithDecoderMaxMemory(n uint64) DOption { - return func(o *decoderOptions) error { - if n == 0 { - return errors.New("WithDecoderMaxMemory must be at least 1") - } - if n > 1<<63 { - return errors.New("WithDecoderMaxmemory must be less than 1 << 63") - } - o.maxDecodedSize = n - return nil - } -} - -// WithDecoderDicts allows to register one or more dictionaries for the decoder. -// -// Each slice in dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// If several dictionaries with the same ID are provided, the last one will be used. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithDecoderDicts(dicts ...[]byte) DOption { - return func(o *decoderOptions) error { - for _, b := range dicts { - d, err := loadDict(b) - if err != nil { - return err - } - o.dicts = append(o.dicts, d) - } - return nil - } -} - -// WithDecoderDictRaw registers a dictionary that may be used by the decoder. -// The slice content can be arbitrary data. -func WithDecoderDictRaw(id uint32, content []byte) DOption { - return func(o *decoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) - return nil - } -} - -// WithDecoderMaxWindow allows to set a maximum window size for decodes. -// This allows rejecting packets that will cause big memory usage. -// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. -// If WithDecoderMaxMemory is set to a lower value, that will be used. -// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. -func WithDecoderMaxWindow(size uint64) DOption { - return func(o *decoderOptions) error { - if size < MinWindowSize { - return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") - } - if size > (1<<41)+7*(1<<38) { - return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") - } - o.maxWindowSize = size - return nil - } -} - -// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, -// or any size set in WithDecoderMaxMemory. -// This can be used to limit decoding to a specific maximum output size. -// Disabled by default. 
-func WithDecodeAllCapLimit(b bool) DOption { - return func(o *decoderOptions) error { - o.limitToCap = b - return nil - } -} - -// WithDecodeBuffersBelow will fully decode readers that have a -// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. -// This typically uses less allocations but will have the full decompressed object in memory. -// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. -// Default is 128KiB. -func WithDecodeBuffersBelow(size int) DOption { - return func(o *decoderOptions) error { - o.decodeBufsBelow = size - return nil - } -} - -// IgnoreChecksum allows to forcibly ignore checksum checking. -func IgnoreChecksum(b bool) DOption { - return func(o *decoderOptions) error { - o.ignoreChecksum = b - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go deleted file mode 100644 index b7b83164bc..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ /dev/null @@ -1,565 +0,0 @@ -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "sort" - - "github.com/klauspost/compress/huff0" -) - -type dict struct { - id uint32 - - litEnc *huff0.Scratch - llDec, ofDec, mlDec sequenceDec - offsets [3]int - content []byte -} - -const dictMagic = "\x37\xa4\x30\xec" - -// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. -const dictMaxLength = 1 << 31 - -// ID returns the dictionary id or 0 if d is nil. -func (d *dict) ID() uint32 { - if d == nil { - return 0 - } - return d.id -} - -// ContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) ContentSize() int { - if d == nil { - return 0 - } - return len(d.content) -} - -// Content returns the dictionary content. -func (d *dict) Content() []byte { - if d == nil { - return nil - } - return d.content -} - -// Offsets returns the initial offsets. -func (d *dict) Offsets() [3]int { - if d == nil { - return [3]int{} - } - return d.offsets -} - -// LitEncoder returns the literal encoder. -func (d *dict) LitEncoder() *huff0.Scratch { - if d == nil { - return nil - } - return d.litEnc -} - -// Load a dictionary as described in -// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format -func loadDict(b []byte) (*dict, error) { - // Check static field size. - if len(b) <= 8+(3*4) { - return nil, io.ErrUnexpectedEOF - } - d := dict{ - llDec: sequenceDec{fse: &fseDecoder{}}, - ofDec: sequenceDec{fse: &fseDecoder{}}, - mlDec: sequenceDec{fse: &fseDecoder{}}, - } - if string(b[:4]) != dictMagic { - return nil, ErrMagicMismatch - } - d.id = binary.LittleEndian.Uint32(b[4:8]) - if d.id == 0 { - return nil, errors.New("dictionaries cannot have ID 0") - } - - // Read literal table - var err error - d.litEnc, b, err = huff0.ReadTable(b[8:], nil) - if err != nil { - return nil, fmt.Errorf("loading literal table: %w", err) - } - d.litEnc.Reuse = huff0.ReusePolicyMust - - br := byteReader{ - b: b, - off: 0, - } - readDec := func(i tableIndex, dec *fseDecoder) error { - if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { - return err - } - if br.overread() { - return io.ErrUnexpectedEOF - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder || debugEncoder { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - // Set decoders as predefined so they aren't reused. 
- dec.preDefined = true - return nil - } - - if err := readDec(tableOffsets, d.ofDec.fse); err != nil { - return nil, err - } - if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { - return nil, err - } - if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { - return nil, err - } - if br.remain() < 12 { - return nil, io.ErrUnexpectedEOF - } - - d.offsets[0] = int(br.Uint32()) - br.advance(4) - d.offsets[1] = int(br.Uint32()) - br.advance(4) - d.offsets[2] = int(br.Uint32()) - br.advance(4) - if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { - return nil, errors.New("invalid offset in dictionary") - } - d.content = make([]byte, br.remain()) - copy(d.content, br.unread()) - if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { - return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) - } - - return &d, nil -} - -// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. -func InspectDictionary(b []byte) (interface { - ID() uint32 - ContentSize() int - Content() []byte - Offsets() [3]int - LitEncoder() *huff0.Scratch -}, error) { - initPredefined() - d, err := loadDict(b) - return d, err -} - -type BuildDictOptions struct { - // Dictionary ID. - ID uint32 - - // Content to use to create dictionary tables. - Contents [][]byte - - // History to use for all blocks. - History []byte - - // Offsets to use. - Offsets [3]int - - // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. - // See https://github.com/facebook/zstd/issues/3724 - CompatV155 bool - - // Use the specified encoder level. - // The dictionary will be built using the specified encoder level, - // which will reflect speed and make the dictionary tailored for that level. - // If not set SpeedBestCompression will be used. - Level EncoderLevel - - // DebugOut will write stats and other details here if set. - DebugOut io.Writer -} - -func BuildDict(o BuildDictOptions) ([]byte, error) { - initPredefined() - hist := o.History - contents := o.Contents - debug := o.DebugOut != nil - println := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintln(o.DebugOut, args...) - } - } - printf := func(s string, args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintf(o.DebugOut, s, args...) - } - } - print := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprint(o.DebugOut, args...) 
- } - } - - if int64(len(hist)) > dictMaxLength { - return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) - } - if len(hist) < 8 { - return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) - } - if len(contents) == 0 { - return nil, errors.New("no content provided") - } - d := dict{ - id: o.ID, - litEnc: nil, - llDec: sequenceDec{}, - ofDec: sequenceDec{}, - mlDec: sequenceDec{}, - offsets: o.Offsets, - content: hist, - } - block := blockEnc{lowMem: false} - block.init() - enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) - if o.Level != 0 { - eOpts := encoderOptions{ - level: o.Level, - blockSize: maxMatchLen, - windowSize: maxMatchLen, - dict: &d, - lowMem: false, - } - enc = eOpts.encoder() - } else { - o.Level = SpeedBestCompression - } - var ( - remain [256]int - ll [256]int - ml [256]int - of [256]int - ) - addValues := func(dst *[256]int, src []byte) { - for _, v := range src { - dst[v]++ - } - } - addHist := func(dst *[256]int, src *[256]uint32) { - for i, v := range src { - dst[i] += int(v) - } - } - seqs := 0 - nUsed := 0 - litTotal := 0 - newOffsets := make(map[uint32]int, 1000) - for _, b := range contents { - block.reset(nil) - if len(b) < 8 { - continue - } - nUsed++ - enc.Reset(&d, true) - enc.Encode(&block, b) - addValues(&remain, block.literals) - litTotal += len(block.literals) - if len(block.sequences) == 0 { - continue - } - seqs += len(block.sequences) - block.genCodes() - addHist(&ll, block.coders.llEnc.Histogram()) - addHist(&ml, block.coders.mlEnc.Histogram()) - addHist(&of, block.coders.ofEnc.Histogram()) - for i, seq := range block.sequences { - if i > 3 { - break - } - offset := seq.offset - if offset == 0 { - continue - } - if int(offset) >= len(o.History) { - continue - } - if offset > 3 { - newOffsets[offset-3]++ - } else { - newOffsets[uint32(o.Offsets[offset-1])]++ - } - } - } - // Find most used offsets. - var sortedOffsets []uint32 - for k := range newOffsets { - sortedOffsets = append(sortedOffsets, k) - } - sort.Slice(sortedOffsets, func(i, j int) bool { - a, b := sortedOffsets[i], sortedOffsets[j] - if a == b { - // Prefer the longer offset - return sortedOffsets[i] > sortedOffsets[j] - } - return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] - }) - if len(sortedOffsets) > 3 { - if debug { - print("Offsets:") - for i, v := range sortedOffsets { - if i > 20 { - break - } - printf("[%d: %d],", v, newOffsets[v]) - } - println("") - } - - sortedOffsets = sortedOffsets[:3] - } - for i, v := range sortedOffsets { - o.Offsets[i] = int(v) - } - if debug { - println("New repeat offsets", o.Offsets) - } - - if nUsed == 0 || seqs == 0 { - return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) - } - if debug { - println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) - } - if seqs/nUsed < 512 { - // Use 512 as minimum. - nUsed = seqs / 512 - if nUsed == 0 { - nUsed = 1 - } - } - copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { - hist := dst.Histogram() - var maxSym uint8 - var maxCount int - var fakeLength int - for i, v := range src { - if v > 0 { - v = v / nUsed - if v == 0 { - v = 1 - } - } - if v > maxCount { - maxCount = v - } - if v != 0 { - maxSym = uint8(i) - } - fakeLength += v - hist[i] = uint32(v) - } - - // Ensure we aren't trying to represent RLE. 
- if maxCount == fakeLength { - for i := range hist { - if uint8(i) == maxSym { - fakeLength++ - maxSym++ - hist[i+1] = 1 - if maxSym > 1 { - break - } - } - if hist[0] == 0 { - fakeLength++ - hist[i] = 1 - if maxSym > 1 { - break - } - } - } - } - - dst.HistogramFinished(maxSym, maxCount) - dst.reUsed = false - dst.useRLE = false - err := dst.normalizeCount(fakeLength) - if err != nil { - return nil, err - } - if debug { - println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) - } - return dst.writeCount(nil) - } - if debug { - print("Literal lengths: ") - } - llTable, err := copyHist(block.coders.llEnc, &ll) - if err != nil { - return nil, err - } - if debug { - print("Match lengths: ") - } - mlTable, err := copyHist(block.coders.mlEnc, &ml) - if err != nil { - return nil, err - } - if debug { - print("Offsets: ") - } - ofTable, err := copyHist(block.coders.ofEnc, &of) - if err != nil { - return nil, err - } - - // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } - huffBuff := make([]byte, 0, avgSize) - // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } - if debug { - println("Huffman weights:") - } - for i, n := range remain[:] { - if n > 0 { - n = n / div - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - if debug { - printf("[%d: %d], ", i, n) - } - } - } - if o.CompatV155 && remain[255]/div == 0 { - huffBuff = append(huffBuff, 255) - } - scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { - scratch = &huff0.Scratch{TableLog: 11} - _, _, err = huff0.Compress1X(huffBuff, scratch) - if err == nil { - break - } - if debug { - printf("Try %d: Huffman error: %v\n", tries+1, err) - } - huffBuff = huffBuff[:0] - if tries == 250 { - if debug { - println("Huffman: Bailing out with predefined table") - } - - // Bail out.... Just generate something - huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { - huffBuff = append(huffBuff, byte(i)) - } - continue - } - if errors.Is(err, huff0.ErrIncompressible) { - // Try truncating least common. - for i, n := range remain[:] { - if n > 0 { - n = n / (div * (i + 1)) - if n > 0 { - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - } - } - } - if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { - huffBuff = append(huffBuff, 255) - } - if len(huffBuff) == 0 { - huffBuff = append(huffBuff, 0, 255) - } - } - if errors.Is(err, huff0.ErrUseRLE) { - for i, n := range remain[:] { - n = n / (div * (i + 1)) - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
- } - } - } - - var out bytes.Buffer - out.Write([]byte(dictMagic)) - out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) - out.Write(scratch.OutTable) - if debug { - println("huff table:", len(scratch.OutTable), "bytes") - println("of table:", len(ofTable), "bytes") - println("ml table:", len(mlTable), "bytes") - println("ll table:", len(llTable), "bytes") - } - out.Write(ofTable) - out.Write(mlTable) - out.Write(llTable) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) - out.Write(hist) - if debug { - _, err := loadDict(out.Bytes()) - if err != nil { - panic(err) - } - i, err := InspectDictionary(out.Bytes()) - if err != nil { - panic(err) - } - println("ID:", i.ID()) - println("Content size:", i.ContentSize()) - println("Encoder:", i.LitEncoder() != nil) - println("Offsets:", i.Offsets()) - var totalSize int - for _, b := range contents { - totalSize += len(b) - } - - encWith := func(opts ...EOption) int { - enc, err := NewWriter(nil, opts...) - if err != nil { - panic(err) - } - defer enc.Close() - var dst []byte - var totalSize int - for _, b := range contents { - dst = enc.EncodeAll(b, dst[:0]) - totalSize += len(dst) - } - return totalSize - } - plain := encWith(WithEncoderLevel(o.Level)) - withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) - println("Input size:", totalSize) - println("Plain Compressed:", plain) - println("Dict Compressed:", withDict) - println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") - } - return out.Bytes(), nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go deleted file mode 100644 index 5ca46038ad..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ /dev/null @@ -1,173 +0,0 @@ -package zstd - -import ( - "fmt" - "math/bits" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -const ( - dictShardBits = 6 -) - -type fastBase struct { - // cur is the offset at the start of hist - cur int32 - // maximum offset. Should be at least 2x block size. - maxMatchOff int32 - bufferReset int32 - hist []byte - crc *xxhash.Digest - tmp [8]byte - blk *blockEnc - lastDictID uint32 - lowMem bool -} - -// CRC returns the underlying CRC writer. -func (e *fastBase) CRC() *xxhash.Digest { - return e.crc -} - -// AppendCRC will append the CRC to the destination slice and return it. -func (e *fastBase) AppendCRC(dst []byte) []byte { - crc := e.crc.Sum(e.tmp[:0]) - dst = append(dst, crc[7], crc[6], crc[5], crc[4]) - return dst -} - -// WindowSize returns the window size of the encoder, -// or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int64) int32 { - if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } - return b - } - return e.maxMatchOff -} - -// Block returns the current block. 
-func (e *fastBase) Block() *blockEnc { - return e.blk -} - -func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > e.bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) - } - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.ensureHist(len(src)) - } else { - if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { - panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) - } - // Move down - offset := int32(len(e.hist)) - e.maxMatchOff - copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:e.maxMatchOff] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// ensureHist will ensure that history can keep at least this many bytes. -func (e *fastBase) ensureHist(n int) { - if cap(e.hist) >= n { - return - } - l := e.maxMatchOff - if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { - l += maxCompressedBlockSize - } else { - l += e.maxMatchOff - } - // Make it at least 1MB. - if l < 1<<20 && !e.lowMem { - l = 1 << 20 - } - // Make it at least the requested size. - if l < int32(n) { - l = int32(n) - } - e.hist = make([]byte, 0, l) -} - -// useBlock will replace the block with the provided one, -// but transfer recent offsets from the previous. -func (e *fastBase) UseBlock(enc *blockEnc) { - enc.reset(e.blk) - e.blk = enc -} - -func (e *fastBase) matchlen(s, t int32, src []byte) int32 { - if debugAsserts { - if s < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if s-t > e.maxMatchOff { - err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) - panic(err) - } - if len(src)-int(s) > maxCompressedBlockSize { - panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) - } - } - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastBase) resetBase(d *dict, singleBlock bool) { - if e.blk == nil { - e.blk = &blockEnc{lowMem: e.lowMem} - e.blk.init() - } else { - e.blk.reset(nil) - } - e.blk.initNewEncode() - if e.crc == nil { - e.crc = xxhash.New() - } else { - e.crc.Reset() - } - e.blk.dictLitEnc = nil - if d != nil { - low := e.lowMem - if singleBlock { - e.lowMem = true - } - e.ensureHist(d.ContentSize() + maxCompressedBlockSize) - e.lowMem = low - } - - // We offset current position so everything will be out of reach. - // If above reset line, history will be purged. - if e.cur < e.bufferReset { - e.cur += e.maxMatchOff + int32(len(e.hist)) - } - e.hist = e.hist[:0] - if d != nil { - // Set offsets (currently not used) - for i, off := range d.offsets { - e.blk.recentOffsets[i] = uint32(off) - e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] - } - // Transfer litenc. - e.blk.dictLitEnc = d.litEnc - e.hist = append(e.hist, d.content...) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go deleted file mode 100644 index 4613724e9d..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "bytes" - "fmt" - - "github.com/klauspost/compress" -) - -const ( - bestLongTableBits = 22 // Bits used in the long match table - bestLongTableSize = 1 << bestLongTableBits // Size of the table - bestLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - bestShortTableBits = 18 // Bits used in the short match table - bestShortTableSize = 1 << bestShortTableBits // Size of the table - bestShortLen = 4 // Bytes used for table hash - -) - -type match struct { - offset int32 - s int32 - length int32 - rep int32 - est int32 -} - -const highScore = maxMatchLen * 8 - -// estBits will estimate output bits from predefined tables. -func (m *match) estBits(bitsPerByte int32) { - mlc := mlCode(uint32(m.length - zstdMinMatch)) - var ofc uint8 - if m.rep < 0 { - ofc = ofCode(uint32(m.s-m.offset) + 3) - } else { - ofc = ofCode(uint32(m.rep) & 3) - } - // Cost, excluding - ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] - - // Add cost of match encoding... - m.est = int32(ofTT.outBits + mlTT.outBits) - m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) - // Subtract savings compared to literal encoding... - m.est -= (m.length * bitsPerByte) >> 10 - if m.est > 0 { - // Unlikely gain.. - m.length = 0 - m.est = highScore - } -} - -// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type bestFastEncoder struct { - fastBase - table [bestShortTableSize]prevEntry - longTable [bestLongTableSize]prevEntry - dictTable []prevEntry - dictLongTable []prevEntry -} - -// Encode improves compression... -func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 4 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [bestShortTableSize]prevEntry{} - e.longTable = [bestLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - v2 := e.table[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.table[i] = prevEntry{ - offset: v, - prev: v2, - } - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - // Add block to history - s := e.addBlock(src) - blk.size = len(src) - - // Check RLE first - if len(src) > zstdMinMatch { - ml := matchLen(src[1:], src) - if ml == len(src)-1 { - blk.literals = append(blk.literals, src[0]) - blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) - return - } - } - - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Use this to estimate literal cost. - // Scaled by 10 bits. - bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) - // Huffman can never go < 1 bit/byte - if bitsPerByte < 1024 { - bitsPerByte = 1024 - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - const kSearchStrength = 10 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - offset3 := int32(blk.recentOffsets[2]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - const goodEnough = 250 - - cv := load6432(src, s) - - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - // Set m to a match at offset if it looks like that will improve compression. - improve := func(m *match, offset int32, s int32, first uint32, rep int32) { - delta := s - offset - if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { - return - } - // Try to quick reject if we already have a long match. - if m.length > 16 { - left := len(src) - int(m.s+m.length) - // If we are too close to the end, keep as is. - if left <= 0 { - return - } - checkLen := m.length - (s - m.s) - 8 - if left > 2 && checkLen > 4 { - // Check 4 bytes, 4 bytes from the end of the current match. - a := load3232(src, offset+checkLen) - b := load3232(src, s+checkLen) - if a != b { - return - } - } - } - l := 4 + e.matchlen(s+4, offset+4, src) - if m.rep <= 0 { - // Extend candidate match backwards as far as possible. - // Do not extend repeats as we can assume they are optimal - // and offsets change if s == nextEmit. 
- tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { - s-- - offset-- - l++ - } - } - if debugAsserts { - if offset >= s { - panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) - } - if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { - panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } - cand := match{offset: offset, s: s, length: l, rep: rep} - cand.estBits(bitsPerByte) - if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { - *m = cand - } - } - - best := match{s: s, est: highScore} - improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) - - if canRepeat && best.length < goodEnough { - if s == nextEmit { - // Check repeats straight after a match. - improve(&best, s-offset2, s, uint32(cv), 1|4) - improve(&best, s-offset3, s, uint32(cv), 2|4) - if offset1 > 1 { - improve(&best, s-(offset1-1), s, uint32(cv), 3|4) - } - } - - // If either no match or a non-repeat match, check at + 1 - if best.rep <= 0 { - cv32 := uint32(cv >> 8) - spp := s + 1 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - if best.rep < 0 { - cv32 = uint32(cv >> 24) - spp += 2 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - } - } - } - // Load next and check... - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} - index0 := s + 1 - - // Look far ahead, unless we have a really long match already... - if best.length < goodEnough { - // No match found, move forward on input, no need to check forward... - if best.length < 4 { - s += 1 + (s-nextEmit)>>(kSearchStrength-1) - if s >= sLimit { - break encodeLoop - } - continue - } - - candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s+1) - cv2 := load6432(src, s+2) - candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] - candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] - - // Short at s+1 - improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) - // Long at s+1, s+2 - improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) - improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) - if false { - // Short at s+3. - // Too often worse... - improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) - } - - // Start check at a fixed offset to allow for a few mismatches. - // For this compression level 2 yields the best results. - // We cannot do this if we have already indexed this position. - const skipBeginning = 2 - if best.s > s-skipBeginning { - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. 
- if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - - if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - } - } - } - } - } - - if debugAsserts { - if best.offset >= best.s { - panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) - } - if best.s < nextEmit { - panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) - } - if best.offset < s-e.maxMatchOff { - panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) - } - if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { - panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) - } - } - - // We have a match, we can store the forward value - s = best.s - if best.rep > 0 { - var seq seq - seq.matchLen = uint32(best.length - zstdMinMatch) - addLiterals(&seq, best.s) - - // Repeat. If bit 4 is set, this is a non-lit repeat. - seq.offset = uint32(best.rep & 3) - if debugSequences { - println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) - } - blk.sequences = append(blk.sequences, seq) - - // Index old s + 1 -> s - 1 - s = best.s + best.length - nextEmit = s - - // Index skipped... - end := s - if s > sLimit+4 { - end = sLimit + 4 - } - off := index0 + e.cur - for index0 < end { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - off++ - index0++ - } - - switch best.rep { - case 2, 4 | 1: - offset1, offset2 = offset2, offset1 - case 3, 4 | 2: - offset1, offset2, offset3 = offset3, offset1, offset2 - case 4 | 3: - offset1, offset2, offset3 = offset1-1, offset1, offset2 - } - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - } - break encodeLoop - } - continue - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - t := best.offset - offset1, offset2, offset3 = s-t, offset1, offset2 - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && int(offset1) > len(src) { - panic("invalid offset") - } - - // Write our sequence - var seq seq - l := best.length - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - - // Index old s + 1 -> s - 1 or sLimit - end := s - if s > sLimit-4 { - end = sLimit - 4 - } - - off := index0 + e.cur - for index0 < end { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - index0++ - off++ - } - if s >= sLimit { - break encodeLoop - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - blk.recentOffsets[2] = uint32(offset3) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Reset will reset and set a dictionary if not nil -func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]prevEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = bestShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 - e.dictTable[nextHash] = prevEntry{ - prev: e.dictTable[nextHash].offset, - offset: i, - } - e.dictTable[nextHash1] = prevEntry{ - prev: e.dictTable[nextHash1].offset, - offset: i + 1, - } - e.dictTable[nextHash2] = prevEntry{ - prev: e.dictTable[nextHash2].offset, - offset: i + 2, - } - e.dictTable[nextHash3] = prevEntry{ - prev: e.dictTable[nextHash3].offset, - offset: i + 3, - } - } - e.lastDictID = d.id - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - } - // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) - - e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) -} diff --git 
a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go deleted file mode 100644 index 84a79fde76..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ /dev/null @@ -1,1252 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - betterLongTableBits = 19 // Bits used in the long match table - betterLongTableSize = 1 << betterLongTableBits // Size of the table - betterLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - betterShortTableBits = 13 // Bits used in the short match table - betterShortTableSize = 1 << betterShortTableBits // Size of the table - betterShortLen = 5 // Bytes used for table hash - - betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table - betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard - - betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table - betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard -) - -type prevEntry struct { - offset int32 - prev int32 -} - -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry -} - -type betterFastEncoderDict struct { - betterFastEncoder - dictTable []tableEntry - dictLongTable []prevEntry - shortTableShardDirty [betterShortTableShardCnt]bool - longTableShardDirty [betterLongTableShardCnt]bool - allDirty bool -} - -// Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [betterShortTableSize]tableEntry{} - e.longTable = [betterLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - // Add block to history - s := e.addBlock(src) - blk.size = len(src) - - // Check RLE first - if len(src) > zstdMinMatch { - ml := matchLen(src[1:], src) - if ml == len(src)-1 { - blk.literals = append(blk.literals, src[0]) - blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) - return - } - } - - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched, index0 int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - index0 = s + 1 - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += length + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - s += length + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. 
- prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is around 3 bytes, but depends on input. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 3 - - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - s2 := s + skipBeginning - cv := load3232(src, s2) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. 
- matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - off += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. 
-// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Encode improves compression... -func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - e.allDirty = true - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.allDirty = true - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched, index0 int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - index0 = s + 1 - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. 
- start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - s += length + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - s += length + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. 
- matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. 
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - off += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("betterFastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = betterShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - e.dictTable[nextHash3] = tableEntry{ - val: uint32(cv >> 24), - offset: i + 3, - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Reset table to initial state - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterShortTableShardCnt - const shardSize = betterShortTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.shortTableShardDirty { - e.shortTableShardDirty[i] = false - } - } else { - for i := range e.shortTableShardDirty { - if !e.shortTableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.shortTableShardDirty[i] = false - } - } - } - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterLongTableShardCnt - const shardSize = betterLongTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - } else { - 
for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) - e.longTableShardDirty[i] = false - } - } - } - e.cur = e.maxMatchOff - e.allDirty = false -} - -func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/betterLongTableShardSize] = true -} - -func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { - e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go deleted file mode 100644 index d36be7bd8c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ /dev/null @@ -1,1123 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - dFastLongTableBits = 17 // Bits used in the long match table - dFastLongTableSize = 1 << dFastLongTableBits // Size of the table - dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastLongLen = 8 // Bytes used for table hash - - dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard - - dFastShortTableBits = tableBits // Bits used in the short match table - dFastShortTableSize = 1 << dFastShortTableBits // Size of the table - dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastShortLen = 5 // Bytes used for table hash - -) - -type doubleFastEncoder struct { - fastEncoder - longTable [dFastLongTableSize]tableEntry -} - -type doubleFastEncoderDict struct { - fastEncoderDict - longTable [dFastLongTableSize]tableEntry - dictLongTable []tableEntry - longTableShardDirty [dLongTableShardCnt]bool -} - -// Encode mimmics functionality in zstd_dfast.c -func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [dFastShortTableSize]tableEntry{} - e.longTable = [dFastLongTableSize]tableEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. 
- const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. 
- // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. 
- offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - if e.cur >= e.bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - for { - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if len(blk.sequences) > 2 { - if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if len(blk.sequences) <= 2 { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < e.bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
- t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - e.markLongShardDirty(nextHashL) - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) - e.longTable[longHash1] = te0 - e.longTable[longHash2] = te1 - e.markLongShardDirty(longHash1) - e.markLongShardDirty(longHash2) - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) - hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) - e.table[hashVal1] = te0 - e.markShardDirty(hashVal1) - e.table[hashVal2] = te1 - e.markShardDirty(hashVal2) - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // If we encoded more than 64K mark all dirty. 
- if len(src) > 64<<10 { - e.markAllShardsDirty() - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { - e.fastEncoder.Reset(d, singleBlock) - if d != nil { - panic("doubleFastEncoder: Reset with dict not supported") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { - allDirty := e.allDirty - e.fastEncoderDict.Reset(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]tableEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: e.maxMatchOff, - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: i, - } - } - } - e.lastDictID = d.id - allDirty = true - } - // Reset table to initial state - e.cur = e.maxMatchOff - - dirtyShardCnt := 0 - if !allDirty { - for i := range e.longTableShardDirty { - if e.longTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - //copy(e.longTable[:], e.dictLongTable) - e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - return - } - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) - *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) - - e.longTableShardDirty[i] = false - } -} - -func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/dLongTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go deleted file mode 100644 index f45a3da7da..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ /dev/null @@ -1,891 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" -) - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableFastHashLen = 6 - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
- maxMatchLength = 131074 -) - -type tableEntry struct { - val uint32 - offset int32 -} - -type fastEncoder struct { - fastBase - table [tableSize]tableEntry -} - -type fastEncoderDict struct { - fastEncoder - dictTable []tableEntry - tableShardDirty [tableShardCnt]bool - allDirty bool -} - -// Encode mimmics functionality in zstd_fast.c -func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if debugEncoder { - if len(src) > maxCompressedBlockSize { - panic("src too big") - } - } - - // Protect against e.cur wraparound. - if e.cur >= e.bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - - for { - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0 ", t)) - } - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < e.bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if e.allDirty || len(src) > 32<<10 { - e.fastEncoder.Encode(blk, src) - e.allDirty = true - return - } - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [tableSize]tableEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 7 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - e.markShardDirty(nextHash2) - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("fastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - if true { - end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 2 { - const hashLog = tableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - - e.cur = e.maxMatchOff - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.tableShardDirty { - if e.tableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - const shardCnt = tableShardCnt - const shardSize = tableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - //copy(e.table[:], e.dictTable) - e.table = *(*[tableSize]tableEntry)(e.dictTable) - for i := range e.tableShardDirty { - e.tableShardDirty[i] = false - } - e.allDirty = false - return - } - for i := range e.tableShardDirty { - if !e.tableShardDirty[i] { - continue - } - - //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) - e.tableShardDirty[i] = false - } - e.allDirty = false -} - -func (e *fastEncoderDict) markAllShardsDirty() { - e.allDirty = true -} - -func (e *fastEncoderDict) markShardDirty(entryNum uint32) { - e.tableShardDirty[entryNum/tableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go deleted file mode 100644 index 8f8223cd3a..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ /dev/null @@ -1,642 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "math" - rdebug "runtime/debug" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Encoder provides encoding to Zstandard. -// An Encoder can be used for either compressing a stream via the -// io.WriteCloser interface supported by the Encoder or as multiple independent -// tasks via the EncodeAll function. -// Smaller encodes are encouraged to use the EncodeAll function. -// Use NewWriter to create a new instance. 
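For context on the API this patch removes from the vendor tree: the doc comment above describes two ways to drive an Encoder, streaming through io.WriteCloser and one-shot encoding through EncodeAll. The following is a minimal sketch of both modes; the level option and the inputs are illustrative, and error handling is abbreviated.

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Stream mode: the Encoder is an io.WriteCloser.
	var out bytes.Buffer
	enc, err := zstd.NewWriter(&out, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		panic(err)
	}
	_, _ = enc.Write([]byte("hello, stream"))
	_ = enc.Close() // flushes pending blocks and writes the CRC if enabled

	// Buffer mode: EncodeAll appends one complete, independent frame to dst.
	// A nil writer is fine when the Encoder is only used this way.
	blob, _ := zstd.NewWriter(nil)
	frame := blob.EncodeAll([]byte("hello, buffer"), nil)
	fmt.Println(out.Len(), len(frame))
}

As the EncodeAll doc comment further below notes, EncodeAll may be called concurrently, but each call runs on a single goroutine drawn from the pool created in initialize.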
-type Encoder struct { - o encoderOptions - encoders chan encoder - state encoderState - init sync.Once -} - -type encoder interface { - Encode(blk *blockEnc, src []byte) - EncodeNoHist(blk *blockEnc, src []byte) - Block() *blockEnc - CRC() *xxhash.Digest - AppendCRC([]byte) []byte - WindowSize(size int64) int32 - UseBlock(*blockEnc) - Reset(d *dict, singleBlock bool) -} - -type encoderState struct { - w io.Writer - filling []byte - current []byte - previous []byte - encoder encoder - writing *blockEnc - err error - writeErr error - nWritten int64 - nInput int64 - frameContentSize int64 - headerWritten bool - eofWritten bool - fullFrameWritten bool - - // This waitgroup indicates an encode is running. - wg sync.WaitGroup - // This waitgroup indicates we have a block encoding/writing. - wWg sync.WaitGroup -} - -// NewWriter will create a new Zstandard encoder. -// If the encoder will be used for encoding blocks a nil writer can be used. -func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { - initPredefined() - var e Encoder - e.o.setDefault() - for _, o := range opts { - err := o(&e.o) - if err != nil { - return nil, err - } - } - if w != nil { - e.Reset(w) - } - return &e, nil -} - -func (e *Encoder) initialize() { - if e.o.concurrent == 0 { - e.o.setDefault() - } - e.encoders = make(chan encoder, e.o.concurrent) - for i := 0; i < e.o.concurrent; i++ { - enc := e.o.encoder() - e.encoders <- enc - } -} - -// Reset will re-initialize the writer and new writes will encode to the supplied writer -// as a new, independent stream. -func (e *Encoder) Reset(w io.Writer) { - s := &e.state - s.wg.Wait() - s.wWg.Wait() - if cap(s.filling) == 0 { - s.filling = make([]byte, 0, e.o.blockSize) - } - if e.o.concurrent > 1 { - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) - } - s.current = s.current[:0] - s.previous = s.previous[:0] - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() - } - if s.encoder == nil { - s.encoder = e.o.encoder() - } - s.filling = s.filling[:0] - s.encoder.Reset(e.o.dict, false) - s.headerWritten = false - s.eofWritten = false - s.fullFrameWritten = false - s.w = w - s.err = nil - s.nWritten = 0 - s.nInput = 0 - s.writeErr = nil - s.frameContentSize = 0 -} - -// ResetContentSize will reset and set a content size for the next stream. -// If the bytes written does not match the size given an error will be returned -// when calling Close(). -// This is removed when Reset is called. -// Sizes <= 0 results in no content size set. -func (e *Encoder) ResetContentSize(w io.Writer, size int64) { - e.Reset(w) - if size >= 0 { - e.state.frameContentSize = size - } -} - -// Write data to the encoder. -// Input data will be buffered and as the buffer fills up -// content will be compressed and written to the output. -// When done writing, use Close to flush the remaining output -// and write CRC if requested. -func (e *Encoder) Write(p []byte) (n int, err error) { - s := &e.state - if s.eofWritten { - return 0, ErrEncoderClosed - } - for len(p) > 0 { - if len(p)+len(s.filling) < e.o.blockSize { - if e.o.crc { - _, _ = s.encoder.CRC().Write(p) - } - s.filling = append(s.filling, p...) - return n + len(p), nil - } - add := p - if len(p)+len(s.filling) > e.o.blockSize { - add = add[:e.o.blockSize-len(s.filling)] - } - if e.o.crc { - _, _ = s.encoder.CRC().Write(add) - } - s.filling = append(s.filling, add...) 
- p = p[len(add):] - n += len(add) - if len(s.filling) < e.o.blockSize { - return n, nil - } - err := e.nextBlock(false) - if err != nil { - return n, err - } - if debugAsserts && len(s.filling) > 0 { - panic(len(s.filling)) - } - } - return n, nil -} - -// nextBlock will synchronize and start compressing input in e.state.filling. -// If an error has occurred during encoding it will be returned. -func (e *Encoder) nextBlock(final bool) error { - s := &e.state - // Wait for current block. - s.wg.Wait() - if s.err != nil { - return s.err - } - if len(s.filling) > e.o.blockSize { - return fmt.Errorf("block > maxStoreBlockSize") - } - if !s.headerWritten { - // If we have a single block encode, do a sync compression. - if final && len(s.filling) == 0 && !e.o.fullZero { - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - if final && len(s.filling) > 0 { - s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) - var n2 int - n2, s.err = s.w.Write(s.current) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - s.nInput += int64(len(s.filling)) - s.current = s.current[:0] - s.filling = s.filling[:0] - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - - var tmp [maxHeaderSize]byte - fh := frameHeader{ - ContentSize: uint64(s.frameContentSize), - WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), - SingleSegment: false, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - dst := fh.appendTo(tmp[:0]) - s.headerWritten = true - s.wWg.Wait() - var n2 int - n2, s.err = s.w.Write(dst) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - } - if s.eofWritten { - // Ensure we only write it once. - final = false - } - - if len(s.filling) == 0 { - // Final block, but no data. - if final { - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - blk.last = true - blk.encodeRaw(nil) - s.wWg.Wait() - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.eofWritten = true - } - return s.err - } - - // SYNC: - if e.o.concurrent == 1 { - src := s.filling - s.nInput += int64(len(s.filling)) - if debugEncoder { - println("Adding sync block,", len(src), "bytes, final:", final) - } - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - - s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.err != nil { - return s.err - } - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.filling = s.filling[:0] - return s.err - } - - // Move blocks forward. - s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current - s.nInput += int64(len(s.current)) - s.wg.Add(1) - if final { - s.eofWritten = true - } - go func(src []byte) { - if debugEncoder { - println("Adding block,", len(src), "bytes, final:", final) - } - defer func() { - if r := recover(); r != nil { - s.err = fmt.Errorf("panic while encoding: %v", r) - rdebug.PrintStack() - } - s.wg.Done() - }() - enc := s.encoder - blk := enc.Block() - enc.Encode(blk, src) - blk.last = final - // Wait for pending writes. - s.wWg.Wait() - if s.writeErr != nil { - s.err = s.writeErr - return - } - // Transfer encoders from previous write block. - blk.swapEncoders(s.writing) - // Transfer recent offsets to next. 
- enc.UseBlock(s.writing) - s.writing = blk - s.wWg.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) - rdebug.PrintStack() - } - s.wWg.Done() - }() - s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.writeErr != nil { - return - } - _, s.writeErr = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - }() - }(s.current) - return nil -} - -// ReadFrom reads data from r until EOF or error. -// The return value n is the number of bytes read. -// Any error except io.EOF encountered during the read is also returned. -// -// The Copy function uses ReaderFrom if available. -func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debugEncoder { - println("Using ReadFrom") - } - - // Flush any current writes. - if len(e.state.filling) > 0 { - if err := e.nextBlock(false); err != nil { - return 0, err - } - } - e.state.filling = e.state.filling[:e.o.blockSize] - src := e.state.filling - for { - n2, err := r.Read(src) - if e.o.crc { - _, _ = e.state.encoder.CRC().Write(src[:n2]) - } - // src is now the unfilled part... - src = src[n2:] - n += int64(n2) - switch err { - case io.EOF: - e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debugEncoder { - println("ReadFrom: got EOF final block:", len(e.state.filling)) - } - return n, nil - case nil: - default: - if debugEncoder { - println("ReadFrom: got error:", err) - } - e.state.err = err - return n, err - } - if len(src) > 0 { - if debugEncoder { - println("ReadFrom: got space left in source:", len(src)) - } - continue - } - err = e.nextBlock(false) - if err != nil { - return n, err - } - e.state.filling = e.state.filling[:e.o.blockSize] - src = e.state.filling - } -} - -// Flush will send the currently written data to output -// and block until everything has been written. -// This should only be used on rare occasions where pushing the currently queued data is critical. -func (e *Encoder) Flush() error { - s := &e.state - if len(s.filling) > 0 { - err := e.nextBlock(false) - if err != nil { - // Ignore Flush after Close. - if errors.Is(s.err, ErrEncoderClosed) { - return nil - } - return err - } - } - s.wg.Wait() - s.wWg.Wait() - if s.err != nil { - // Ignore Flush after Close. - if errors.Is(s.err, ErrEncoderClosed) { - return nil - } - return s.err - } - return s.writeErr -} - -// Close will flush the final output and close the stream. -// The function will block until everything has been written. -// The Encoder can still be re-used after calling this. -func (e *Encoder) Close() error { - s := &e.state - if s.encoder == nil { - return nil - } - err := e.nextBlock(true) - if err != nil { - if errors.Is(s.err, ErrEncoderClosed) { - return nil - } - return err - } - if s.frameContentSize > 0 { - if s.nInput != s.frameContentSize { - return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) - } - } - if e.state.fullFrameWritten { - return s.err - } - s.wg.Wait() - s.wWg.Wait() - - if s.err != nil { - return s.err - } - if s.writeErr != nil { - return s.writeErr - } - - // Write CRC - if e.o.crc && s.err == nil { - // heap alloc. 
- var tmp [4]byte - _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) - s.nWritten += 4 - } - - // Add padding with content from crypto/rand.Reader - if s.err == nil && e.o.pad > 0 { - add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) - frame, err := skippableFrame(s.filling[:0], add, rand.Reader) - if err != nil { - return err - } - _, s.err = s.w.Write(frame) - } - if s.err == nil { - s.err = ErrEncoderClosed - return nil - } - - return s.err -} - -// EncodeAll will encode all input in src and append it to dst. -// This function can be called concurrently, but each call will only run on a single goroutine. -// If empty input is given, nothing is returned, unless WithZeroFrames is specified. -// Encoded blocks can be concatenated and the result will be the combined input stream. -// Data compressed with EncodeAll can be decoded with the Decoder, -// using either a stream or DecodeAll. -func (e *Encoder) EncodeAll(src, dst []byte) []byte { - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - e.encoders <- enc - }() - return e.encodeAll(enc, src, dst) -} - -func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { - if len(src) == 0 { - if e.o.fullZero { - // Add frame header. - fh := frameHeader{ - ContentSize: 0, - WindowSize: MinWindowSize, - SingleSegment: true, - // Adding a checksum would be a waste of space. - Checksum: false, - DictID: 0, - } - dst = fh.appendTo(dst) - - // Write raw block as last one only. - var blk blockHeader - blk.setSize(0) - blk.setType(blockTypeRaw) - blk.setLast(true) - dst = blk.appendTo(dst) - } - return dst - } - - // Use single segments when above minimum window and below window size. - single := len(src) <= e.o.windowSize && len(src) > MinWindowSize - if e.o.single != nil { - single = *e.o.single - } - fh := frameHeader{ - ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(int64(len(src)))), - SingleSegment: single, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { - dst = make([]byte, 0, len(src)) - } - dst = fh.appendTo(dst) - - // If we can do everything in one block, prefer that. - if len(src) <= e.o.blockSize { - enc.Reset(e.o.dict, true) - // Slightly faster with no history and everything in one block. - if e.o.crc { - _, _ = enc.CRC().Write(src) - } - blk := enc.Block() - blk.last = true - if e.o.dict == nil { - enc.EncodeNoHist(blk, src) - } else { - enc.Encode(blk, src) - } - - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - oldout := blk.output - // Output directly to dst - blk.output = dst - - err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = blk.output - blk.output = oldout - } else { - enc.Reset(e.o.dict, false) - blk := enc.Block() - for len(src) > 0 { - todo := src - if len(todo) > e.o.blockSize { - todo = todo[:e.o.blockSize] - } - src = src[len(todo):] - if e.o.crc { - _, _ = enc.CRC().Write(todo) - } - blk.pushOffsets() - enc.Encode(blk, todo) - if len(src) == 0 { - blk.last = true - } - err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = append(dst, blk.output...) 
- blk.reset(nil) - } - } - if e.o.crc { - dst = enc.AppendCRC(dst) - } - // Add padding with content from crypto/rand.Reader - if e.o.pad > 0 { - add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) - var err error - dst, err = skippableFrame(dst, add, rand.Reader) - if err != nil { - panic(err) - } - } - return dst -} - -// MaxEncodedSize returns the expected maximum -// size of an encoded block or stream. -func (e *Encoder) MaxEncodedSize(size int) int { - frameHeader := 4 + 2 // magic + frame header & window descriptor - if e.o.dict != nil { - frameHeader += 4 - } - // Frame content size: - if size < 256 { - frameHeader++ - } else if size < 65536+256 { - frameHeader += 2 - } else if size < math.MaxInt32 { - frameHeader += 4 - } else { - frameHeader += 8 - } - // Final crc - if e.o.crc { - frameHeader += 4 - } - - // Max overhead is 3 bytes/block. - // There cannot be 0 blocks. - blocks := (size + e.o.blockSize) / e.o.blockSize - - // Combine, add padding. - maxSz := frameHeader + 3*blocks + size - if e.o.pad > 1 { - maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) - } - return maxSz -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go deleted file mode 100644 index 20671dcb91..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ /dev/null @@ -1,339 +0,0 @@ -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - "runtime" - "strings" -) - -// EOption is an option for creating a encoder. -type EOption func(*encoderOptions) error - -// options retains accumulated state of multiple options. -type encoderOptions struct { - concurrent int - level EncoderLevel - single *bool - pad int - blockSize int - windowSize int - crc bool - fullZero bool - noEntropy bool - allLitEntropy bool - customWindow bool - customALEntropy bool - customBlockSize bool - lowMem bool - dict *dict -} - -func (o *encoderOptions) setDefault() { - *o = encoderOptions{ - concurrent: runtime.GOMAXPROCS(0), - crc: true, - single: nil, - blockSize: maxCompressedBlockSize, - windowSize: 8 << 20, - level: SpeedDefault, - allLitEntropy: false, - lowMem: false, - } -} - -// encoder returns an encoder with the selected options. 
-func (o encoderOptions) encoder() encoder { - switch o.level { - case SpeedFastest: - if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - - case SpeedDefault: - if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} - } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - case SpeedBetterCompression: - if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - } - panic("unknown compression level") -} - -// WithEncoderCRC will add CRC value to output. -// Output will be 4 bytes larger. -func WithEncoderCRC(b bool) EOption { - return func(o *encoderOptions) error { o.crc = b; return nil } -} - -// WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of encoders to run concurrently. -// The value supplied must be at least 1. -// For streams, setting a value of 1 will disable async compression. -// By default this will be set to GOMAXPROCS. -func WithEncoderConcurrency(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithWindowSize will set the maximum allowed back-reference distance. -// The value must be a power of two between MinWindowSize and MaxWindowSize. -// A larger value will enable better compression but allocate more memory and, -// for above-default values, take considerably longer. -// The default value is determined by the compression level and max 8MB. -func WithWindowSize(n int) EOption { - return func(o *encoderOptions) error { - switch { - case n < MinWindowSize: - return fmt.Errorf("window size must be at least %d", MinWindowSize) - case n > MaxWindowSize: - return fmt.Errorf("window size must be at most %d", MaxWindowSize) - case (n & (n - 1)) != 0: - return errors.New("window size must be a power of 2") - } - - o.windowSize = n - o.customWindow = true - if o.blockSize > o.windowSize { - o.blockSize = o.windowSize - o.customBlockSize = true - } - return nil - } -} - -// WithEncoderPadding will add padding to all output so the size will be a multiple of n. -// This can be used to obfuscate the exact output size or make blocks of a certain size. -// The contents will be a skippable frame, so it will be invisible by the decoder. -// n must be > 0 and <= 1GB, 1<<30 bytes. -// The padded area will be filled with data from crypto/rand.Reader. -// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. 
-func WithEncoderPadding(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("padding must be at least 1") - } - // No need to waste our time. - if n == 1 { - n = 0 - } - if n > 1<<30 { - return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") - } - o.pad = n - return nil - } -} - -// EncoderLevel predefines encoder compression levels. -// Only use the constants made available, since the actual mapping -// of these values are very likely to change and your compression could change -// unpredictably when upgrading the library. -type EncoderLevel int - -const ( - speedNotSet EncoderLevel = iota - - // SpeedFastest will choose the fastest reasonable compression. - // This is roughly equivalent to the fastest Zstandard mode. - SpeedFastest - - // SpeedDefault is the default "pretty fast" compression option. - // This is roughly equivalent to the default Zstandard mode (level 3). - SpeedDefault - - // SpeedBetterCompression will yield better compression than the default. - // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. - // By using this, notice that CPU usage may go up in the future. - SpeedBetterCompression - - // SpeedBestCompression will choose the best available compression option. - // This will offer the best compression no matter the CPU cost. - SpeedBestCompression - - // speedLast should be kept as the last actual compression option. - // The is not for external usage, but is used to keep track of the valid options. - speedLast -) - -// EncoderLevelFromString will convert a string representation of an encoding level back -// to a compression level. The compare is not case sensitive. -// If the string wasn't recognized, (false, SpeedDefault) will be returned. -func EncoderLevelFromString(s string) (bool, EncoderLevel) { - for l := speedNotSet + 1; l < speedLast; l++ { - if strings.EqualFold(s, l.String()) { - return true, l - } - } - return false, SpeedDefault -} - -// EncoderLevelFromZstd will return an encoder level that closest matches the compression -// ratio of a specific zstd compression level. -// Many input values will provide the same compression level. -func EncoderLevelFromZstd(level int) EncoderLevel { - switch { - case level < 3: - return SpeedFastest - case level >= 3 && level < 6: - return SpeedDefault - case level >= 6 && level < 10: - return SpeedBetterCompression - default: - return SpeedBestCompression - } -} - -// String provides a string representation of the compression level. -func (e EncoderLevel) String() string { - switch e { - case SpeedFastest: - return "fastest" - case SpeedDefault: - return "default" - case SpeedBetterCompression: - return "better" - case SpeedBestCompression: - return "best" - default: - return "invalid" - } -} - -// WithEncoderLevel specifies a predefined compression level. -func WithEncoderLevel(l EncoderLevel) EOption { - return func(o *encoderOptions) error { - switch { - case l <= speedNotSet || l >= speedLast: - return fmt.Errorf("unknown encoder level") - } - o.level = l - if !o.customWindow { - switch o.level { - case SpeedFastest: - o.windowSize = 4 << 20 - if !o.customBlockSize { - o.blockSize = 1 << 16 - } - case SpeedDefault: - o.windowSize = 8 << 20 - case SpeedBetterCompression: - o.windowSize = 8 << 20 - case SpeedBestCompression: - o.windowSize = 8 << 20 - } - } - if !o.customALEntropy { - o.allLitEntropy = l > SpeedDefault - } - - return nil - } -} - -// WithZeroFrames will encode 0 length input as full frames. 
-// This can be needed for compatibility with zstandard usage, -// but is not needed for this package. -func WithZeroFrames(b bool) EOption { - return func(o *encoderOptions) error { - o.fullZero = b - return nil - } -} - -// WithAllLitEntropyCompression will apply entropy compression if no matches are found. -// Disabling this will skip incompressible data faster, but in cases with no matches but -// skewed character distribution compression is lost. -// Default value depends on the compression level selected. -func WithAllLitEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.customALEntropy = true - o.allLitEntropy = b - return nil - } -} - -// WithNoEntropyCompression will always skip entropy compression of literals. -// This can be useful if content has matches, but unlikely to benefit from entropy -// compression. Usually the slight speed improvement is not worth enabling this. -func WithNoEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.noEntropy = b - return nil - } -} - -// WithSingleSegment will set the "single segment" flag when EncodeAll is used. -// If this flag is set, data must be regenerated within a single continuous memory segment. -// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. -// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. -// In order to preserve the decoder from unreasonable memory requirements, -// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. -// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. -// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size and the window size. -// This setting has no effect on streamed encodes. -func WithSingleSegment(b bool) EOption { - return func(o *encoderOptions) error { - o.single = &b - return nil - } -} - -// WithLowerEncoderMem will trade in some memory cases trade less memory usage for -// slower encoding speed. -// This will not change the window size which is the primary function for reducing -// memory usage. See WithWindowSize. -func WithLowerEncoderMem(b bool) EOption { - return func(o *encoderOptions) error { - o.lowMem = b - return nil - } -} - -// WithEncoderDict allows to register a dictionary that will be used for the encode. -// -// The slice dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// The encoder *may* choose to use no dictionary instead for certain payloads. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithEncoderDict(dict []byte) EOption { - return func(o *encoderOptions) error { - d, err := loadDict(dict) - if err != nil { - return err - } - o.dict = d - return nil - } -} - -// WithEncoderDictRaw registers a dictionary that may be used by the encoder. -// -// The slice content may contain arbitrary data. It will be used as an initial -// history. 
-func WithEncoderDictRaw(id uint32, content []byte) EOption { - return func(o *encoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go deleted file mode 100644 index e47af66e7c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "io" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type frameDec struct { - o decoderOptions - crc *xxhash.Digest - - WindowSize uint64 - - // Frame history passed between blocks - history history - - rawInput byteBuffer - - // Byte buffer that can be reused for small input blocks. - bBuf byteBuf - - FrameContentSize uint64 - - DictionaryID uint32 - HasCheckSum bool - SingleSegment bool -} - -const ( - // MinWindowSize is the minimum Window Size, which is 1 KB. - MinWindowSize = 1 << 10 - - // MaxWindowSize is the maximum encoder window size - // and the default decoder maximum window size. - MaxWindowSize = 1 << 29 -) - -const ( - frameMagic = "\x28\xb5\x2f\xfd" - skippableFrameMagic = "\x2a\x4d\x18" -) - -func newFrameDec(o decoderOptions) *frameDec { - if o.maxWindowSize > o.maxDecodedSize { - o.maxWindowSize = o.maxDecodedSize - } - d := frameDec{ - o: o, - } - return &d -} - -// reset will read the frame header and prepare for block decoding. -// If nothing can be read from the input, io.EOF will be returned. -// Any other error indicated that the stream contained data, but -// there was a problem. -func (d *frameDec) reset(br byteBuffer) error { - d.HasCheckSum = false - d.WindowSize = 0 - var signature [4]byte - for { - var err error - // Check if we can read more... - b, err := br.readSmall(1) - switch err { - case io.EOF, io.ErrUnexpectedEOF: - return io.EOF - case nil: - signature[0] = b[0] - default: - return err - } - // Read the rest, don't allow io.ErrUnexpectedEOF - b, err = br.readSmall(3) - switch err { - case io.EOF: - return io.EOF - case nil: - copy(signature[1:], b) - default: - return err - } - - if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { - if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) - } - // Break if not skippable frame. 
- break - } - // Read size to skip - b, err = br.readSmall(4) - if err != nil { - if debugDecoder { - println("Reading Frame Size", err) - } - return err - } - n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - println("Skipping frame with", n, "bytes.") - err = br.skipN(int64(n)) - if err != nil { - if debugDecoder { - println("Reading discarded frame", err) - } - return err - } - } - if string(signature[:]) != frameMagic { - if debugDecoder { - println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) - } - return ErrMagicMismatch - } - - // Read Frame_Header_Descriptor - fhd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Frame_Header_Descriptor", err) - } - return err - } - d.SingleSegment = fhd&(1<<5) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - d.WindowSize = 0 - if !d.SingleSegment { - wd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Window_Descriptor", err) - } - return err - } - if debugDecoder { - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) - } - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - d.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = 0 - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - - b, err := br.readSmall(int(size)) - if err != nil { - println("Reading Dictionary_ID", err) - return err - } - var id uint32 - switch len(b) { - case 1: - id = uint32(b[0]) - case 2: - id = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - if debugDecoder { - println("Dict size", size, "ID:", id) - } - d.DictionaryID = id - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if d.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - d.FrameContentSize = fcsUnknown - if fcsSize > 0 { - b, err := br.readSmall(fcsSize) - if err != nil { - println("Reading Frame content", err) - return err - } - switch len(b) { - case 1: - d.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - if debugDecoder { - println("Read FCS:", d.FrameContentSize) - } - } - - // Move this to shared. - d.HasCheckSum = fhd&(1<<2) != 0 - if d.HasCheckSum { - if d.crc == nil { - d.crc = xxhash.New() - } - d.crc.Reset() - } - - if d.WindowSize > d.o.maxWindowSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrWindowSizeExceeded - } - - if d.WindowSize == 0 && d.SingleSegment { - // We may not need window in this case. 
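The Window_Descriptor handling in the deleted frame decoder above follows the zstd spec: the top five bits of the descriptor byte select a power-of-two base (starting at the 1 KB minimum) and the low three bits add eighths of that base. A self-contained sketch of the same calculation (windowSizeFromDescriptor is an illustrative name, not part of the library):

    package main

    import "fmt"

    // windowSizeFromDescriptor mirrors the Window_Descriptor math in framedec.go:
    // windowLog = 10 + exponent, plus mantissa eighths of the base.
    func windowSizeFromDescriptor(wd byte) uint64 {
        windowLog := 10 + (wd >> 3)
        windowBase := uint64(1) << windowLog
        windowAdd := (windowBase / 8) * uint64(wd&0x7)
        return windowBase + windowAdd
    }

    func main() {
        // 0x00 is the 1 KB minimum; larger descriptors grow roughly exponentially.
        for _, wd := range []byte{0x00, 0x0b, 0x88} {
            fmt.Printf("descriptor 0x%02x -> window %d bytes\n", wd, windowSizeFromDescriptor(wd))
        }
    }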
- d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } - if d.WindowSize > d.o.maxDecodedSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrDecoderSizeExceeded - } - } - - // The minimum Window_Size is 1 KB. - if d.WindowSize < MinWindowSize { - if debugDecoder { - println("got window size: ", d.WindowSize) - } - return ErrWindowSizeTooSmall - } - d.history.windowSize = int(d.WindowSize) - if !d.o.lowMem || d.history.windowSize < maxBlockSize { - // Alloc 2x window size if not low-mem, or window size below 2MB. - d.history.allocFrameBuffer = d.history.windowSize * 2 - } else { - if d.o.lowMem { - // Alloc with 1MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 - } else { - // Alloc with 2MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize - } - } - - if debugDecoder { - println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) - } - - // history contains input - maybe we do something - d.rawInput = br - return nil -} - -// next will start decoding the next block from stream. -func (d *frameDec) next(block *blockDec) error { - if debugDecoder { - println("decoding new block") - } - err := block.reset(d.rawInput, d.WindowSize) - if err != nil { - println("block error:", err) - // Signal the frame decoder we have a problem. - block.sendErr(err) - return err - } - return nil -} - -// checkCRC will check the checksum, assuming the frame has one. -// Will return ErrCRCMismatch if crc check failed, otherwise nil. -func (d *frameDec) checkCRC() error { - // We can overwrite upper tmp now - buf, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } - - want := binary.LittleEndian.Uint32(buf[:4]) - got := uint32(d.crc.Sum64()) - - if got != want { - if debugDecoder { - printf("CRC check failed: got %08x, want %08x\n", got, want) - } - return ErrCRCMismatch - } - if debugDecoder { - printf("CRC ok %08x\n", got) - } - return nil -} - -// consumeCRC skips over the checksum, assuming the frame has one. -func (d *frameDec) consumeCRC() error { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - } - return err -} - -// runDecoder will run the decoder for the remainder of the frame. -func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { - saved := d.history.b - - // We use the history for output to avoid copying it. - d.history.b = dst - d.history.ignoreBuffer = len(dst) - // Store input length, so we only check new data. 
- crcStart := len(dst) - d.history.decoders.maxSyncLen = 0 - if d.o.limitToCap { - d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) - } - if d.FrameContentSize != fcsUnknown { - if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { - d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) - } - if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) - } - return dst, ErrDecoderSizeExceeded - } - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen) - } - if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { - // Alloc for output - dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - var err error - for { - err = dec.reset(d.rawInput, d.WindowSize) - if err != nil { - break - } - if debugDecoder { - println("next block:", dec) - } - err = dec.decodeBuf(&d.history) - if err != nil { - break - } - if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { - println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) - err = ErrDecoderSizeExceeded - break - } - if d.o.limitToCap && len(d.history.b) > cap(dst) { - println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) - err = ErrDecoderSizeExceeded - break - } - if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { - println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) - err = ErrFrameSizeExceeded - break - } - if dec.Last { - break - } - if debugDecoder { - println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) - } - } - dst = d.history.b - if err == nil { - if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { - err = ErrFrameSizeMismatch - } else if d.HasCheckSum { - if d.o.ignoreChecksum { - err = d.consumeCRC() - } else { - d.crc.Write(dst[crcStart:]) - err = d.checkCRC() - } - } - } - d.history.b = saved - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go deleted file mode 100644 index 667ca06794..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" -) - -type frameHeader struct { - ContentSize uint64 - WindowSize uint32 - SingleSegment bool - Checksum bool - DictID uint32 -} - -const maxHeaderSize = 14 - -func (f frameHeader) appendTo(dst []byte) []byte { - dst = append(dst, frameMagic...) 
- var fhd uint8 - if f.Checksum { - fhd |= 1 << 2 - } - if f.SingleSegment { - fhd |= 1 << 5 - } - - var dictIDContent []byte - if f.DictID > 0 { - var tmp [4]byte - if f.DictID < 256 { - fhd |= 1 - tmp[0] = uint8(f.DictID) - dictIDContent = tmp[:1] - } else if f.DictID < 1<<16 { - fhd |= 2 - binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) - dictIDContent = tmp[:2] - } else { - fhd |= 3 - binary.LittleEndian.PutUint32(tmp[:4], f.DictID) - dictIDContent = tmp[:4] - } - } - var fcs uint8 - if f.ContentSize >= 256 { - fcs++ - } - if f.ContentSize >= 65536+256 { - fcs++ - } - if f.ContentSize >= 0xffffffff { - fcs++ - } - - fhd |= fcs << 6 - - dst = append(dst, fhd) - if !f.SingleSegment { - const winLogMin = 10 - windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 - dst = append(dst, uint8(windowLog)) - } - if f.DictID > 0 { - dst = append(dst, dictIDContent...) - } - switch fcs { - case 0: - if f.SingleSegment { - dst = append(dst, uint8(f.ContentSize)) - } - // Unless SingleSegment is set, framessizes < 256 are not stored. - case 1: - f.ContentSize -= 256 - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) - case 2: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) - case 3: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), - uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) - default: - panic("invalid fcs") - } - return dst -} - -const skippableFrameHeader = 4 + 4 - -// calcSkippableFrame will return a total size to be added for written -// to be divisible by multiple. -// The value will always be > skippableFrameHeader. -// The function will panic if written < 0 or wantMultiple <= 0. -func calcSkippableFrame(written, wantMultiple int64) int { - if wantMultiple <= 0 { - panic("wantMultiple <= 0") - } - if written < 0 { - panic("written < 0") - } - leftOver := written % wantMultiple - if leftOver == 0 { - return 0 - } - toAdd := wantMultiple - leftOver - for toAdd < skippableFrameHeader { - toAdd += wantMultiple - } - return int(toAdd) -} - -// skippableFrame will add a skippable frame with a total size of bytes. -// total should be >= skippableFrameHeader and < math.MaxUint32. -func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { - if total == 0 { - return dst, nil - } - if total < skippableFrameHeader { - return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) - } - if int64(total) > math.MaxUint32 { - return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) - } - dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) - f := uint32(total - skippableFrameHeader) - dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) - start := len(dst) - dst = append(dst, make([]byte, f)...) - _, err := io.ReadFull(r, dst[start:]) - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go deleted file mode 100644 index 2f8860a722..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
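calcSkippableFrame in the deleted frameenc.go answers a simple alignment question: how many padding bytes (emitted as a skippable frame of at least 8 bytes) must follow `written` bytes so the total becomes a multiple of `wantMultiple`. A standalone sketch of that arithmetic (padToMultiple is an illustrative name):

    package main

    import "fmt"

    const skippableFrameHeader = 8 // 4-byte magic + 4-byte length

    // padToMultiple returns 0, or a padding size >= skippableFrameHeader,
    // such that written+padding is a multiple of wantMultiple.
    func padToMultiple(written, wantMultiple int64) int64 {
        leftOver := written % wantMultiple
        if leftOver == 0 {
            return 0
        }
        toAdd := wantMultiple - leftOver
        for toAdd < skippableFrameHeader {
            toAdd += wantMultiple
        }
        return toAdd
    }

    func main() {
        fmt.Println(padToMultiple(1000, 512)) // 24  -> 1024 total
        fmt.Println(padToMultiple(1020, 512)) // 516 -> 1536 (4 bytes would be too small for a frame)
        fmt.Println(padToMultiple(1024, 512)) // 0   -> already aligned
    }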
- -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -const ( - tablelogAbsoluteMax = 9 -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = tablelogAbsoluteMax + 2 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - maxTableMask = (1 << maxTableLog) - 1 - minTablelog = 5 - maxSymbolValue = 255 -) - -// fseDecoder provides temporary storage for compression and decompression. -type fseDecoder struct { - dt [maxTablesize]decSymbol // Decompression table. - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - maxBits uint8 // Maximum number of additional bits - - // used for table creation to avoid allocations. - stateTable [256]uint16 - norm [maxSymbolValue + 1]int16 - preDefined bool -} - -// tableStep returns the next table index. -func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -// readNCount will read the symbol distribution so decoding tables can be constructed. -func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { - var ( - charnum uint16 - previous0 bool - ) - if b.remain() < 4 { - return errors.New("input too small") - } - bitStream := b.Uint32NC() - nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog - if nbBits > tablelogAbsoluteMax { - println("Invalid tablelog:", nbBits) - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 && charnum <= maxSymbol { - if previous0 { - //println("prev0") - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - //println("24 x 0") - n0 += 24 - if r := b.remain(); r > 5 { - b.advance(2) - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - // end of bit stream - bitStream >>= 16 - bitCount += 16 - } - } - //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) - for charnum < n0 { - s.norm[uint8(charnum)] = 0 - charnum++ - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*threshold - 1) - remaining - var count int32 - - if int32(bitStream)&(threshold-1) < max { - count = int32(bitStream) & (threshold - 1) - if debugAsserts && nbBits < 1 { - panic("nbBits underflow") - } - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - // extra accuracy - count-- - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - 
charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> (bitCount & 31) - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - bitStream = b.Uint32() >> (bitCount & 31) - } - } - s.symbolLen = charnum - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<> 3) - return s.buildDtable() -} - -func (s *fseDecoder) mustReadFrom(r io.Reader) { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - // dt [maxTablesize]decSymbol // Decompression table. - // symbolLen uint16 // Length of active part of the symbol table. - // actualTableLog uint8 // Selected tablelog. - // maxBits uint8 // Maximum number of additional bits - // // used for table creation to avoid allocations. - // stateTable [256]uint16 - // norm [maxSymbolValue + 1]int16 - // preDefined bool - fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -// Using a composite uint64 is faster than a struct with separate members. -type decSymbol uint64 - -func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { - return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - -func (d decSymbol) nbBits() uint8 { - return uint8(d) -} - -func (d decSymbol) addBits() uint8 { - return uint8(d >> 8) -} - -func (d decSymbol) newState() uint16 { - return uint16(d >> 16) -} - -func (d decSymbol) baselineInt() int { - return int(d >> 32) -} - -func (d *decSymbol) setNBits(nBits uint8) { - const mask = 0xffffffffffffff00 - *d = (*d & mask) | decSymbol(nBits) -} - -func (d *decSymbol) setAddBits(addBits uint8) { - const mask = 0xffffffffffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) -} - -func (d *decSymbol) setNewState(state uint16) { - const mask = 0xffffffff0000ffff - *d = (*d & mask) | decSymbol(state)<<16 -} - -func (d *decSymbol) setExt(addBits uint8, baseline uint32) { - const mask = 0xffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) -} - -// decSymbolValue returns the transformed decSymbol for the given symbol. -func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { - if int(symb) >= len(t) { - return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) - } - lu := t[symb] - return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil -} - -// setRLE will set the decoder til RLE mode. 
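decSymbol above packs four decode-table fields into one uint64 (bit count, extra bits, next state, baseline) because copying a flat integer is cheaper than copying a struct in the hot decode loop. A small sketch of the same pack/unpack layout, using an illustrative type name:

    package main

    import "fmt"

    // decSym layout, mirroring the deleted decSymbol:
    // bits 0-7 nbBits, 8-15 addBits, 16-31 newState, 32-63 baseline.
    type decSym uint64

    func newDecSym(nbBits, addBits uint8, newState uint16, baseline uint32) decSym {
        return decSym(nbBits) |
            decSym(addBits)<<8 |
            decSym(newState)<<16 |
            decSym(baseline)<<32
    }

    func (d decSym) nbBits() uint8    { return uint8(d) }
    func (d decSym) addBits() uint8   { return uint8(d >> 8) }
    func (d decSym) newState() uint16 { return uint16(d >> 16) }
    func (d decSym) baseline() uint32 { return uint32(d >> 32) }

    func main() {
        s := newDecSym(5, 3, 1234, 99)
        fmt.Println(s.nbBits(), s.addBits(), s.newState(), s.baseline()) // 5 3 1234 99
    }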
-func (s *fseDecoder) setRLE(symbol decSymbol) { - s.actualTableLog = 0 - s.maxBits = symbol.addBits() - s.dt[0] = symbol -} - -// transform will transform the decoder table into a table usable for -// decoding without having to apply the transformation while decoding. -// The state will contain the base value and the number of bits to read. -func (s *fseDecoder) transform(t []baseOffset) error { - tableSize := uint16(1 << s.actualTableLog) - s.maxBits = 0 - for i, v := range s.dt[:tableSize] { - add := v.addBits() - if int(add) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) - } - lu := t[add] - if lu.addBits > s.maxBits { - s.maxBits = lu.addBits - } - v.setExt(lu.addBits, lu.baseLine) - s.dt[i] = v - } - return nil -} - -type fseState struct { - dt []decSymbol - state decSymbol -} - -// Initialize and decodeAsync first state and symbol. -func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { - s.dt = dt - br.fill() - s.state = dt[br.getBits(tableLog)] -} - -// final returns the current state symbol without decoding the next. -func (s decSymbol) final() (int, uint8) { - return s.baselineInt(), s.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go deleted file mode 100644 index d04a829b0a..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ /dev/null @@ -1,65 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" -) - -type buildDtableAsmContext struct { - // inputs - stateTable *uint16 - norm *int16 - dt *uint64 - - // outputs --- set by the procedure in the case of error; - // for interpretation please see the error handling part below - errParam1 uint64 - errParam2 uint64 -} - -// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. -// Function returns non-zero exit code on error. -// -//go:noescape -func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int - -// please keep in sync with _generate/gen_fse.go -const ( - errorCorruptedNormalizedCounter = 1 - errorNewStateTooBig = 2 - errorNewStateNoBits = 3 -) - -// buildDtable will build the decoding table. 
-func (s *fseDecoder) buildDtable() error { - ctx := buildDtableAsmContext{ - stateTable: &s.stateTable[0], - norm: &s.norm[0], - dt: (*uint64)(&s.dt[0]), - } - code := buildDtable_asm(s, &ctx) - - if code != 0 { - switch code { - case errorCorruptedNormalizedCounter: - position := ctx.errParam1 - return fmt.Errorf("corrupted input (position=%d, expected 0)", position) - - case errorNewStateTooBig: - newState := decSymbol(ctx.errParam1) - size := ctx.errParam2 - return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) - - case errorNewStateNoBits: - newState := decSymbol(ctx.errParam1) - oldState := decSymbol(ctx.errParam2) - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) - - default: - return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s deleted file mode 100644 index bcde398695..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s +++ /dev/null @@ -1,126 +0,0 @@ -// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int -TEXT ·buildDtable_asm(SB), $0-24 - MOVQ ctx+8(FP), CX - MOVQ s+0(FP), DI - - // Load values - MOVBQZX 4098(DI), DX - XORQ AX, AX - BTSQ DX, AX - MOVQ (CX), BX - MOVQ 16(CX), SI - LEAQ -1(AX), R8 - MOVQ 8(CX), CX - MOVWQZX 4096(DI), DI - - // End load values - // Init, lay down lowprob symbols - XORQ R9, R9 - JMP init_main_loop_condition - -init_main_loop: - MOVWQSX (CX)(R9*2), R10 - CMPW R10, $-1 - JNE do_not_update_high_threshold - MOVB R9, 1(SI)(R8*8) - DECQ R8 - MOVQ $0x0000000000000001, R10 - -do_not_update_high_threshold: - MOVW R10, (BX)(R9*2) - INCQ R9 - -init_main_loop_condition: - CMPQ R9, DI - JL init_main_loop - - // Spread symbols - // Calculate table step - MOVQ AX, R9 - SHRQ $0x01, R9 - MOVQ AX, R10 - SHRQ $0x03, R10 - LEAQ 3(R9)(R10*1), R9 - - // Fill add bits values - LEAQ -1(AX), R10 - XORQ R11, R11 - XORQ R12, R12 - JMP spread_main_loop_condition - -spread_main_loop: - XORQ R13, R13 - MOVWQSX (CX)(R12*2), R14 - JMP spread_inner_loop_condition - -spread_inner_loop: - MOVB R12, 1(SI)(R11*8) - -adjust_position: - ADDQ R9, R11 - ANDQ R10, R11 - CMPQ R11, R8 - JG adjust_position - INCQ R13 - -spread_inner_loop_condition: - CMPQ R13, R14 - JL spread_inner_loop - INCQ R12 - -spread_main_loop_condition: - CMPQ R12, DI - JL spread_main_loop - TESTQ R11, R11 - JZ spread_check_ok - MOVQ ctx+8(FP), AX - MOVQ R11, 24(AX) - MOVQ $+1, ret+16(FP) - RET - -spread_check_ok: - // Build Decoding table - XORQ DI, DI - -build_table_main_table: - MOVBQZX 1(SI)(DI*8), CX - MOVWQZX (BX)(CX*2), R8 - LEAQ 1(R8), R9 - MOVW R9, (BX)(CX*2) - MOVQ R8, R9 - BSRQ R9, R9 - MOVQ DX, CX - SUBQ R9, CX - SHLQ CL, R8 - SUBQ AX, R8 - MOVB CL, (SI)(DI*8) - MOVW R8, 2(SI)(DI*8) - CMPQ R8, AX - JLE build_table_check1_ok - MOVQ ctx+8(FP), CX - MOVQ R8, 24(CX) - MOVQ AX, 32(CX) - MOVQ $+2, ret+16(FP) - RET - -build_table_check1_ok: - TESTB CL, CL - JNZ build_table_check2_ok - CMPW R8, DI - JNE build_table_check2_ok - MOVQ ctx+8(FP), AX - MOVQ R8, 24(AX) - MOVQ DI, 32(AX) - MOVQ $+3, ret+16(FP) - RET - -build_table_check2_ok: - INCQ DI - CMPQ DI, AX - JL build_table_main_table - MOVQ $+0, ret+16(FP) - RET diff --git 
a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go deleted file mode 100644 index 8adfebb029..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "errors" - "fmt" -) - -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - v = 1 - } - symbolNext[i] = uint16(v) - } - } - - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - for { - // lowprob area - position = (position + step) & tableMask - if position <= highThreshold { - break - } - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go deleted file mode 100644 index ab26326a8f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" -) - -const ( - // For encoding we only support up to - maxEncTableLog = 8 - maxEncTablesize = 1 << maxTableLog - maxEncTableMask = (1 << maxTableLog) - 1 - minEncTablelog = 5 - maxEncSymbolValue = maxMatchLengthSymbol -) - -// Scratch provides temporary storage for compression and decompression. -type fseEncoder struct { - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - ct cTable // Compression tables. - maxCount int // count of the most probable symbol - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - useRLE bool // This encoder is for RLE - preDefined bool // This encoder is predefined. - reUsed bool // Set to know when the encoder has been reused. - rleVal uint8 // RLE Symbol - maxBits uint8 // Maximum output bits after transform. - - // TODO: Technically zstd should be fine with 64 bytes. 
- count [256]uint32 - norm [256]int16 -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaNbBits uint32 - deltaFindState int16 - outBits uint8 -} - -// String prints values as a human readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -func (s *fseEncoder) Histogram() *[256]uint32 { - return &s.count -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *fseEncoder) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. -func (s *fseEncoder) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [256]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. 
- largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = total - 1 - total++ - default: - maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = total - v - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -func (s *fseEncoder) setRLE(val byte) { - s.allocCtable() - s.actualTableLog = 0 - s.ct.stateTable = s.ct.stateTable[:1] - s.ct.symbolTT[val] = symbolTransform{ - deltaFindState: 0, - deltaNbBits: 0, - } - if debugEncoder { - println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) - } - s.rleVal = val - s.useRLE = true -} - -// setBits will set output bits for the transform. -// if nil is provided, the number of bits is equal to the index. -func (s *fseEncoder) setBits(transform []byte) { - if s.reUsed || s.preDefined { - return - } - if s.useRLE { - if transform == nil { - s.ct.symbolTT[s.rleVal].outBits = s.rleVal - s.maxBits = s.rleVal - return - } - s.maxBits = transform[s.rleVal] - s.ct.symbolTT[s.rleVal].outBits = s.maxBits - return - } - if transform == nil { - for i := range s.ct.symbolTT[:s.symbolLen] { - s.ct.symbolTT[i].outBits = uint8(i) - } - s.maxBits = uint8(s.symbolLen - 1) - return - } - s.maxBits = 0 - for i, v := range transform[:s.symbolLen] { - s.ct.symbolTT[i].outBits = v - if v > s.maxBits { - // We could assume bits always going up, but we play safe. - s.maxBits = v - } - } -} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -// If successful, compression tables will also be made ready. 
-func (s *fseEncoder) normalizeCount(length int) error { - if s.reUsed { - return nil - } - s.optimalTableLog(length) - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(length) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(length >> tableLog) - ) - if s.maxCount == length { - s.useRLE = true - return nil - } - s.useRLE = false - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - err := s.normalizeCount2(length) - if err != nil { - return err - } - if debugAsserts { - err = s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() - } - s.norm[largest] += stillToDistribute - if debugAsserts { - err := s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() -} - -// Secondary normalization method. -// To be used when primary method fails. -func (s *fseEncoder) normalizeCount2(length int) error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(length) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - 
sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *fseEncoder) optimalTableLog(length int) { - tableLog := uint8(maxEncTableLog) - minBitsSrc := highBit(uint32(length)) + 1 - minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 - minBits := uint8(minBitsSymbols) - if minBitsSrc < minBitsSymbols { - minBits = uint8(minBitsSrc) - } - - maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minEncTablelog { - tableLog = minEncTablelog - } - if tableLog > maxEncTableLog { - tableLog = maxEncTableLog - } - s.actualTableLog = tableLog -} - -// validateNorm validates the normalized histogram table. -func (s *fseEncoder) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2 - - // Write Table Size - bitStream = uint32(tableLog - minEncTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - outP = len(out) - ) - if cap(out) < outP+maxHeaderSize { - out = append(out, make([]byte, maxHeaderSize*3)...) - out = out[:len(out)-maxHeaderSize*3] - } - out = out[:outP+maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return nil, errors.New("internal error: remaining < 1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - if outP+2 > len(out) { - return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) - } - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += int((bitCount + 7) / 8) - - if charnum > s.symbolLen { - return nil, errors.New("internal error: charnum > s.symbolLen") - } - return out[:outP], nil -} - -// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) -// note 1 : assume symbolValue is valid (<= maxSymbolValue) -// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * -func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { - minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 - threshold := (minNbBits + 1) << 16 - if debugAsserts { - if !(s.actualTableLog < 16) { - panic("!s.actualTableLog < 16") - } - // ensure enough room for renormalization double shift - if !(uint8(accuracyLog) < 31-s.actualTableLog) { - panic("!uint8(accuracyLog) < 31-s.actualTableLog") - } - } - tableSize := uint32(1) << s.actualTableLog - deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) - // linear interpolation (very approximate) - normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog - bitMultiplier := uint32(1) << accuracyLog - if debugAsserts { - if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { - panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") - } - if normalizedDeltaFromThreshold > bitMultiplier { - panic("normalizedDeltaFromThreshold > bitMultiplier") - } - } - return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold -} - -// Returns the cost in bits of encoding the distribution in count using ctable. -// Histogram should only be up to the last non-zero symbol. -// Returns an -1 if ctable cannot represent all the symbols in count. -func (s *fseEncoder) approxSize(hist []uint32) uint32 { - if int(s.symbolLen) < len(hist) { - // More symbols than we have. - return math.MaxUint32 - } - if s.useRLE { - // We will never reuse RLE encoders. - return math.MaxUint32 - } - const kAccuracyLog = 8 - badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog - var cost uint32 - for i, v := range hist { - if v == 0 { - continue - } - if s.norm[i] == 0 { - return math.MaxUint32 - } - bitCost := s.bitCost(uint8(i), kAccuracyLog) - if bitCost > badCost { - return math.MaxUint32 - } - cost += v * bitCost - } - return cost >> kAccuracyLog -} - -// maxHeaderSize returns the maximum header size in bits. -// This is not exact size, but we want a penalty for new tables anyway. -func (s *fseEncoder) maxHeaderSize() uint32 { - if s.preDefined { - return 0 - } - if s.useRLE { - return 8 - } - return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 -} - -// cState contains the compression state of a stream. 
-type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - if len(c.stateTable) == 1 { - // RLE - c.stateTable[0] = uint16(0) - c.state = 0 - return - } - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + int32(first.deltaFindState) - c.state = c.stateTable[lu] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go deleted file mode 100644 index 474cb77d2b..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "sync" -) - -var ( - // fsePredef are the predefined fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredef [3]fseDecoder - - // fsePredefEnc are the predefined encoder based on fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredefEnc [3]fseEncoder - - // symbolTableX contain the transformations needed for each type as defined in - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - symbolTableX [3][]baseOffset - - // maxTableSymbol is the biggest supported symbol for each table type - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} - - // bitTables is the bits table for each table. - bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} -) - -type tableIndex uint8 - -const ( - // indexes for fsePredef and symbolTableX - tableLiteralLengths tableIndex = 0 - tableOffsets tableIndex = 1 - tableMatchLengths tableIndex = 2 - - maxLiteralLengthSymbol = 35 - maxOffsetLengthSymbol = 30 - maxMatchLengthSymbol = 52 -) - -// baseOffset is used for calculating transformations. -type baseOffset struct { - baseLine uint32 - addBits uint8 -} - -// fillBase will precalculate base offsets with the given bit distributions. 
-func fillBase(dst []baseOffset, base uint32, bits ...uint8) { - if len(bits) != len(dst) { - panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) - } - for i, bit := range bits { - if base > math.MaxInt32 { - panic("invalid decoding table, base overflows int32") - } - - dst[i] = baseOffset{ - baseLine: base, - addBits: bit, - } - base += 1 << bit - } -} - -var predef sync.Once - -func initPredefined() { - predef.Do(func() { - // Literals length codes - tmp := make([]baseOffset, 36) - for i := range tmp[:16] { - tmp[i] = baseOffset{ - baseLine: uint32(i), - addBits: 0, - } - } - fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableLiteralLengths] = tmp - - // Match length codes - tmp = make([]baseOffset, 53) - for i := range tmp[:32] { - tmp[i] = baseOffset{ - // The transformation adds the 3 length. - baseLine: uint32(i) + 3, - addBits: 0, - } - } - fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableMatchLengths] = tmp - - // Offset codes - tmp = make([]baseOffset, maxOffsetBits+1) - tmp[1] = baseOffset{ - baseLine: 1, - addBits: 1, - } - fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) - symbolTableX[tableOffsets] = tmp - - // Fill predefined tables and transform them. - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - for i := range fsePredef[:] { - f := &fsePredef[i] - switch tableIndex(i) { - case tableLiteralLengths: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 - f.actualTableLog = 6 - copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, - -1, -1, -1, -1}) - f.symbolLen = 36 - case tableOffsets: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 - f.actualTableLog = 5 - copy(f.norm[:], []int16{ - 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) - f.symbolLen = 29 - case tableMatchLengths: - //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 - f.actualTableLog = 6 - copy(f.norm[:], []int16{ - 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, - -1, -1, -1, -1, -1}) - f.symbolLen = 53 - } - if err := f.buildDtable(); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - if err := f.transform(symbolTableX[i]); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - f.preDefined = true - - // Create encoder as well - enc := &fsePredefEnc[i] - copy(enc.norm[:], f.norm[:]) - enc.symbolLen = f.symbolLen - enc.actualTableLog = f.actualTableLog - if err := enc.buildCTable(); err != nil { - panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) - } - enc.setBits(bitTables[i]) - enc.preDefined = true - } - }) -} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go deleted file mode 100644 index 5d73c21ebd..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. 
-// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go deleted file mode 100644 index 09164856d2..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "github.com/klauspost/compress/huff0" -) - -// history contains the information transferred between blocks. -type history struct { - // Literal decompression - huffTree *huff0.Scratch - - // Sequence decompression - decoders sequenceDecs - recentOffsets [3]int - - // History buffer... - b []byte - - // ignoreBuffer is meant to ignore a number of bytes - // when checking for matches in history - ignoreBuffer int - - windowSize int - allocFrameBuffer int // needed? - error bool - dict *dict -} - -// reset will reset the history to initial state of a frame. -// The history must already have been initialized to the desired size. -func (h *history) reset() { - h.b = h.b[:0] - h.ignoreBuffer = 0 - h.error = false - h.recentOffsets = [3]int{1, 4, 8} - h.decoders.freeDecoders() - h.decoders = sequenceDecs{br: h.decoders.br} - h.freeHuffDecoder() - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) -} - -func (h *history) freeHuffDecoder() { - if h.huffTree != nil { - if h.dict == nil || h.dict.litEnc != h.huffTree { - huffDecoderPool.Put(h.huffTree) - h.huffTree = nil - } - } -} - -func (h *history) setDict(dict *dict) { - if dict == nil { - return - } - h.dict = dict - h.decoders.litLengths = dict.llDec - h.decoders.offsets = dict.ofDec - h.decoders.matchLengths = dict.mlDec - h.decoders.dict = dict.content - h.recentOffsets = dict.offsets - h.huffTree = dict.litEnc -} - -// append bytes to history. -// This function will make sure there is space for it, -// if the buffer has been allocated with enough extra space. -func (h *history) append(b []byte) { - if len(b) >= h.windowSize { - // Discard all history by simply overwriting - h.b = h.b[:h.windowSize] - copy(h.b, b[len(b)-h.windowSize:]) - return - } - - // If there is space, append it. - if len(b) < cap(h.b)-len(h.b) { - h.b = append(h.b, b...) - return - } - - // Move data down so we only have window size left. 
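hashLen in the deleted hash.go is the usual multiplicative match-finder hash: keep the low mls bytes of a 64-bit load, multiply by a large odd prime, and take the top `length` bits as the table index. A sketch of the 8-byte case (hash8 is an illustrative name):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    const prime8bytes = 0xcf1bbcdcb7a56463

    // hash8 mirrors the mls == 8 branch of the deleted hashLen:
    // multiply the full 8-byte value and keep the top `length` bits.
    func hash8(u uint64, length uint8) uint32 {
        return uint32((u * prime8bytes) >> (64 - length))
    }

    func main() {
        data := []byte("zstd hash demo!!")
        u := binary.LittleEndian.Uint64(data[:8])
        // A 17-bit hash indexes a 128Ki-entry match-finder table.
        fmt.Printf("bucket %d of %d\n", hash8(u, 17), 1<<17)
    }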
- // We know we have less than window size in b at this point. - discard := len(b) + len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] - copy(h.b[h.windowSize-len(b):], b) -} - -// ensureBlock will ensure there is space for at least one block... -func (h *history) ensureBlock() { - if cap(h.b) < h.allocFrameBuffer { - h.b = make([]byte, 0, h.allocFrameBuffer) - return - } - - avail := cap(h.b) - len(h.b) - if avail >= h.windowSize || avail > maxCompressedBlockSize { - return - } - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] -} - -// append bytes to history without ever discarding anything. -func (h *history) appendKeep(b []byte) { - h.b = append(h.b, b...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f4..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md deleted file mode 100644 index 777290d44c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# xxhash - -VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - -xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -The package is written with optimized pure Go and also contains even faster -assembly implementations for amd64 and arm64. If desired, the `purego` build tag -opts into using the Go code even on those architectures. 
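The API summarized in this README is deliberately small. A usage sketch against the upstream module the vendored copy points back to (github.com/cespare/xxhash/v2):

    package main

    import (
        "fmt"

        "github.com/cespare/xxhash/v2"
    )

    func main() {
        // One-shot hashing.
        fmt.Printf("%016x\n", xxhash.Sum64String("hello, xxhash"))

        // Streaming via Digest, which implements hash.Hash64.
        d := xxhash.New()
        d.WriteString("hello, ")
        d.Write([]byte("xxhash"))
        fmt.Printf("%016x\n", d.Sum64()) // same input, so same hash as above
    }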
- -[xxHash]: http://cyan4973.github.io/xxHash/ - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| ---------- | --------- | --------- | -| 4 B | 1.3 GB/s | 1.2 GB/s | -| 16 B | 2.9 GB/s | 3.5 GB/s | -| 100 B | 6.9 GB/s | 8.1 GB/s | -| 4 KB | 11.7 GB/s | 16.7 GB/s | -| 10 MB | 12.0 GB/s | 17.3 GB/s | - -These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C -CPU using the following commands under Go 1.19.2: - -``` -benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') -benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) -- [FreeCache](https://github.com/coocood/freecache) -- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go deleted file mode 100644 index fc40c82001..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ /dev/null @@ -1,230 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. - -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// Store the primes in an array as well. -// -// The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. -var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - memleft := d.mem[d.n&(len(d.mem)-1):] - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(memleft, b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. 
- c := copy(memleft, b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[c:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - b := d.mem[:d.n&(len(d.mem)-1)] - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s deleted file mode 100644 index ddb63aa91b..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,210 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define h AX -#define d AX -#define p SI // pointer to advance through b -#define n DX -#define end BX // loop end -#define v1 R8 -#define v2 R9 -#define v3 R10 -#define v4 R11 -#define x R12 -#define prime1 R13 -#define prime2 R14 -#define prime4 DI - -#define round(acc, x) \ - IMULQ prime2, x \ - ADDQ x, acc \ - ROLQ $31, acc \ - IMULQ prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - IMULQ prime2, x \ - ROLQ $31, x \ - IMULQ prime1, x - -// mergeRound applies a merge round on the two registers acc and x. -// It assumes that prime1, prime2, and prime4 have been loaded. -#define mergeRound(acc, x) \ - round0(x) \ - XORQ x, acc \ - IMULQ prime1, acc \ - ADDQ prime4, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that there is at least one block -// to process. -#define blockLoop() \ -loop: \ - MOVQ +0(p), x \ - round(v1, x) \ - MOVQ +8(p), x \ - round(v2, x) \ - MOVQ +16(p), x \ - round(v3, x) \ - MOVQ +24(p), x \ - round(v4, x) \ - ADDQ $32, p \ - CMPQ p, end \ - JLE loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - // Load fixed primes. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - MOVQ ·primes+24(SB), prime4 - - // Load slice. - MOVQ b_base+0(FP), p - MOVQ b_len+8(FP), n - LEAQ (p)(n*1), end - - // The first loop limit will be len(b)-32. - SUBQ $32, end - - // Check whether we have at least one block. - CMPQ n, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). 
- MOVQ prime1, v1 - ADDQ prime2, v1 - MOVQ prime2, v2 - XORQ v3, v3 - XORQ v4, v4 - SUBQ prime1, v4 - - blockLoop() - - MOVQ v1, h - ROLQ $1, h - MOVQ v2, x - ROLQ $7, x - ADDQ x, h - MOVQ v3, x - ROLQ $12, x - ADDQ x, h - MOVQ v4, x - ROLQ $18, x - ADDQ x, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - - JMP afterBlocks - -noBlocks: - MOVQ ·primes+32(SB), h - -afterBlocks: - ADDQ n, h - - ADDQ $24, end - CMPQ p, end - JG try4 - -loop8: - MOVQ (p), x - ADDQ $8, p - round0(x) - XORQ x, h - ROLQ $27, h - IMULQ prime1, h - ADDQ prime4, h - - CMPQ p, end - JLE loop8 - -try4: - ADDQ $4, end - CMPQ p, end - JG try1 - - MOVL (p), x - ADDQ $4, p - IMULQ prime1, x - XORQ x, h - - ROLQ $23, h - IMULQ prime2, h - ADDQ ·primes+16(SB), h - -try1: - ADDQ $4, end - CMPQ p, end - JGE finalize - -loop1: - MOVBQZX (p), x - ADDQ $1, p - IMULQ ·primes+32(SB), x - XORQ x, h - ROLQ $11, h - IMULQ prime1, h - - CMPQ p, end - JL loop1 - -finalize: - MOVQ h, x - SHRQ $33, x - XORQ x, h - IMULQ prime2, h - MOVQ h, x - SHRQ $29, x - XORQ x, h - IMULQ ·primes+16(SB), h - MOVQ h, x - SHRQ $32, x - XORQ x, h - - MOVQ h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - // Load fixed primes needed for round. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - - // Load slice. - MOVQ b_base+8(FP), p - MOVQ b_len+16(FP), n - LEAQ (p)(n*1), end - SUBQ $32, end - - // Load vN from d. - MOVQ s+0(FP), d - MOVQ 0(d), v1 - MOVQ 8(d), v2 - MOVQ 16(d), v3 - MOVQ 24(d), v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. - blockLoop() - - // Copy vN back to d. - MOVQ v1, 0(d) - MOVQ v2, 8(d) - MOVQ v3, 16(d) - MOVQ v4, 24(d) - - // The number of bytes written is p minus the old base pointer. - SUBQ b_base+8(FP), p - MOVQ p, ret+32(FP) - - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s deleted file mode 100644 index ae7d4d3295..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ /dev/null @@ -1,184 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define digest R1 -#define h R2 // return value -#define p R3 // input pointer -#define n R4 // input length -#define nblocks R5 // n / 32 -#define prime1 R7 -#define prime2 R8 -#define prime3 R9 -#define prime4 R10 -#define prime5 R11 -#define v1 R12 -#define v2 R13 -#define v3 R14 -#define v4 R15 -#define x1 R20 -#define x2 R21 -#define x3 R22 -#define x4 R23 - -#define round(acc, x) \ - MADD prime2, acc, x, acc \ - ROR $64-31, acc \ - MUL prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - MUL prime2, x \ - ROR $64-31, x \ - MUL prime1, x - -#define mergeRound(acc, x) \ - round0(x) \ - EOR x, acc \ - MADD acc, prime4, prime1, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that n >= 32. 
-#define blockLoop() \ - LSR $5, n, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 16(p), (x1, x2) \ - LDP.P 16(p), (x3, x4) \ - round(v1, x1) \ - round(v2, x2) \ - round(v3, x3) \ - round(v4, x4) \ - SUB $1, nblocks \ - CBNZ nblocks, loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - LDP b_base+0(FP), (p, n) - - LDP ·primes+0(SB), (prime1, prime2) - LDP ·primes+16(SB), (prime3, prime4) - MOVD ·primes+32(SB), prime5 - - CMP $32, n - CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } - BLT afterLoop - - ADD prime1, prime2, v1 - MOVD prime2, v2 - MOVD $0, v3 - NEG prime1, v4 - - blockLoop() - - ROR $64-1, v1, x1 - ROR $64-7, v2, x2 - ADD x1, x2 - ROR $64-12, v3, x3 - ROR $64-18, v4, x4 - ADD x3, x4 - ADD x2, x4, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - -afterLoop: - ADD n, h - - TBZ $4, n, try8 - LDP.P 16(p), (x1, x2) - - round0(x1) - - // NOTE: here and below, sequencing the EOR after the ROR (using a - // rotated register) is worth a small but measurable speedup for small - // inputs. - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - - round0(x2) - ROR $64-27, h - EOR x2 @> 64-27, h, h - MADD h, prime4, prime1, h - -try8: - TBZ $3, n, try4 - MOVD.P 8(p), x1 - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - -try4: - TBZ $2, n, try2 - MOVWU.P 4(p), x2 - - MUL prime1, x2 - ROR $64-23, h - EOR x2 @> 64-23, h, h - MADD h, prime3, prime2, h - -try2: - TBZ $1, n, try1 - MOVHU.P 2(p), x3 - AND $255, x3, x1 - LSR $8, x3, x2 - - MUL prime5, x1 - ROR $64-11, h - EOR x1 @> 64-11, h, h - MUL prime1, h - - MUL prime5, x2 - ROR $64-11, h - EOR x2 @> 64-11, h, h - MUL prime1, h - -try1: - TBZ $0, n, finalize - MOVBU (p), x4 - - MUL prime5, x4 - ROR $64-11, h - EOR x4 @> 64-11, h, h - MUL prime1, h - -finalize: - EOR h >> 33, h - MUL prime2, h - EOR h >> 29, h - MUL prime3, h - EOR h >> 32, h - - MOVD h, ret+24(FP) - RET - -// func writeBlocks(s *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - LDP ·primes+0(SB), (prime1, prime2) - - // Load state. Assume v[1-4] are stored contiguously. - MOVD s+0(FP), digest - LDP 0(digest), (v1, v2) - LDP 16(digest), (v3, v4) - - LDP b_base+8(FP), (p, n) - - blockLoop() - - // Store updated state. - STP (v1, v2), 0(digest) - STP (v3, v4), 16(digest) - - BIC $31, n - MOVD n, ret+32(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go deleted file mode 100644 index d4221edf4f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm -// +build amd64 arm64 -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go deleted file mode 100644 index 0be16cefc7..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := primes[0] + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -primes[0] - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go deleted file mode 100644 index 6f3b0cb102..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go +++ /dev/null @@ -1,11 +0,0 @@ -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go deleted file mode 100644 index f41932b7a4..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
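Both matchlen implementations removed here (the amd64 assembly and the generic
Go fallback) share one contract: matchLen(a, b) returns the length of the
common prefix of a and b, and the caller must guarantee len(a) <= len(b); the
assembly additionally assumes len(a) > 0. A sketch of a call site honoring that
precondition, where `cur` and `cand` are hypothetical slices into one buffer:

	a, b := cur, cand
	if len(a) > len(b) {
		a, b = b, a // matchLen requires len(a) <= len(b)
	}
	if len(a) > 0 {
		n := matchLen(a, b) // number of leading bytes that match
		_ = n
	}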
- -package zstd - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s deleted file mode 100644 index 0782b86e3d..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ /dev/null @@ -1,66 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SHRL $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go deleted file mode 100644 index 57b9c31c02..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "math/bits" -) - -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go deleted file mode 100644 index d7fe6d82d9..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "io" -) - -type seq struct { - litLen uint32 - matchLen uint32 - offset uint32 - - // Codes are stored here for the encoder - // so they only have to be looked up once. 
-	llCode, mlCode, ofCode uint8
-}
-
-type seqVals struct {
-	ll, ml, mo int
-}
-
-func (s seq) String() string {
-	if s.offset <= 3 {
-		if s.offset == 0 {
-			return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)")
-		}
-		return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)")
-	}
-	return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)")
-}
-
-type seqCompMode uint8
-
-const (
-	compModePredefined seqCompMode = iota
-	compModeRLE
-	compModeFSE
-	compModeRepeat
-)
-
-type sequenceDec struct {
-	// decoder keeps track of the current state and updates it from the bitstream.
-	fse    *fseDecoder
-	state  fseState
-	repeat bool
-}
-
-// init the state of the decoder with input from stream.
-func (s *sequenceDec) init(br *bitReader) error {
-	if s.fse == nil {
-		return errors.New("sequence decoder not defined")
-	}
-	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
-	return nil
-}
-
-type sequenceDecs struct {
-	litLengths   sequenceDec
-	offsets      sequenceDec
-	matchLengths sequenceDec
-	prevOffset   [3]int
-	dict         []byte
-	literals     []byte
-	out          []byte
-	nSeqs        int
-	br           *bitReader
-	seqSize      int
-	windowSize   int
-	maxBits      uint8
-	maxSyncLen   uint64
-}
-
-func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
-	// Ensure we have enough output size...
-	if len(s.out)+s.seqSize > cap(s.out) {
-		addBytes := s.seqSize + len(s.out)
-		s.out = append(s.out, make([]byte, addBytes)...)
-		s.out = s.out[:len(s.out)-addBytes]
-	}
-
-	if debugDecoder {
-		printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
-	}
-
-	var t = len(s.out)
-	out := s.out[:t+s.seqSize]
-
-	for _, seq := range seqs {
-		// Add literals
-		copy(out[t:], s.literals[:seq.ll])
-		t += seq.ll
-		s.literals = s.literals[seq.ll:]
-
-		// Copy from dictionary...
-		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
-			if len(s.dict) == 0 {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
-			}
-
-			// we may be in dictionary.
-			dictO := len(s.dict) - (seq.mo - (t + len(hist)))
-			if dictO < 0 || dictO >= len(s.dict) {
-				return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
-			}
-			end := dictO + seq.ml
-			if end > len(s.dict) {
-				n := len(s.dict) - dictO
-				copy(out[t:], s.dict[dictO:])
-				t += n
-				seq.ml -= n
-			} else {
-				copy(out[t:], s.dict[dictO:end])
-				t += end - dictO
-				continue
-			}
-		}
-
-		// Copy from history.
-		if v := seq.mo - t; v > 0 {
-			// v is the start position in history from end.
-			start := len(hist) - v
-			if seq.ml > v {
-				// Some goes into current block.
-				// Copy remainder of history
-				copy(out[t:], hist[start:])
-				t += v
-				seq.ml -= v
-			} else {
-				copy(out[t:], hist[start:start+seq.ml])
-				t += seq.ml
-				continue
-			}
-		}
-		// We must be in current buffer now
-		if seq.ml > 0 {
-			start := t - seq.mo
-			if seq.ml <= t-start {
-				// No overlap
-				copy(out[t:], out[start:start+seq.ml])
-				t += seq.ml
-				continue
-			} else {
-				// Overlapping copy
-				// Extend destination slice and copy one byte at the time.
-				src := out[start : start+seq.ml]
-				dst := out[t:]
-				dst = dst[:len(src)]
-				t += len(src)
-				// Destination is the space we just added.
-				for i := range src {
-					dst[i] = src[i]
-				}
-			}
-		}
-	}
-
-	// Add final literals
-	copy(out[t:], s.literals)
-	if debugDecoder {
-		t += len(s.literals)
-		if t != len(out) {
-			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
-		}
-	}
-	s.out = out
-
-	return nil
-}
-
-// decode sequences from the stream with the provided history.
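// (A worked sketch of the repeat-offset handling that decodeSync inlines
// below, using the initial recent offsets {1, 4, 8} set by history.reset:
//
//	mo=0, ll>0  -> offset prevOffset[0] = 1, recents stay {1, 4, 8}
//	mo=1, ll>0  -> offset prevOffset[1] = 4, recents become {4, 1, 8}
//	mo=1, ll==0 -> shifted to mo=2 -> offset prevOffset[2] = 8,
//	               recents become {8, 1, 4}
//	mo=2, ll==0 -> shifted to mo=3 -> offset prevOffset[0]-1 = 0, which is
//	               invalid, so it is forced to 1
//
// The same logic lives in adjustOffset further down; the inline copy exists
// purely for speed.)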
-func (s *sequenceDecs) decodeSync(hist []byte) error { - supported, err := s.decodeSyncSimple(hist) - if supported { - return err - } - - br := s.br - seqs := s.nSeqs - startSize := len(s.out) - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - if debugDecoder { - println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") - } - for i := seqs - 1; i >= 0; i-- { - if br.overread() { - printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) - return io.ErrUnexpectedEOF - } - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - - if ll > len(s.literals) { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) - } - size := ll + ml + len(out) - if size-startSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if size > cap(out) { - // Not enough size, which can happen under high volume block streaming conditions - // but could be if destination slice is too small for sync operations. - // over-allocating here can create a large amount of GC pressure so we try to keep - // it as contained as possible - used := len(out) - startSize - addBytes := 256 + ll + ml + used>>2 - // Clamp to max block size. - if used+addBytes > maxBlockSize { - addBytes = maxBlockSize - used - } - out = append(out, make([]byte, addBytes)...) - out = out[:len(out)-addBytes] - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - - // Add literals - out = append(out, s.literals[:ll]...) 
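// (Illustration with made-up numbers: a sequence {ll: 2, ml: 4, mo: 5}
// emits 2 pending literal bytes here, then copies 4 bytes starting 5
// bytes back in the already-produced output; the overlapping case below
// copies byte by byte so repeated patterns expand correctly.)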
- s.literals = s.literals[ll:] - - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - - if mo > len(out)+len(hist) || mo > s.windowSize { - if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) - } - - // we may be in dictionary. - dictO := len(s.dict) - (mo - (len(out) + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) - } - end := dictO + ml - if end > len(s.dict) { - out = append(out, s.dict[dictO:]...) - ml -= len(s.dict) - dictO - } else { - out = append(out, s.dict[dictO:end]...) - mo = 0 - ml = 0 - } - } - - // Copy from history. - // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(out); v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if ml > v { - // Some goes into current block. - // Copy remainder of history - out = append(out, hist[start:]...) - ml -= v - } else { - out = append(out, hist[start:start+ml]...) - ml = 0 - } - } - // We must be in current buffer now - if ml > 0 { - start := len(out) - mo - if ml <= len(out)-start { - // No overlap - out = append(out, out[start:start+ml]...) - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - out = out[:len(out)+ml] - src := out[start : start+ml] - // Destination is the space we just added. - dst := out[len(out)-ml:] - dst = dst[:len(src)] - for i := range src { - dst[i] = src[i] - } - } - } - if i == 0 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - - if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - - // Add final literals - s.out = append(out, s.literals...) - return br.close() -} - -var bitMask [16]uint16 - -func init() { - for i := range bitMask[:] { - bitMask[i] = uint16((1 << uint(i)) - 1) - } -} - -func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. 
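// (The sequence bitstream is written forward but consumed backward, so
// the offset extra bits -- written last -- are read first, then match
// length, then literal length; the inlined fast path above follows the
// same order.)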
- br.fill() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fill() - } - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) - mo = s.adjustOffset(mo, ll, moB) - return -} - -func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { - if offsetB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = offset - return offset - } - - if litLen == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - offset++ - } - - if offset == 0 { - return s.prevOffset[0] - } - var temp int - if offset == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[offset] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if offset != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - return temp -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go deleted file mode 100644 index c59f17e07a..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ /dev/null @@ -1,394 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" - "io" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -type decodeSyncAsmContext struct { - llTable []decSymbol - mlTable []decSymbol - ofTable []decSymbol - llState uint64 - mlState uint64 - ofState uint64 - iteration int - litRemain int - out []byte - outPosition int - literals []byte - litPosition int - history []byte - windowSize int - ll int // set on error (not for all errors, please refer to _generate/gen.go) - ml int // set on error (not for all errors, please refer to _generate/gen.go) - mo int // set on error (not for all errors, please refer to _generate/gen.go) -} - -// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// decode sequences from the stream with the provided history but without a dictionary. 
-func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - if len(s.dict) > 0 { - return false, nil - } - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { - return false, nil - } - - // FIXME: Using unsafe memory copies leads to rare, random crashes - // with fuzz testing. It is therefore disabled for now. - const useSafe = true - /* - useSafe := false - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { - useSafe = true - } - if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { - useSafe = true - } - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - useSafe = true - } - */ - - br := s.br - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - ctx := decodeSyncAsmContext{ - llTable: s.litLengths.fse.dt[:maxTablesize], - mlTable: s.matchLengths.fse.dt[:maxTablesize], - ofTable: s.offsets.fse.dt[:maxTablesize], - llState: uint64(s.litLengths.state.state), - mlState: uint64(s.matchLengths.state.state), - ofState: uint64(s.offsets.state.state), - iteration: s.nSeqs - 1, - litRemain: len(s.literals), - out: s.out, - outPosition: len(s.out), - literals: s.literals, - windowSize: s.windowSize, - history: hist, - } - - s.seqSize = 0 - startSize := len(s.out) - - var errCode int - if cpuinfo.HasBMI2() { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) - } - } else { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) - } - } - switch errCode { - case noError: - break - - case errorMatchLenOfsMismatch: - return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) - - case errorMatchLenTooBig: - return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) - - case errorMatchOffTooBig: - return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", - ctx.mo, ctx.outPosition+len(hist)-startSize) - - case errorNotEnoughLiterals: - return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", - ctx.ll, ctx.litRemain+ctx.ll) - - case errorOverread: - return true, io.ErrUnexpectedEOF - - case errorNotEnoughSpace: - size := ctx.outPosition + ctx.ll + ctx.ml - if debugDecoder { - println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) - } - return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - - default: - return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) - } - - s.seqSize += ctx.litRemain - if s.seqSize > maxBlockSize { - return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - return true, err - } - - s.literals = s.literals[ctx.litPosition:] - t := ctx.outPosition - s.out = s.out[:t] - - // Add final literals - s.out = append(s.out, s.literals...) 
- if debugDecoder { - t += len(s.literals) - if t != len(s.out) { - panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) - } - } - - return true, nil -} - -// -------------------------------------------------------------------------------- - -type decodeAsmContext struct { - llTable []decSymbol - mlTable []decSymbol - ofTable []decSymbol - llState uint64 - mlState uint64 - ofState uint64 - iteration int - seqs []seqVals - litRemain int -} - -const noError = 0 - -// error reported when mo == 0 && ml > 0 -const errorMatchLenOfsMismatch = 1 - -// error reported when ml > maxMatchLen -const errorMatchLenTooBig = 2 - -// error reported when mo > available history or mo > s.windowSize -const errorMatchOffTooBig = 3 - -// error reported when the sum of literal lengths exeeceds the literal buffer size -const errorNotEnoughLiterals = 4 - -// error reported when capacity of `out` is too small -const errorNotEnoughSpace = 5 - -// error reported when bits are overread. -const errorOverread = 6 - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// decode sequences from the stream without the provided history. 
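// (Why the "_56" kernels exist: one sequence consumes at most maxBits
// extra bits plus the three FSE table logs for the state updates. When
// that sum is <= 56 -- e.g. table logs 9+9+8 with up to 30 extra offset
// bits -- a single bitreader refill per sequence is guaranteed to be
// enough, so decode below selects the _56 variants; otherwise the general
// variants refill again mid-sequence.)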
-func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - ctx := decodeAsmContext{ - llTable: s.litLengths.fse.dt[:maxTablesize], - mlTable: s.matchLengths.fse.dt[:maxTablesize], - ofTable: s.offsets.fse.dt[:maxTablesize], - llState: uint64(s.litLengths.state.state), - mlState: uint64(s.matchLengths.state.state), - ofState: uint64(s.offsets.state.state), - seqs: seqs, - iteration: len(seqs) - 1, - litRemain: len(s.literals), - } - - if debugDecoder { - println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") - } - - s.seqSize = 0 - lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 - var errCode int - if cpuinfo.HasBMI2() { - if lte56bits { - errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) - } else { - errCode = sequenceDecs_decode_bmi2(s, br, &ctx) - } - } else { - if lte56bits { - errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) - } else { - errCode = sequenceDecs_decode_amd64(s, br, &ctx) - } - } - if errCode != 0 { - i := len(seqs) - ctx.iteration - 1 - switch errCode { - case errorMatchLenOfsMismatch: - ml := ctx.seqs[i].ml - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - - case errorMatchLenTooBig: - ml := ctx.seqs[i].ml - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - - case errorNotEnoughLiterals: - ll := ctx.seqs[i].ll - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) - case errorOverread: - return io.ErrUnexpectedEOF - } - - return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) - } - - if ctx.litRemain < 0 { - return fmt.Errorf("literal count is too big: total available %d, total requested %d", - len(s.literals), len(s.literals)-ctx.litRemain) - } - - s.seqSize += ctx.litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if debugDecoder { - println("decode: ", br.remain(), "bits remain on stream. code:", errCode) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// -------------------------------------------------------------------------------- - -type executeAsmContext struct { - seqs []seqVals - seqIndex int - out []byte - history []byte - literals []byte - outPosition int - litPosition int - windowSize int -} - -// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. -// -// Returns false if a match offset is too big. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool - -// Same as above, but with safe memcopies -// -//go:noescape -func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool - -// executeSimple handles cases when dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { - addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc - s.out = append(s.out, make([]byte, addBytes)...) 
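// (The append above exists only to grow the backing array to the final
// size in one allocation; the next line slices the length back down so
// the executor can extend into the pre-grown capacity without
// reallocating.)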
- s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - ctx := executeAsmContext{ - seqs: seqs, - seqIndex: 0, - out: out, - history: hist, - outPosition: t, - litPosition: 0, - literals: s.literals, - windowSize: s.windowSize, - } - var ok bool - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - ok = sequenceDecs_executeSimple_safe_amd64(&ctx) - } else { - ok = sequenceDecs_executeSimple_amd64(&ctx) - } - if !ok { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", - seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) - } - s.literals = s.literals[ctx.litPosition:] - t = ctx.outPosition - - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s deleted file mode 100644 index f5591fa1e8..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ /dev/null @@ -1,4151 +0,0 @@ -// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_end - -sequenceDecs_decode_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_byte_by_byte - -sequenceDecs_decode_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_2_end - -sequenceDecs_decode_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte - -sequenceDecs_decode_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRL $0x10, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRL $0x10, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRL $0x10, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - 
MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_amd64_adjust_offset_nonzero - -sequenceDecs_decode_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_amd64_adjust_zero - JEQ sequenceDecs_decode_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_amd64_adjust_three - JMP sequenceDecs_decode_amd64_adjust_two - -sequenceDecs_decode_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 
152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_56_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decode_56_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_56_amd64_fill_end - -sequenceDecs_decode_56_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_56_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_56_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte - -sequenceDecs_decode_56_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_56_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRL $0x10, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRL $0x10, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRL $0x10, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_56_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_56_amd64_after_adjust - 
-sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero - -sequenceDecs_decode_56_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_56_amd64_after_adjust - -sequenceDecs_decode_56_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_amd64_adjust_zero - JEQ sequenceDecs_decode_56_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_amd64_adjust_three - JMP sequenceDecs_decode_56_amd64_adjust_two - -sequenceDecs_decode_56_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_56_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_56_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_56_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_56_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_56_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_end - -sequenceDecs_decode_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_byte_by_byte - -sequenceDecs_decode_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_2_end - -sequenceDecs_decode_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte - -sequenceDecs_decode_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_bmi2_after_adjust - -sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_bmi2_after_adjust - 
-sequenceDecs_decode_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_bmi2_adjust_zero - JEQ sequenceDecs_decode_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_bmi2_adjust_three - JMP sequenceDecs_decode_bmi2_adjust_two - -sequenceDecs_decode_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_three: - LEAQ -1(R10), R13 - -sequenceDecs_decode_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_56_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_56_bmi2_fill_end - -sequenceDecs_decode_56_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_56_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte - -sequenceDecs_decode_56_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_56_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_56_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_bmi2_adjust_zero - JEQ sequenceDecs_decode_56_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_bmi2_adjust_three - JMP sequenceDecs_decode_56_bmi2_adjust_two - -sequenceDecs_decode_56_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_three: - LEAQ -1(R10), R13 - 
-sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_56_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_56_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_56_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_56_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (SI)(R14*1), X0 - MOVUPS X0, (BX)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, R11 - JB copy_1 - ADDQ R11, SI - ADDQ R11, BX - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - 
-copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ BX, R12 - ADDQ R13, BX - -copy_2: - MOVUPS (R11), X0 - MOVUPS X0, (R12) - ADDQ $0x10, R11 - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - MOVQ R11, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (SI), X0 - MOVUPS X0, (BX) - ADDQ $0x10, SI - ADDQ $0x10, BX - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(SI)(R14*1), SI - LEAQ 16(BX)(R14*1), BX - MOVUPS -16(SI), X0 - MOVUPS X0, -16(BX) - JMP copy_1_end - -copy_1_small: - CMPQ R11, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ R11, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (SI), R14 - MOVB -1(SI)(R11*1), R15 - MOVB 
R14, (BX) - MOVB R15, -1(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_3: - MOVW (SI), R14 - MOVB 2(SI), R15 - MOVW R14, (BX) - MOVB R15, 2(BX) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_4through7: - MOVL (SI), R14 - MOVL -4(SI)(R11*1), R15 - MOVL R14, (BX) - MOVL R15, -4(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (SI), R14 - MOVQ -8(SI)(R11*1), R15 - MOVQ R14, (BX) - MOVQ R15, -8(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - -copy_1_end: - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (R11), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R11 - ADDQ $0x10, BX - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(R11)(R12*1), R11 - LEAQ 16(BX)(R12*1), BX - MOVUPS -16(R11), X0 - MOVUPS X0, -16(BX) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 
- JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (R11), R12 - MOVB -1(R11)(R13*1), R14 - MOVB R12, (BX) - MOVB R14, -1(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_3: - MOVW (R11), R12 - MOVB 2(R11), R14 - MOVW R12, (BX) - MOVB R14, 2(BX) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_4through7: - MOVL (R11), R12 - MOVL -4(R11)(R13*1), R14 - MOVL R12, (BX) - MOVL R14, -4(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (R11), R12 - MOVQ -8(R11)(R13*1), R14 - MOVQ R12, (BX) - MOVQ R14, -8(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_end - -sequenceDecs_decodeSync_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_2_end - -sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRL $0x10, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRL $0x10, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - 
MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRL $0x10, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R11)(R14*1), X0 - MOVUPS X0, (R10)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, AX - JB copy_1 - ADDQ AX, R11 - ADDQ AX, R10 - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - 
ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R10, CX - ADDQ R13, R10 - -copy_2: - MOVUPS (AX), X0 - MOVUPS X0, (CX) - ADDQ $0x10, AX - ADDQ $0x10, CX - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 
32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_end - -sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_2_end - -sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal 
Length State - BZHIQ SI, R14, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R10)(R14*1), X0 - MOVUPS X0, (R9)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, CX - JB copy_1 - ADDQ CX, R10 - ADDQ CX, R9 - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - 
-copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R9, R12 - ADDQ R13, R9 - -copy_2: - MOVUPS (CX), X0 - MOVUPS X0, (R12) - ADDQ $0x10, CX - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 
24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_safe_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_end - -sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end - -sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - 
JZ sequenceDecs_decodeSync_safe_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRL $0x10, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRL $0x10, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRL $0x10, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_safe_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_safe_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - MOVQ AX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R11), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R11 - ADDQ $0x10, R10 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R11)(R14*1), R11 - LEAQ 16(R10)(R14*1), R10 - MOVUPS -16(R11), X0 - MOVUPS X0, -16(R10) - JMP copy_1_end - -copy_1_small: - CMPQ AX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ AX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - 
-copy_1_move_1or2: - MOVB (R11), R14 - MOVB -1(R11)(AX*1), R15 - MOVB R14, (R10) - MOVB R15, -1(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_3: - MOVW (R11), R14 - MOVB 2(R11), R15 - MOVW R14, (R10) - MOVB R15, 2(R10) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R11), R14 - MOVL -4(R11)(AX*1), R15 - MOVL R14, (R10) - MOVL R15, -4(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R11), R14 - MOVQ -8(R11)(AX*1), R15 - MOVQ R14, (R10) - MOVQ R15, -8(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - -copy_1_end: - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_2_small - -copy_2_loop: - MOVUPS (AX), X0 - MOVUPS X0, (R10) - ADDQ $0x10, AX - ADDQ $0x10, R10 - SUBQ $0x10, CX - JAE copy_2_loop - LEAQ 16(AX)(CX*1), AX - LEAQ 16(R10)(CX*1), R10 - MOVUPS -16(AX), X0 - MOVUPS X0, -16(R10) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 
- JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (AX), CL - MOVB -1(AX)(R13*1), R14 - MOVB CL, (R10) - MOVB R14, -1(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_3: - MOVW (AX), CX - MOVB 2(AX), R14 - MOVW CX, (R10) - MOVB R14, 2(R10) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (AX), CX - MOVL -4(AX)(R13*1), R14 - MOVL CX, (R10) - MOVL R14, -4(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (AX), CX - MOVQ -8(AX)(R13*1), R14 - MOVQ CX, (R10) - MOVQ R14, -8(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_safe_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_safe_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_end - -sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end - -sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_safe_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R14, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_safe_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero - INCQ R13 - JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_safe_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - MOVQ CX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R10), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R10 - ADDQ $0x10, R9 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R10)(R14*1), R10 - LEAQ 16(R9)(R14*1), R9 - MOVUPS -16(R10), X0 - MOVUPS X0, -16(R9) - JMP copy_1_end - -copy_1_small: - CMPQ CX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ CX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (R10), R14 - MOVB -1(R10)(CX*1), R15 - MOVB R14, (R9) - MOVB R15, -1(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_3: - MOVW (R10), R14 - MOVB 2(R10), R15 - MOVW R14, (R9) - MOVB R15, 2(R9) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R10), R14 - MOVL -4(R10)(CX*1), R15 - MOVL R14, (R9) - MOVL R15, -4(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R10), R14 - MOVQ -8(R10)(CX*1), R15 - MOVQ R14, (R9) - MOVQ R15, -8(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - -copy_1_end: - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 
2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - -copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (CX), X0 - MOVUPS X0, (R9) - ADDQ $0x10, CX - ADDQ $0x10, R9 - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(CX)(R12*1), CX - LEAQ 16(R9)(R12*1), R9 - MOVUPS -16(CX), X0 - MOVUPS X0, -16(R9) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (CX), R12 - MOVB -1(CX)(R13*1), R14 - MOVB R12, (R9) - MOVB R14, -1(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_3: - MOVW (CX), R12 - MOVB 2(CX), R14 - MOVW R12, (R9) - MOVB R14, 2(R9) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (CX), R12 - MOVL -4(CX)(R13*1), R14 - MOVL R12, (R9) - MOVL R14, -4(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (CX), R12 - MOVQ -8(CX)(R13*1), R14 - MOVQ R12, (R9) - MOVQ R14, -8(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_safe_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error 
-sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go deleted file mode 100644 index 2fb35b788c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ /dev/null @@ -1,237 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "fmt" - "io" -) - -// decode sequences from the stream with the provided history but without dictionary. -func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - return false, nil -} - -// decode sequences from the stream without the provided history. -func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - s.seqSize = 0 - litRemain := len(s.literals) - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - for i := range seqs { - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
- mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - if br.overread() { - if debugDecoder { - printf("reading sequence %d, exceeded available data\n", i) - } - return io.ErrUnexpectedEOF - } - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - // Evaluate. - // We might be doing this async, so do it early. - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - s.seqSize += ll + ml - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - litRemain -= ll - if litRemain < 0 { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) - } - seqs[i] = seqVals{ - ll: ll, - ml: ml, - mo: mo, - } - if i == len(seqs)-1 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - s.seqSize += litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// executeSimple handles cases when a dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize > cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Malformed input - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if seq.ml > v { - // Some goes into the current block. 
- // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - - // We must be in the current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go deleted file mode 100644 index 8014174a77..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "math/bits" - -type seqCoders struct { - llEnc, ofEnc, mlEnc *fseEncoder - llPrev, ofPrev, mlPrev *fseEncoder -} - -// swap coders with another (block). -func (s *seqCoders) swap(other *seqCoders) { - *s, *other = *other, *s -} - -// setPrev will update the previous encoders to the actually used ones -// and make sure a fresh one is in the main slot. -func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { - compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { - // We used the new one, more current to history and reuse the previous history - if *current == used { - *prev, *current = *current, *prev - c := *current - p := *prev - c.reUsed = false - p.reUsed = true - return - } - if used == *prev { - return - } - // Ensure we cannot reuse by accident - prevEnc := *prev - prevEnc.symbolLen = 0 - } - compareSwap(ll, &s.llEnc, &s.llPrev) - compareSwap(ml, &s.mlEnc, &s.mlPrev) - compareSwap(of, &s.ofEnc, &s.ofPrev) -} - -func highBit(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} - -var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 16, 17, 17, 18, 18, 19, 19, - 20, 20, 20, 20, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24} - -// Up to 6 bits -const maxLLCode = 35 - -// llBitsTable translates from ll code to number of bits. -var llBitsTable = [maxLLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 16} - -// llCode returns the code that represents the literal length requested. 
-func llCode(litLength uint32) uint8 { - const llDeltaCode = 19 - if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) - return llCodeTable[litLength&63] - } - return uint8(highBit(litLength)) + llDeltaCode -} - -var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} - -// Up to 6 bits -const maxMLCode = 52 - -// mlBitsTable translates from ml code to number of bits. -var mlBitsTable = [maxMLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16} - -// note : mlBase = matchLength - MINMATCH; -// because it's the format it's stored in seqStore->sequences -func mlCode(mlBase uint32) uint8 { - const mlDeltaCode = 36 - if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) - return mlCodeTable[mlBase&127] - } - return uint8(highBit(mlBase)) + mlDeltaCode -} - -func ofCode(offset uint32) uint8 { - // A valid offset will always be > 0. - return uint8(bits.Len32(offset) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go deleted file mode 100644 index ec13594e89..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ /dev/null @@ -1,434 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "hash/crc32" - "io" - - "github.com/klauspost/compress/huff0" - snappy "github.com/klauspost/compress/internal/snapref" -) - -const ( - snappyTagLiteral = 0x00 - snappyTagCopy1 = 0x01 - snappyTagCopy2 = 0x02 - snappyTagCopy4 = 0x03 -) - -const ( - snappyChecksumSize = 4 - snappyMagicBody = "sNaPpY" - - // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - snappyMaxBlockSize = 65536 - - // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - snappyMaxEncodedLenOfMaxBlockSize = 76490 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var ( - // ErrSnappyCorrupt reports that the input is invalid. - ErrSnappyCorrupt = errors.New("snappy: corrupt input") - // ErrSnappyTooLarge reports that the uncompressed length is too large. 
- ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") - // ErrSnappyUnsupported reports that the input isn't supported. - ErrSnappyUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. -// Conversion is done by converting the stream directly from Snappy without intermediate -// full decoding. -// Therefore the compression ratio is much less than what can be done by a full decompression -// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without -// any errors being generated. -// No CRC value is being generated and not all CRC values of the Snappy stream are checked. -// However, it provides really fast recompression of Snappy streams. -// The converter can be reused to avoid allocations, even after errors. -type SnappyConverter struct { - r io.Reader - err error - buf []byte - block *blockEnc -} - -// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. -// If any error is detected on the Snappy stream it is returned. -// The number of bytes written is returned. -func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { - initPredefined() - r.err = nil - r.r = in - if r.block == nil { - r.block = &blockEnc{} - r.block.init() - } - r.block.initNewEncode() - if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { - r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) - } - r.block.litEnc.Reuse = huff0.ReusePolicyNone - var written int64 - var readHeader bool - { - header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) - - var n int - n, r.err = w.Write(header) - if r.err != nil { - return written, r.err - } - written += int64(n) - } - - for { - if !r.readFull(r.buf[:4], true) { - // Add empty last block - r.block.reset(nil) - r.block.last = true - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, err := w.Write(r.block.output) - if err != nil { - return written, err - } - written += int64(n) - - return written, r.err - } - chunkType := r.buf[0] - if !readHeader { - if chunkType != chunkTypeStreamIdentifier { - println("chunkType != chunkTypeStreamIdentifier", chunkType) - r.err = ErrSnappyCorrupt - return written, r.err - } - readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - println("chunkLen > len(r.buf)", chunkType) - r.err = ErrSnappyUnsupported - return written, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
- if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return written, r.err - } - //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[snappyChecksumSize:] - - n, hdr, err := snappyDecodedLen(buf) - if err != nil { - r.err = err - return written, r.err - } - buf = buf[hdr:] - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - r.block.pushOffsets() - if err := decodeSnappy(r.block, buf); err != nil { - r.err = err - return written, r.err - } - if r.block.size+r.block.extraLits != n { - printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) - r.err = ErrSnappyCorrupt - return written, r.err - } - err = r.block.encode(nil, false, false) - switch err { - case errIncompressible: - r.block.popOffsets() - r.block.reset(nil) - r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) - if err != nil { - return written, err - } - err = r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - case nil: - default: - return written, err - } - - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - case chunkTypeUncompressedData: - if debugEncoder { - println("Uncompressed, chunklen", chunkLen) - } - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - buf := r.buf[:snappyChecksumSize] - if !r.readFull(buf, false) { - return written, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - snappyChecksumSize - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.literals = r.block.literals[:n] - if !r.readFull(r.block.literals, false) { - return written, r.err - } - if snappyCRC(r.block.literals) != checksum { - println("literals crc mismatch") - r.err = ErrSnappyCorrupt - return written, r.err - } - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - - case chunkTypeStreamIdentifier: - if debugEncoder { - println("stream id", chunkLen, len(snappyMagicBody)) - } - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(snappyMagicBody) { - println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) - r.err = ErrSnappyCorrupt - return written, r.err - } - if !r.readFull(r.buf[:len(snappyMagicBody)], false) { - return written, r.err - } - for i := 0; i < len(snappyMagicBody); i++ { - if r.buf[i] != snappyMagicBody[i] { - println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) - r.err = ErrSnappyCorrupt - return written, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
- println("chunkType <= 0x7f") - r.err = ErrSnappyUnsupported - return written, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return written, r.err - } - } -} - -// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read. -func decodeSnappy(blk *blockEnc, src []byte) error { - //decodeRef(make([]byte, snappyMaxBlockSize), src) - var s, length int - lits := blk.extraLits - var offset uint32 - for s < len(src) { - switch src[s] & 0x03 { - case snappyTagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - if x > snappyMaxBlockSize { - println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) - return ErrSnappyCorrupt - } - length = int(x) + 1 - if length <= 0 { - println("length <= 0 ", length) - - return errUnsupportedLiteralLength - } - //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { - // return ErrSnappyCorrupt - //} - - blk.literals = append(blk.literals, src[s:s+length]...) - //println(length, "litLen") - lits += length - s += length - continue - - case snappyTagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) - - case snappyTagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = uint32(src[s-2]) | uint32(src[s-1])<<8 - - case snappyTagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - - if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { - println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) - - return ErrSnappyCorrupt - } - - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if false { - offset = blk.matchOffset(offset, uint32(lits)) - } else { - offset += 3 - } - - blk.sequences = append(blk.sequences, seq{ - litLen: uint32(lits), - offset: offset, - matchLen: uint32(length) - zstdMinMatch, - }) - blk.size += length + lits - lits = 0 - } - blk.extraLits = lits - return nil -} - -func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrSnappyCorrupt - } - return false - } - return true -} - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func snappyCRC(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} - -// snappyDecodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrSnappyCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrSnappyTooLarge - } - return int(v), n, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go deleted file mode 100644 index 29c15c8c4e..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "errors" - "io" - "sync" -) - -// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. -// See https://www.winzip.com/win/en/comp_info.html -const ZipMethodWinZip = 93 - -// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. -// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. -// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT -const ZipMethodPKWare = 20 - -// zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { - z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) - if err != nil { - panic(err) - } - return z -}} - -// newZipReader creates a pooled zip decompressor. -func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { - pool := &zipReaderPool - if len(opts) > 0 { - opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) - // Force concurrency 1 - opts = append(opts, WithDecoderConcurrency(1)) - // Create our own pool - pool = &sync.Pool{} - } - return func(r io.Reader) io.ReadCloser { - dec, ok := pool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, opts...) 
- if err != nil { - panic(err) - } - dec = d - } - return &pooledZipReader{dec: dec, pool: pool} - } -} - -type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - pool *sync.Pool - dec *Decoder -} - -func (r *pooledZipReader) Read(p []byte) (n int, err error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.dec == nil { - return 0, errors.New("read after close or EOF") - } - dec, err := r.dec.Read(p) - if err == io.EOF { - r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return dec, err -} - -func (r *pooledZipReader) Close() error { - r.mu.Lock() - defer r.mu.Unlock() - var err error - if r.dec != nil { - err = r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return err -} - -type pooledZipWriter struct { - mu sync.Mutex // guards Close and Read - enc *Encoder - pool *sync.Pool -} - -func (w *pooledZipWriter) Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - if w.enc == nil { - return 0, errors.New("Write after Close") - } - return w.enc.Write(p) -} - -func (w *pooledZipWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - var err error - if w.enc != nil { - err = w.enc.Close() - w.pool.Put(w.enc) - w.enc = nil - } - return err -} - -// ZipCompressor returns a compressor that can be registered with zip libraries. -// The provided encoder options will be used on all encodes. -func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { - var pool sync.Pool - return func(w io.Writer) (io.WriteCloser, error) { - enc, ok := pool.Get().(*Encoder) - if ok { - enc.Reset(w) - } else { - var err error - enc, err = NewWriter(w, opts...) - if err != nil { - return nil, err - } - } - return &pooledZipWriter{enc: enc, pool: &pool}, nil - } -} - -// ZipDecompressor returns a decompressor that can be registered with zip libraries. -// See ZipCompressor for example. -// Options can be specified. WithDecoderConcurrency(1) is forced, -// and by default a 128MB maximum decompression window is specified. -// The window size can be overridden if required. -func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { - return newZipReader(opts...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go deleted file mode 100644 index 066bef2a4f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ /dev/null @@ -1,125 +0,0 @@ -// Package zstd provides decompression of zstandard files. -// -// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "log" - "math" -) - -// enable debug printing -const debug = false - -// enable encoding debug printing -const debugEncoder = debug - -// enable decoding debug printing -const debugDecoder = debug - -// Enable extra assertions. -const debugAsserts = debug || false - -// print sequence details -const debugSequences = false - -// print detailed matching information -const debugMatches = false - -// force encoder to use predefined tables. -const forcePreDef = false - -// zstdMinMatch is the minimum zstd match length. -const zstdMinMatch = 3 - -// fcsUnknown is used for unknown frame content size. -const fcsUnknown = math.MaxUint64 - -var ( - // ErrReservedBlockType is returned when a reserved block type is found. - // Typically this indicates wrong or corrupted input. 
- ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") - - // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. - // Typically this indicates wrong or corrupted input. - ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") - - // ErrBlockTooSmall is returned when a block is too small to be decoded. - // Typically returned on invalid input. - ErrBlockTooSmall = errors.New("block too small") - - // ErrUnexpectedBlockSize is returned when a block has unexpected size. - // Typically returned on invalid input. - ErrUnexpectedBlockSize = errors.New("unexpected block size") - - // ErrMagicMismatch is returned when a "magic" number isn't what is expected. - // Typically this indicates wrong or corrupted input. - ErrMagicMismatch = errors.New("invalid input: magic number mismatch") - - // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeExceeded = errors.New("window size exceeded") - - // ErrWindowSizeTooSmall is returned when no window size is specified. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") - - // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. - ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") - - // ErrUnknownDictionary is returned if the dictionary ID is unknown. - ErrUnknownDictionary = errors.New("unknown dictionary") - - // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeExceeded = errors.New("frame size exceeded") - - // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") - - // ErrCRCMismatch is returned if CRC mismatches. - ErrCRCMismatch = errors.New("CRC check failed") - - // ErrDecoderClosed will be returned if the Decoder was used after - // Close has been called. - ErrDecoderClosed = errors.New("decoder used after Close") - - // ErrEncoderClosed will be returned if the Encoder was used after - // Close has been called. - ErrEncoderClosed = errors.New("encoder used after Close") - - // ErrDecoderNilInput is returned when a nil Reader was provided - // and an operation other than Reset/DecodeAll/Close was attempted. - ErrDecoderNilInput = errors.New("nil input provided as reader") -) - -func println(a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Println(a...) - } -} - -func printf(format string, a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Printf(format, a...) 
- } -} - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) -} - -type byter interface { - Bytes() []byte - Len() int -} - -var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 0c29708d2b..69b15d1848 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,81 @@ +## 2.23.4 + +Prior to this release Ginkgo would compute the incorrect number of available CPUs when running with `-p` in a linux container. Thanks to @emirot for the fix! + +### Features +- Add automaxprocs for using CPUQuota [2b9c428] + +### Fixes +- clarify gotchas about -vet flag [1f59d07] + +### Maintenance +- bump dependencies [2d134d5] + +## 2.23.3 + +### Fixes + +- allow `-` as a standalone argument [cfcc1a5] +- Bug Fix: Add GinkoTBWrapper.Chdir() and GinkoTBWrapper.Context() [feaf292] +- ignore exit code for symbol test on linux [88e2282] + +## 2.23.2 + +🎉🎉🎉 + +At long last, some long-standing performance gaps between `ginkgo` and `go test` have been resolved! + +Ginkgo operates by running `go test -c` to generate test binaries, and then running those binaries. It turns out that the compilation step of `go test -c` is slower than `go test`'s compilation step because `go test` strips out debug symbols (`ldflags=-w`) whereas `go test -c` does not. + +Ginkgo now passes the appropriate `ldflags` to `go test -c` when running specs to strip out symbols. This is only done when it is safe to do so and symbols are preferred when profiling is enabled and when `ginkgo build` is called explicitly. + +This, coupled, with the [instructions for disabling XProtect on MacOS](https://onsi.github.io/ginkgo/#if-you-are-running-on-macos) yields a much better performance experience with Ginkgo. + +## 2.23.1 + +## 🚨 For users on MacOS 🚨 + +A long-standing Ginkgo performance issue on MacOS seems to be due to mac's antimalware XProtect. You can follow the instructions [here](https://onsi.github.io/ginkgo/#if-you-are-running-on-macos) to disable it in your terminal. Doing so sped up Ginkgo's own test suite from 1m8s to 47s. + +### Fixes + +Ginkgo's CLI is now a bit clearer if you pass flags in incorrectly: + +- make it clearer that you need to pass a filename to the various profile flags, not an absolute directory [a0e52ff] +- emit an error and exit if the ginkgo invocation includes flags after positional arguments [b799d8d] + +This might cause existing CI builds to fail. If so then it's likely that your CI build was misconfigured and should be corrected. Open an issue if you need help. + +## 2.23.0 + +Ginkgo 2.23.0 adds a handful of methods to `GinkgoT()` to make it compatible with the `testing.TB` interface in Go 1.24. `GinkgoT().Context()`, in particular, is a useful shorthand for generating a new context that will clean itself up in a `DeferCleanup()`. This has subtle behavior differences from the golang implementation but should make sense in a Ginkgo... um... context. + +### Features +- bump to go 1.24.0 - support new testing.TB methods and add a test to cover testing.TB regressions [37a511b] + +### Fixes +- fix edge case where build -o is pointing at an explicit file, not a directory [7556a86] +- Fix binary paths when precompiling multiple suites. 
[4df06c6] + +### Maintenance +- Fix: Correct Markdown list rendering in MIGRATING_TO_V2.md [cbcf39a] +- docs: fix test workflow badge (#1512) [9b261ff] +- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1516) [00f19c8] +- Bump golang.org/x/tools from 0.28.0 to 0.30.0 (#1515) [e98a4df] +- Bump activesupport from 6.0.6.1 to 6.1.7.5 in /docs (#1504) [60cc4e2] +- Bump github-pages from 231 to 232 in /docs (#1447) [fea6f2d] +- Bump rexml from 3.2.8 to 3.3.9 in /docs (#1497) [31d7813] +- Bump webrick from 1.8.1 to 1.9.1 in /docs (#1501) [fc3bbd6] +- Code linting (#1500) [aee0d56] +- change interface{} to any (#1502) [809a710] + +## 2.22.2 + +### Maintenance +- Bump github.com/onsi/gomega from 1.36.1 to 1.36.2 (#1499) [cc553ce] +- Bump golang.org/x/crypto (#1498) [2170370] +- Bump golang.org/x/net from 0.32.0 to 0.33.0 (#1496) [a96c44f] + ## 2.22.1 ### Fixes @@ -623,7 +701,7 @@ Ginkgo also uses this progress reporting infrastructure under the hood when hand ### Features - `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes. - As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature. + As a result the **signature of these methods has changed** and now includes a trailing `args ...any`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature. ### Maintenance - Modernize the invocation of Ginkgo in github actions [0ffde58] @@ -1035,7 +1113,7 @@ New Features: - `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command. - `ginkgo --failFast` aborts the test suite after the first failure. - `ginkgo generate file_1 file_2` can take multiple file arguments. -- Ginkgo now summarizes any spec failures that occurred at the end of the test run. +- Ginkgo now summarizes any spec failures that occurred at the end of the test run. - `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed. Improvements: @@ -1069,7 +1147,7 @@ Bug Fixes: Breaking changes: - `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead -- Modified the Reporter interface +- Modified the Reporter interface - `watch` is now a subcommand, not a flag. 
DSL changes: diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md index cb23ffdf6a..e3d0c13cc6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/README.md +++ b/vendor/github.com/onsi/ginkgo/v2/README.md @@ -1,6 +1,6 @@ ![Ginkgo](https://onsi.github.io/ginkgo/images/ginkgo.png) -[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/) +[![test](https://github.com/onsi/ginkgo/actions/workflows/test.yml/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/) --- diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index a3e8237e93..d027bdff93 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -83,9 +83,9 @@ func exitIfErrors(errors []error) { type GinkgoWriterInterface interface { io.Writer - Print(a ...interface{}) - Printf(format string, a ...interface{}) - Println(a ...interface{}) + Print(a ...any) + Printf(format string, a ...any) + Println(a ...any) TeeTo(writer io.Writer) ClearTeeWriters() @@ -243,7 +243,7 @@ for more on how specs are parallelized in Ginkgo. You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite. */ -func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { +func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { if suiteDidRun { exitIfErr(types.GinkgoErrors.RerunningSuite()) } @@ -316,7 +316,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { return passed } -func extractSuiteConfiguration(args []interface{}) Labels { +func extractSuiteConfiguration(args []any) Labels { suiteLabels := Labels{} configErrors := []error{} for _, arg := range args { @@ -491,14 +491,14 @@ to Describe the behavior of an object or function and, within that Describe, out You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference */ -func Describe(text string, args ...interface{}) bool { +func Describe(text string, args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) } /* FDescribe focuses specs within the Describe block. */ -func FDescribe(text string, args ...interface{}) bool { +func FDescribe(text string, args ...any) bool { args = append(args, internal.Focus) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) } @@ -506,7 +506,7 @@ func FDescribe(text string, args ...interface{}) bool { /* PDescribe marks specs within the Describe block as pending. 
*/ -func PDescribe(text string, args ...interface{}) bool { +func PDescribe(text string, args ...any) bool { args = append(args, internal.Pending) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) } @@ -522,18 +522,18 @@ var XDescribe = PDescribe var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe /* When is an alias for Describe - it generates the exact same kind of Container node */ -func When(text string, args ...interface{}) bool { +func When(text string, args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) } /* When is an alias for Describe - it generates the exact same kind of Container node */ -func FWhen(text string, args ...interface{}) bool { +func FWhen(text string, args ...any) bool { args = append(args, internal.Focus) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) } /* When is an alias for Describe - it generates the exact same kind of Container node */ -func PWhen(text string, args ...interface{}) bool { +func PWhen(text string, args ...any) bool { args = append(args, internal.Pending) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) } @@ -550,14 +550,14 @@ You can pass It nodes bare functions (func() {}) or functions that receive a Spe You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference */ -func It(text string, args ...interface{}) bool { +func It(text string, args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) } /* FIt allows you to focus an individual It. */ -func FIt(text string, args ...interface{}) bool { +func FIt(text string, args ...any) bool { args = append(args, internal.Focus) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) } @@ -565,7 +565,7 @@ func FIt(text string, args ...interface{}) bool { /* PIt allows you to mark an individual It as pending. */ -func PIt(text string, args ...interface{}) bool { +func PIt(text string, args ...any) bool { args = append(args, internal.Pending) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) } @@ -611,8 +611,8 @@ BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(c You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure. You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite */ -func BeforeSuite(body interface{}, args ...interface{}) bool { - combinedArgs := []interface{}{body} +func BeforeSuite(body any, args ...any) bool { + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...)) } @@ -630,8 +630,8 @@ AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(co You cannot nest any other Ginkgo nodes within an AfterSuite node's closure. 
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite */ -func AfterSuite(body interface{}, args ...interface{}) bool { - combinedArgs := []interface{}{body} +func AfterSuite(body any, args ...any) bool { + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...)) } @@ -667,8 +667,8 @@ If either function receives a context.Context/SpecContext it is considered inter You cannot nest any other Ginkgo nodes within an SynchronizedBeforeSuite node's closure. You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite */ -func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool { - combinedArgs := []interface{}{process1Body, allProcessBody} +func SynchronizedBeforeSuite(process1Body any, allProcessBody any, args ...any) bool { + combinedArgs := []any{process1Body, allProcessBody} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...)) @@ -687,8 +687,8 @@ Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accompli You cannot nest any other Ginkgo nodes within an SynchronizedAfterSuite node's closure. You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite */ -func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool { - combinedArgs := []interface{}{allProcessBody, process1Body} +func SynchronizedAfterSuite(allProcessBody any, process1Body any, args ...any) bool { + combinedArgs := []any{allProcessBody, process1Body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...)) @@ -703,7 +703,7 @@ BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(co You cannot nest any other Ginkgo nodes within a BeforeEach node's closure. You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach */ -func BeforeEach(args ...interface{}) bool { +func BeforeEach(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...)) } @@ -716,7 +716,7 @@ JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/fun You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach */ -func JustBeforeEach(args ...interface{}) bool { +func JustBeforeEach(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...)) } @@ -731,7 +731,7 @@ AfterEach can take a func() body, or an interruptible func(SpecContext)/func(con You cannot nest any other Ginkgo nodes within an AfterEach node's closure. 
You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup */ -func AfterEach(args ...interface{}) bool { +func AfterEach(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...)) } @@ -743,7 +743,7 @@ JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach */ -func JustAfterEach(args ...interface{}) bool { +func JustAfterEach(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...)) } @@ -758,7 +758,7 @@ You cannot nest any other Ginkgo nodes within a BeforeAll node's closure. You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall */ -func BeforeAll(args ...interface{}) bool { +func BeforeAll(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...)) } @@ -775,7 +775,7 @@ You cannot nest any other Ginkgo nodes within an AfterAll node's closure. You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall */ -func AfterAll(args ...interface{}) bool { +func AfterAll(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...)) } @@ -818,7 +818,7 @@ When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node. You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup */ -func DeferCleanup(args ...interface{}) { +func DeferCleanup(args ...any) { fail := func(message string, cl types.CodeLocation) { global.Failer.Fail(message, cl) } diff --git a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go index f912bbec65..fd45b8beab 100644 --- a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go @@ -118,9 +118,9 @@ Use Gomega's gmeasure package instead. You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code */ type Benchmarker interface { - Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) - RecordValue(name string, value float64, info ...interface{}) - RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) + Time(name string, body func(), info ...any) (elapsedTime time.Duration) + RecordValue(name string, value float64, info ...any) + RecordValueWithPrecision(name string, value float64, units string, precision int, info ...any) } /* @@ -129,7 +129,7 @@ Deprecated: Measure() has been removed from Ginkgo 2.0 Use Gomega's gmeasure package instead. 
You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code */ -func Measure(_ ...interface{}) bool { +func Measure(_ ...any) bool { deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1)) return true } diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 4d5749114c..f61356db19 100644 --- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -24,15 +24,15 @@ const ( var SingletonFormatter = New(ColorModeTerminal) -func F(format string, args ...interface{}) string { +func F(format string, args ...any) string { return SingletonFormatter.F(format, args...) } -func Fi(indentation uint, format string, args ...interface{}) string { +func Fi(indentation uint, format string, args ...any) string { return SingletonFormatter.Fi(indentation, format, args...) } -func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { +func Fiw(indentation uint, maxWidth uint, format string, args ...any) string { return SingletonFormatter.Fiw(indentation, maxWidth, format, args...) } @@ -115,15 +115,15 @@ func New(colorMode ColorMode) Formatter { return f } -func (f Formatter) F(format string, args ...interface{}) string { +func (f Formatter) F(format string, args ...any) string { return f.Fi(0, format, args...) } -func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string { +func (f Formatter) Fi(indentation uint, format string, args ...any) string { return f.Fiw(indentation, 0, format, args...) } -func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { +func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...any) string { out := f.style(format) if len(args) > 0 { out = fmt.Sprintf(out, args...) 
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index fd17260843..2b36b2feb9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -44,7 +44,7 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go internal.VerifyCLIAndFrameworkVersion(suites) opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers()) - opc.StartCompiling(suites, goFlagsConfig) + opc.StartCompiling(suites, goFlagsConfig, true) for { suiteIdx, suite := opc.Next() @@ -55,18 +55,22 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go if suite.State.Is(internal.TestSuiteStateFailedToCompile) { fmt.Println(suite.CompilationError.Error()) } else { - if len(goFlagsConfig.O) == 0 { - goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test") - } else { + var testBinPath string + if len(goFlagsConfig.O) != 0 { stat, err := os.Stat(goFlagsConfig.O) if err != nil { panic(err) } if stat.IsDir() { - goFlagsConfig.O += "/" + suite.PackageName + ".test" + testBinPath = goFlagsConfig.O + "/" + suite.PackageName + ".test" + } else { + testBinPath = goFlagsConfig.O } } - fmt.Printf("Compiled %s\n", goFlagsConfig.O) + if len(testBinPath) == 0 { + testBinPath = path.Join(suite.Path, suite.PackageName+".test") + } + fmt.Printf("Compiled %s\n", testBinPath) } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go index 2efd286088..f0e7331f7d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -12,7 +12,7 @@ func Abort(details AbortDetails) { panic(details) } -func AbortGracefullyWith(format string, args ...interface{}) { +func AbortGracefullyWith(format string, args ...any) { Abort(AbortDetails{ ExitCode: 0, Error: fmt.Errorf(format, args...), @@ -20,7 +20,7 @@ func AbortGracefullyWith(format string, args ...interface{}) { }) } -func AbortWith(format string, args ...interface{}) { +func AbortWith(format string, args ...any) { Abort(AbortDetails{ ExitCode: 1, Error: fmt.Errorf(format, args...), @@ -28,7 +28,7 @@ func AbortWith(format string, args ...interface{}) { }) } -func AbortWithUsage(format string, args ...interface{}) { +func AbortWithUsage(format string, args ...any) { Abort(AbortDetails{ ExitCode: 1, Error: fmt.Errorf(format, args...), diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go index 12e0e56591..79b83a3af1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -24,7 +24,11 @@ func (c Command) Run(args []string, additionalArgs []string) { if err != nil { AbortWithUsage(err.Error()) } - + for _, arg := range args { + if len(arg) > 1 && strings.HasPrefix(arg, "-") { + AbortWith(types.GinkgoErrors.FlagAfterPositionalParameter().Error()) + } + } c.Command(args, additionalArgs) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go index 88dd8d6b07..c3f6d3a11e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -68,7 +68,6 @@ func (p Program) RunAndExit(osArgs []string) { 
fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) } p.Exiter(exitCode) - return }() args, additionalArgs := []string{}, []string{} @@ -157,7 +156,6 @@ func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { p.EmitUsage(writer) Abort(AbortDetails{ExitCode: 1}) } - return } func (p Program) EmitUsage(writer io.Writer) { diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go index 48827cc5ef..7bbe6be0fc 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -11,7 +11,7 @@ import ( "github.com/onsi/ginkgo/v2/types" ) -func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite { +func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) TestSuite { if suite.PathToCompiledTest != "" { return suite } @@ -46,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) return suite } - args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath) + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath, preserveSymbols) if err != nil { suite.State = TestSuiteStateFailedToCompile suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) @@ -120,7 +120,7 @@ func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler { } } -func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) { +func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) { opc.stopped = false opc.idx = 0 opc.numSuites = len(suites) @@ -135,7 +135,7 @@ func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsCon stopped := opc.stopped opc.mutex.Unlock() if !stopped { - suite = CompileSuite(suite, goFlagsConfig) + suite = CompileSuite(suite, goFlagsConfig, preserveSymbols) } c <- suite } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go index 3c5079ff4c..87cfa11194 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -89,7 +89,7 @@ func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) } i := 0 - if sortFunc(i) != true { + if !sortFunc(i) { i = sort.Search(len(p.Blocks)-startIndex, sortFunc) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go index e9abb27d8b..bd6b8fbff3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" "os" - + _ "go.uber.org/automaxprocs" "github.com/onsi/ginkgo/v2/ginkgo/build" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/generators" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go index aaed4d570e..03875b9796 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -107,7 +107,7 @@ 
OUTER_LOOP: } opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers()) - opc.StartCompiling(suites, r.goFlagsConfig) + opc.StartCompiling(suites, r.goFlagsConfig, false) SUITE_LOOP: for { @@ -142,7 +142,7 @@ OUTER_LOOP: } if !endTime.IsZero() { - r.suiteConfig.Timeout = endTime.Sub(time.Now()) + r.suiteConfig.Timeout = time.Until(endTime) if r.suiteConfig.Timeout <= 0 { suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout opc.StopAndDrain() diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go index bde4193ce7..fe1ca30519 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go @@ -153,7 +153,7 @@ func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { } func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite { - suite = internal.CompileSuite(suite, w.goFlagsConfig) + suite = internal.CompileSuite(suite, w.goFlagsConfig, false) if suite.State.Is(internal.TestSuiteStateFailedToCompile) { fmt.Println(suite.CompilationError.Error()) return suite diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 02c6739e5b..993279de29 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -1,6 +1,7 @@ package ginkgo import ( + "context" "testing" "github.com/onsi/ginkgo/v2/internal/testingtproxy" @@ -48,6 +49,8 @@ The portion of the interface returned by GinkgoT() that maps onto methods in the */ type GinkgoTInterface interface { Cleanup(func()) + Chdir(dir string) + Context() context.Context Setenv(kev, value string) Error(args ...any) Errorf(format string, args ...any) @@ -127,6 +130,12 @@ type GinkgoTBWrapper struct { func (g *GinkgoTBWrapper) Cleanup(f func()) { g.GinkgoT.Cleanup(f) } +func (g *GinkgoTBWrapper) Chdir(dir string) { + g.GinkgoT.Chdir(dir) +} +func (g *GinkgoTBWrapper) Context() context.Context { + return g.GinkgoT.Context() +} func (g *GinkgoTBWrapper) Error(args ...any) { g.GinkgoT.Error(args...) 
} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/failer.go b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go index e9bd9565fc..8c5de9c160 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/failer.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go @@ -32,7 +32,7 @@ func (f *Failer) GetFailure() types.Failure { return f.failure } -func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { +func (f *Failer) Panic(location types.CodeLocation, forwardedPanic any) { f.lock.Lock() defer f.lock.Unlock() diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go index 8ed86111f7..79bfa87db2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -40,7 +40,7 @@ func (ic InterruptCause) String() string { } type InterruptStatus struct { - Channel chan interface{} + Channel chan any Level InterruptLevel Cause InterruptCause } @@ -62,14 +62,14 @@ type InterruptHandlerInterface interface { } type InterruptHandler struct { - c chan interface{} + c chan any lock *sync.Mutex level InterruptLevel cause InterruptCause client parallel_support.Client - stop chan interface{} + stop chan any signals []os.Signal - requestAbortCheck chan interface{} + requestAbortCheck chan any } func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { @@ -77,10 +77,10 @@ func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) * signals = []os.Signal{os.Interrupt, syscall.SIGTERM} } handler := &InterruptHandler{ - c: make(chan interface{}), + c: make(chan any), lock: &sync.Mutex{}, - stop: make(chan interface{}), - requestAbortCheck: make(chan interface{}), + stop: make(chan any), + requestAbortCheck: make(chan any), client: client, signals: signals, } @@ -98,9 +98,9 @@ func (handler *InterruptHandler) registerForInterrupts() { signal.Notify(signalChannel, handler.signals...) 
// cross-process abort handling - var abortChannel chan interface{} + var abortChannel chan any if handler.client != nil { - abortChannel = make(chan interface{}) + abortChannel = make(chan any) go func() { pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) for { @@ -125,7 +125,7 @@ func (handler *InterruptHandler) registerForInterrupts() { }() } - go func(abortChannel chan interface{}) { + go func(abortChannel chan any) { var interruptCause InterruptCause for { select { @@ -151,7 +151,7 @@ func (handler *InterruptHandler) registerForInterrupts() { } if handler.level != oldLevel { close(handler.c) - handler.c = make(chan interface{}) + handler.c = make(chan any) } handler.lock.Unlock() } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 0686f74103..8096950b6c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -84,7 +84,7 @@ const SuppressProgressReporting = suppressProgressReporting(true) type FlakeAttempts uint type MustPassRepeatedly uint type Offset uint -type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing +type Done chan<- any // Deprecated Done Channel for asynchronous testing type Labels []string type PollProgressInterval time.Duration type PollProgressAfter time.Duration @@ -110,9 +110,9 @@ func UnionOfLabels(labels ...Labels) Labels { return out } -func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) { - decorations := []interface{}{} - remainingArgs := []interface{}{} +func PartitionDecorations(args ...any) ([]any, []any) { + decorations := []any{} + remainingArgs := []any{} for _, arg := range args { if isDecoration(arg) { decorations = append(decorations, arg) @@ -123,7 +123,7 @@ func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) { return decorations, remainingArgs } -func isDecoration(arg interface{}) bool { +func isDecoration(arg any) bool { switch t := reflect.TypeOf(arg); { case t == nil: return false @@ -168,7 +168,7 @@ func isDecoration(arg interface{}) bool { } } -func isSliceOfDecorations(slice interface{}) bool { +func isSliceOfDecorations(slice any) bool { vSlice := reflect.ValueOf(slice) if vSlice.Len() == 0 { return false @@ -184,7 +184,7 @@ func isSliceOfDecorations(slice interface{}) bool { var contextType = reflect.TypeOf(new(context.Context)).Elem() var specContextType = reflect.TypeOf(new(SpecContext)).Elem() -func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) { +func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...any) (Node, []error) { baseOffset := 2 node := Node{ ID: UniqueNodeID(), @@ -207,7 +207,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy args = unrollInterfaceSlice(args) - remainingArgs := []interface{}{} + remainingArgs := []any{} // First get the CodeLocation up-to-date for _, arg := range args { switch v := arg.(type) { @@ -223,7 +223,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy labelsSeen := map[string]bool{} trackedFunctionError := false args = remainingArgs - remainingArgs = []interface{}{} + remainingArgs = []any{} // now process the rest of the args for _, arg := range args { switch t := reflect.TypeOf(arg); { @@ -451,7 +451,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy var 
doneType = reflect.TypeOf(make(Done)) -func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) { +func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg any) (func(SpecContext), bool) { t := reflect.TypeOf(arg) if t.NumOut() > 0 || t.NumIn() > 1 { return nil, false @@ -477,7 +477,7 @@ func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types. var byteType = reflect.TypeOf([]byte{}) -func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) { +func extractSynchronizedBeforeSuiteProc1Body(arg any) (func(SpecContext) []byte, bool) { t := reflect.TypeOf(arg) v := reflect.ValueOf(arg) @@ -505,7 +505,7 @@ func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) }, hasContext } -func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) { +func extractSynchronizedBeforeSuiteAllProcsBody(arg any) (func(SpecContext, []byte), bool) { t := reflect.TypeOf(arg) v := reflect.ValueOf(arg) hasContext, hasByte := false, false @@ -536,11 +536,11 @@ func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecConte var errInterface = reflect.TypeOf((*error)(nil)).Elem() -func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { +func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...any) (Node, []error) { decorations, remainingArgs := PartitionDecorations(args...) baseOffset := 2 cl := types.NewCodeLocation(baseOffset) - finalArgs := []interface{}{} + finalArgs := []any{} for _, arg := range decorations { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(Offset(0)): @@ -920,12 +920,12 @@ func (n Nodes) GetMaxMustPassRepeatedly() int { return maxMustPassRepeatedly } -func unrollInterfaceSlice(args interface{}) []interface{} { +func unrollInterfaceSlice(args any) []any { v := reflect.ValueOf(args) if v.Kind() != reflect.Slice { - return []interface{}{args} + return []any{args} } - out := []interface{}{} + out := []any{} for i := 0; i < v.Len(); i++ { el := reflect.ValueOf(v.Index(i).Interface()) if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go index 4a1c094612..5598f15cbb 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go @@ -69,7 +69,7 @@ type pipePair struct { writer *os.File } -func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) { +func startPipeFactory(pipeChannel chan pipePair, shutdown chan any) { for { //make the next pipe... 
pair := pipePair{} @@ -101,8 +101,8 @@ type genericOutputInterceptor struct { stderrClone *os.File pipe pipePair - shutdown chan interface{} - emergencyBailout chan interface{} + shutdown chan any + emergencyBailout chan any pipeChannel chan pipePair interceptedContent chan string @@ -139,7 +139,7 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() { interceptor.intercepting = true if interceptor.stdoutClone == nil { interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones() - interceptor.shutdown = make(chan interface{}) + interceptor.shutdown = make(chan any) go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown) } @@ -147,13 +147,13 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() { // we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running interceptor.pipe = <-interceptor.pipeChannel - interceptor.emergencyBailout = make(chan interface{}) + interceptor.emergencyBailout = make(chan any) //Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting go func() { buffer := &bytes.Buffer{} destination := io.MultiWriter(buffer, interceptor.forwardTo) - copyFinished := make(chan interface{}) + copyFinished := make(chan any) reader := interceptor.pipe.reader go func() { io.Copy(destination, reader) @@ -224,7 +224,7 @@ func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor { return &genericOutputInterceptor{ interceptedContent: make(chan string), pipeChannel: make(chan pipePair), - shutdown: make(chan interface{}), + shutdown: make(chan any), implementation: &osGlobalReassigningOutputInterceptorImpl{}, } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go index 8a237f4463..e0f1431d51 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go @@ -13,7 +13,7 @@ func NewOutputInterceptor() OutputInterceptor { return &genericOutputInterceptor{ interceptedContent: make(chan string), pipeChannel: make(chan pipePair), - shutdown: make(chan interface{}), + shutdown: make(chan any), implementation: &dupSyscallOutputInterceptorImpl{}, } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go index b3cd64292a..4234d802cf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -30,7 +30,7 @@ type Server interface { Close() Address() string RegisterAlive(node int, alive func() bool) - GetSuiteDone() chan interface{} + GetSuiteDone() chan any GetOutputDestination() io.Writer SetOutputDestination(io.Writer) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go index 6547c7a66e..4aa10ae4f9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -34,7 +34,7 @@ func (client *httpClient) Close() error { return nil } -func (client *httpClient) post(path string, data interface{}) error { +func (client *httpClient) post(path 
string, data any) error { var body io.Reader if data != nil { encoded, err := json.Marshal(data) @@ -54,7 +54,7 @@ func (client *httpClient) post(path string, data interface{}) error { return nil } -func (client *httpClient) poll(path string, data interface{}) error { +func (client *httpClient) poll(path string, data any) error { for { resp, err := http.Get(client.serverHost + path) if err != nil { @@ -153,10 +153,7 @@ func (client *httpClient) PostAbort() error { func (client *httpClient) ShouldAbort() bool { err := client.poll("/abort", nil) - if err == ErrorGone { - return true - } - return false + return err == ErrorGone } func (client *httpClient) Write(p []byte) (int, error) { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go index d2c71ab1b2..8a1b7a5bbe 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ -75,7 +75,7 @@ func (server *httpServer) Address() string { return "http://" + server.listener.Addr().String() } -func (server *httpServer) GetSuiteDone() chan interface{} { +func (server *httpServer) GetSuiteDone() chan any { return server.handler.done } @@ -96,7 +96,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) { // // The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` -func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { +func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object any) bool { defer request.Body.Close() if json.NewDecoder(request.Body).Decode(object) != nil { writer.WriteHeader(http.StatusBadRequest) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go index 59e8e6fd0a..bb4675a02c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -35,7 +35,7 @@ func (client *rpcClient) Close() error { return client.client.Close() } -func (client *rpcClient) poll(method string, data interface{}) error { +func (client *rpcClient) poll(method string, data any) error { for { err := client.client.Call(method, voidSender, data) if err == nil { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go index 2620fd562d..1574f99ac4 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go @@ -25,7 +25,7 @@ type RPCServer struct { handler *ServerHandler } -//Create a new server, automatically selecting a port +// Create a new server, automatically selecting a port func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -37,7 +37,7 @@ func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, e }, nil } -//Start the server. You don't need to `go s.Start()`, just `s.Start()` +// Start the server. 
You don't need to `go s.Start()`, just `s.Start()` func (server *RPCServer) Start() { rpcServer := rpc.NewServer() rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server @@ -48,17 +48,17 @@ func (server *RPCServer) Start() { go httpServer.Serve(server.listener) } -//Stop the server +// Stop the server func (server *RPCServer) Close() { server.listener.Close() } -//The address the server can be reached it. Pass this into the `ForwardingReporter`. +// The address the server can be reached it. Pass this into the `ForwardingReporter`. func (server *RPCServer) Address() string { return server.listener.Addr().String() } -func (server *RPCServer) GetSuiteDone() chan interface{} { +func (server *RPCServer) GetSuiteDone() chan any { return server.handler.done } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go index a6d98793e9..ab9e11372c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -18,7 +18,7 @@ var voidSender Void // It handles all the business logic to avoid duplication between the two servers type ServerHandler struct { - done chan interface{} + done chan any outputDestination io.Writer reporter reporters.Reporter alives []func() bool @@ -46,7 +46,7 @@ func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHan parallelTotal: parallelTotal, outputDestination: os.Stdout, - done: make(chan interface{}), + done: make(chan any), } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go index cc351a39bd..9c18dc8e58 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go @@ -8,7 +8,7 @@ import ( type ReportEntry = types.ReportEntry -func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) { +func NewReportEntry(name string, cl types.CodeLocation, args ...any) (ReportEntry, error) { out := ReportEntry{ Visibility: types.ReportEntryVisibilityAlways, Name: name, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index 73e2655656..b4ecc7cb83 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -1,6 +1,7 @@ package testingtproxy import ( + "context" "fmt" "io" "os" @@ -19,9 +20,9 @@ type addReportEntryFunc func(names string, args ...any) type ginkgoWriterInterface interface { io.Writer - Print(a ...interface{}) - Printf(format string, a ...interface{}) - Println(a ...interface{}) + Print(a ...any) + Printf(format string, a ...any) + Println(a ...any) } type ginkgoRecoverFunc func() type attachProgressReporterFunc func(func() string) func() @@ -80,11 +81,31 @@ func (t *ginkgoTestingTProxy) Setenv(key, value string) { } } -func (t *ginkgoTestingTProxy) Error(args ...interface{}) { +func (t *ginkgoTestingTProxy) Chdir(dir string) { + currentDir, err := os.Getwd() + if err != nil { + t.fail(fmt.Sprintf("Failed to get current directory: %v", err), 1) + } + + t.cleanup(os.Chdir, currentDir, internal.Offset(1)) + + err = os.Chdir(dir) + if err != nil { + t.fail(fmt.Sprintf("Failed to 
change directory: %v", err), 1) + } +} + +func (t *ginkgoTestingTProxy) Context() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + t.cleanup(cancel, internal.Offset(1)) + return ctx +} + +func (t *ginkgoTestingTProxy) Error(args ...any) { t.fail(fmt.Sprintln(args...), t.offset) } -func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) { +func (t *ginkgoTestingTProxy) Errorf(format string, args ...any) { t.fail(fmt.Sprintf(format, args...), t.offset) } @@ -100,11 +121,11 @@ func (t *ginkgoTestingTProxy) Failed() bool { return t.report().Failed() } -func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { +func (t *ginkgoTestingTProxy) Fatal(args ...any) { t.fail(fmt.Sprintln(args...), t.offset) } -func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { +func (t *ginkgoTestingTProxy) Fatalf(format string, args ...any) { t.fail(fmt.Sprintf(format, args...), t.offset) } @@ -112,11 +133,11 @@ func (t *ginkgoTestingTProxy) Helper() { types.MarkAsHelper(1) } -func (t *ginkgoTestingTProxy) Log(args ...interface{}) { +func (t *ginkgoTestingTProxy) Log(args ...any) { fmt.Fprintln(t.writer, args...) } -func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { +func (t *ginkgoTestingTProxy) Logf(format string, args ...any) { t.Log(fmt.Sprintf(format, args...)) } @@ -128,7 +149,7 @@ func (t *ginkgoTestingTProxy) Parallel() { // No-op } -func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { +func (t *ginkgoTestingTProxy) Skip(args ...any) { t.skip(fmt.Sprintln(args...), t.offset) } @@ -136,7 +157,7 @@ func (t *ginkgoTestingTProxy) SkipNow() { t.skip("skip", t.offset) } -func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { +func (t *ginkgoTestingTProxy) Skipf(format string, args ...any) { t.skip(fmt.Sprintf(format, args...), t.offset) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go index aab42d5fb3..1c4e0534e4 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -121,15 +121,15 @@ func (w *Writer) ClearTeeWriters() { w.teeWriters = []io.Writer{} } -func (w *Writer) Print(a ...interface{}) { +func (w *Writer) Print(a ...any) { fmt.Fprint(w, a...) } -func (w *Writer) Printf(format string, a ...interface{}) { +func (w *Writer) Printf(format string, a ...any) { fmt.Fprintf(w, format, a...) } -func (w *Writer) Println(a ...interface{}) { +func (w *Writer) Println(a ...any) { fmt.Fprintln(w, a...) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 480730486a..74ad0768b7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -685,11 +685,11 @@ func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) { } /* Rendering text */ -func (r *DefaultReporter) f(format string, args ...interface{}) string { +func (r *DefaultReporter) f(format string, args ...any) string { return r.formatter.F(format, args...) } -func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string { +func (r *DefaultReporter) fi(indentation uint, format string, args ...any) string { return r.formatter.Fi(indentation, format, args...) 
} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index aa1a35176a..5bf2e62e90 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -60,7 +60,7 @@ AddReportEntry() must be called within a Subject or Setup node - not in a Contai You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports */ -func AddReportEntry(name string, args ...interface{}) { +func AddReportEntry(name string, args ...any) { cl := types.NewCodeLocation(1) reportEntry, err := internal.NewReportEntry(name, cl, args...) if err != nil { @@ -89,7 +89,7 @@ You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#g You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ func ReportBeforeEach(body any, args ...any) bool { - combinedArgs := []interface{}{body} + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...)) @@ -113,7 +113,7 @@ You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#ge You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ func ReportAfterEach(body any, args ...any) bool { - combinedArgs := []interface{}{body} + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...)) @@ -143,7 +143,7 @@ You can learn more about Ginkgo's reporting infrastructure, including generating You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ func ReportBeforeSuite(body any, args ...any) bool { - combinedArgs := []interface{}{body} + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) } @@ -174,8 +174,8 @@ You can learn more about Ginkgo's reporting infrastructure, including generating You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterSuite(text string, body any, args ...interface{}) bool { - combinedArgs := []interface{}{body} +func ReportAfterSuite(text string, body any, args ...any) bool { + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index 9074a57ac9..b9e0ca9ef7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -23,7 +23,7 @@ You can learn more about generating EntryDescriptions here: https://onsi.github. */ type EntryDescription string -func (ed EntryDescription) render(args ...interface{}) string { +func (ed EntryDescription) render(args ...any) string { return fmt.Sprintf(string(ed), args...) 
} @@ -44,7 +44,7 @@ For example: You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns */ -func DescribeTable(description string, args ...interface{}) bool { +func DescribeTable(description string, args ...any) bool { GinkgoHelper() generateTable(description, false, args...) return true @@ -53,7 +53,7 @@ func DescribeTable(description string, args ...interface{}) bool { /* You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. */ -func FDescribeTable(description string, args ...interface{}) bool { +func FDescribeTable(description string, args ...any) bool { GinkgoHelper() args = append(args, internal.Focus) generateTable(description, false, args...) @@ -63,7 +63,7 @@ func FDescribeTable(description string, args ...interface{}) bool { /* You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. */ -func PDescribeTable(description string, args ...interface{}) bool { +func PDescribeTable(description string, args ...any) bool { GinkgoHelper() args = append(args, internal.Pending) generateTable(description, false, args...) @@ -109,7 +109,7 @@ Note that you **must** place define an It inside the body function. You can learn more about DescribeTableSubtree here: https://onsi.github.io/ginkgo/#table-specs And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns */ -func DescribeTableSubtree(description string, args ...interface{}) bool { +func DescribeTableSubtree(description string, args ...any) bool { GinkgoHelper() generateTable(description, true, args...) return true @@ -118,7 +118,7 @@ func DescribeTableSubtree(description string, args ...interface{}) bool { /* You can focus a table with `FDescribeTableSubtree`. This is equivalent to `FDescribe`. */ -func FDescribeTableSubtree(description string, args ...interface{}) bool { +func FDescribeTableSubtree(description string, args ...any) bool { GinkgoHelper() args = append(args, internal.Focus) generateTable(description, true, args...) @@ -128,7 +128,7 @@ func FDescribeTableSubtree(description string, args ...interface{}) bool { /* You can mark a table as pending with `PDescribeTableSubtree`. This is equivalent to `PDescribe`. */ -func PDescribeTableSubtree(description string, args ...interface{}) bool { +func PDescribeTableSubtree(description string, args ...any) bool { GinkgoHelper() args = append(args, internal.Pending) generateTable(description, true, args...) @@ -144,9 +144,9 @@ var XDescribeTableSubtree = PDescribeTableSubtree TableEntry represents an entry in a table test. You generally use the `Entry` constructor. */ type TableEntry struct { - description interface{} - decorations []interface{} - parameters []interface{} + description any + decorations []any + parameters []any codeLocation types.CodeLocation } @@ -162,7 +162,7 @@ If you want to generate interruptible specs simply write a Table function that a You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs */ -func Entry(description interface{}, args ...interface{}) TableEntry { +func Entry(description any, args ...any) TableEntry { GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) 
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)} @@ -171,7 +171,7 @@ func Entry(description interface{}, args ...interface{}) TableEntry { /* You can focus a particular entry with FEntry. This is equivalent to FIt. */ -func FEntry(description interface{}, args ...interface{}) TableEntry { +func FEntry(description any, args ...any) TableEntry { GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) decorations = append(decorations, internal.Focus) @@ -181,7 +181,7 @@ func FEntry(description interface{}, args ...interface{}) TableEntry { /* You can mark a particular entry as pending with PEntry. This is equivalent to PIt. */ -func PEntry(description interface{}, args ...interface{}) TableEntry { +func PEntry(description any, args ...any) TableEntry { GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) decorations = append(decorations, internal.Pending) @@ -196,17 +196,17 @@ var XEntry = PEntry var contextType = reflect.TypeOf(new(context.Context)).Elem() var specContextType = reflect.TypeOf(new(SpecContext)).Elem() -func generateTable(description string, isSubtree bool, args ...interface{}) { +func generateTable(description string, isSubtree bool, args ...any) { GinkgoHelper() cl := types.NewCodeLocation(0) - containerNodeArgs := []interface{}{cl} + containerNodeArgs := []any{cl} entries := []TableEntry{} - var internalBody interface{} + var internalBody any var internalBodyType reflect.Type - var tableLevelEntryDescription interface{} - tableLevelEntryDescription = func(args ...interface{}) string { + var tableLevelEntryDescription any + tableLevelEntryDescription = func(args ...any) string { out := []string{} for _, arg := range args { out = append(out, fmt.Sprint(arg)) @@ -265,7 +265,7 @@ func generateTable(description string, isSubtree bool, args ...interface{}) { err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation) } - internalNodeArgs := []interface{}{entry.codeLocation} + internalNodeArgs := []any{entry.codeLocation} internalNodeArgs = append(internalNodeArgs, entry.decorations...) 
hasContext := false @@ -290,7 +290,7 @@ func generateTable(description string, isSubtree bool, args ...interface{}) { if err != nil { panic(err) } - invokeFunction(internalBody, append([]interface{}{c}, entry.parameters...)) + invokeFunction(internalBody, append([]any{c}, entry.parameters...)) }) if isSubtree { exitIfErr(types.GinkgoErrors.ContextsCannotBeUsedInSubtreeTables(cl)) @@ -316,7 +316,7 @@ func generateTable(description string, isSubtree bool, args ...interface{}) { pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...)) } -func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value { +func invokeFunction(function any, parameters []any) []reflect.Value { inValues := make([]reflect.Value, len(parameters)) funcType := reflect.TypeOf(function) @@ -339,7 +339,7 @@ func invokeFunction(function interface{}, parameters []interface{}) []reflect.Va return reflect.ValueOf(function).Call(inValues) } -func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error { +func validateParameters(function any, parameters []any, kind string, cl types.CodeLocation, hasContext bool) error { funcType := reflect.TypeOf(function) limit := funcType.NumIn() offset := 0 @@ -377,7 +377,7 @@ func validateParameters(function interface{}, parameters []interface{}, kind str return nil } -func computeValue(parameter interface{}, t reflect.Type) reflect.Value { +func computeValue(parameter any, t reflect.Type) reflect.Value { if parameter == nil { return reflect.Zero(t) } else { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 8c0dfab8c0..2e827efe30 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -159,7 +159,7 @@ func (g CLIConfig) ComputedProcs() int { n := 1 if g.Parallel { - n = runtime.NumCPU() + n = runtime.GOMAXPROCS(-1) if n > 4 { n = n - 1 } @@ -172,7 +172,7 @@ func (g CLIConfig) ComputedNumCompilers() int { return g.NumCompilers } - return runtime.NumCPU() + return runtime.GOMAXPROCS(-1) } // Configuration for the Ginkgo CLI capturing available go flags @@ -231,6 +231,10 @@ func (g GoFlagsConfig) BinaryMustBePreserved() bool { return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != "" } +func (g GoFlagsConfig) NeedsSymbols() bool { + return g.BinaryMustBePreserved() +} + // Configuration that were deprecated in 2.0 type deprecatedConfig struct { DebugParallel bool @@ -257,8 +261,12 @@ var FlagSections = GinkgoFlagSections{ {Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"}, {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"}, {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"}, - {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"}, - {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"}, + {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis", + Description: "When generating a cover files, please pass a filename {{bold}}not{{/}} a path. To specify a different directory use {{magenta}}--output-dir{{/}}.", + }, + {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis", + Description: "When generating profile files, please pass filenames {{bold}}not{{/}} a path. 
Ginkgo will generate a profile file with the given name in the package's directory. To specify a different directory use {{magenta}}--output-dir{{/}}.", + }, {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests", Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."}, {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"}, @@ -365,7 +373,7 @@ var ReporterConfigFlags = GinkgoFlags{ func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) { flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...) flags = flags.WithPrefix("ginkgo") - bindings := map[string]interface{}{ + bindings := map[string]any{ "S": suiteConfig, "R": reporterConfig, "D": &deprecatedConfig{}, @@ -515,7 +523,7 @@ var GoBuildFlags = GinkgoFlags{ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."}, {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", - Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, + Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty (by explicitly passing --vet=""), "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", Usage: "Enable coverage analysis. Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."}, {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis", @@ -572,7 +580,7 @@ var GoBuildFlags = GinkgoFlags{ // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI var GoRunFlags = GinkgoFlags{ {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis", - Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`}, + Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover. Must be passed a filename, not a path. Use output-dir to control the location of the output.`}, {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis", Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. 
Preserves test binary.`}, {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", @@ -600,6 +608,22 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails()) } + if strings.ContainsRune(goFlagsConfig.CoverProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--coverprofile", goFlagsConfig.CoverProfile)) + } + if strings.ContainsRune(goFlagsConfig.CPUProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--cpuprofile", goFlagsConfig.CPUProfile)) + } + if strings.ContainsRune(goFlagsConfig.MemProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--memprofile", goFlagsConfig.MemProfile)) + } + if strings.ContainsRune(goFlagsConfig.BlockProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--blockprofile", goFlagsConfig.BlockProfile)) + } + if strings.ContainsRune(goFlagsConfig.MutexProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--mutexprofile", goFlagsConfig.MutexProfile)) + } + //initialize the output directory if cliConfig.OutputDir != "" { err := os.MkdirAll(cliConfig.OutputDir, 0777) @@ -620,7 +644,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string, preserveSymbols bool) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { @@ -643,10 +667,14 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") } + if !goFlagsConfig.NeedsSymbols() && goFlagsConfig.LDFlags == "" && !preserveSymbols { + goFlagsConfig.LDFlags = "-w -s" + } + args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, - map[string]interface{}{ + map[string]any{ "Go": &goFlagsConfig, }, ) @@ -665,7 +693,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...) flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...) flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...) 
- bindings := map[string]interface{}{ + bindings := map[string]any{ "S": &suiteConfig, "R": &reporterConfig, "Go": &goFlagsConfig, @@ -677,7 +705,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC // GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) { flags := GoRunFlags.WithPrefix("test") - bindings := map[string]interface{}{ + bindings := map[string]any{ "Go": &goFlagsConfig, } @@ -699,7 +727,7 @@ func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterCo flags = flags.CopyAppend(GoBuildFlags...) flags = flags.CopyAppend(GoRunFlags...) - bindings := map[string]interface{}{ + bindings := map[string]any{ "S": suiteConfig, "R": reporterConfig, "C": cliConfig, @@ -720,7 +748,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter flags = flags.CopyAppend(GoBuildFlags...) flags = flags.CopyAppend(GoRunFlags...) - bindings := map[string]interface{}{ + bindings := map[string]any{ "S": suiteConfig, "R": reporterConfig, "C": cliConfig, @@ -736,7 +764,7 @@ func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig flags := GinkgoCLISharedFlags flags = flags.CopyAppend(GoBuildFlags...) - bindings := map[string]interface{}{ + bindings := map[string]any{ "C": cliConfig, "Go": goFlagsConfig, "D": &deprecatedConfig{}, @@ -760,7 +788,7 @@ func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) { flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package") - bindings := map[string]interface{}{ + bindings := map[string]any{ "C": cliConfig, } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go index 17922304b6..518989a844 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go @@ -113,7 +113,7 @@ type DeprecatedSpecFailure struct { type DeprecatedSpecMeasurement struct { Name string - Info interface{} + Info any Order int Results []float64 diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 6bb72d00cc..c2796b5490 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -88,7 +88,7 @@ body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, n } } -func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error { +func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic any, cl CodeLocation) error { return GinkgoError{ Heading: "Assertion or Panic detected during tree construction", Message: formatter.F( @@ -189,7 +189,7 @@ func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl } } -func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { +func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator any) error { return GinkgoError{ Heading: "Unknown Decorator", Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), @@ -345,7 +345,7 @@ func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error { } /* ReportEntry errors */ -func (g ginkgoErrors) 
TooManyReportEntryValues(cl CodeLocation, arg interface{}) error { +func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg any) error { return GinkgoError{ Heading: "Too Many ReportEntry Values", Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg), @@ -539,7 +539,7 @@ func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { /* Configuration errors */ -func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error { +func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value any) error { return GinkgoError{ Heading: "Unknown Type passed to RunSpecs", Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), @@ -629,6 +629,20 @@ func (g ginkgoErrors) BothRepeatAndUntilItFails() error { } } +func (g ginkgoErrors) ExpectFilenameNotPath(flag string, path string) error { + return GinkgoError{ + Heading: fmt.Sprintf("%s expects a filename but was given a path: %s", flag, path), + Message: fmt.Sprintf("%s takes a filename, not a path. Use --output-dir to specify a directory to collect all test outputs.", flag), + } +} + +func (g ginkgoErrors) FlagAfterPositionalParameter() error { + return GinkgoError{ + Heading: "Malformed arguments - detected a flag after the package liste", + Message: "Make sure all flags appear {{bold}}after{{/}} the Ginkgo subcommand and {{bold}}before{{/}} your list of packages (or './...').\n{{gray}}e.g. 'ginkgo run -p my_package' is valid but `ginkgo -p run my_package` is not.\n{{gray}}e.g. 'ginkgo -p -vet=\"\" ./...' is valid but 'ginkgo -p ./... -vet=\"\"' is not{{/}}", + } +} + /* Stack-Trace parsing errors */ func (g ginkgoErrors) FailedToParseStackTrace(message string) error { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index de69f3022d..8409653f97 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -92,7 +92,7 @@ func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) { type GinkgoFlagSet struct { flags GinkgoFlags - bindings interface{} + bindings any sections GinkgoFlagSections extraGoFlagsSection GinkgoFlagSection @@ -101,7 +101,7 @@ type GinkgoFlagSet struct { } // Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet -func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) { +func NewGinkgoFlagSet(flags GinkgoFlags, bindings any, sections GinkgoFlagSections) (GinkgoFlagSet, error) { return bindFlagSet(GinkgoFlagSet{ flags: flags, bindings: bindings, @@ -110,7 +110,7 @@ func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFl } // Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet -func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { +func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings any, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { return bindFlagSet(GinkgoFlagSet{ flags: flags, bindings: bindings, @@ -335,7 +335,7 @@ func (f GinkgoFlagSet) substituteUsage() { fmt.Fprintln(f.flagSet.Output(), f.Usage()) } -func valueAtKeyPath(root interface{}, keyPath string) 
(reflect.Value, bool) { +func valueAtKeyPath(root any, keyPath string) (reflect.Value, bool) { if len(keyPath) == 0 { return reflect.Value{}, false } @@ -433,7 +433,7 @@ func (ssv stringSliceVar) Set(s string) error { } // given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. -func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { +func GenerateFlagArgs(flags GinkgoFlags, bindings any) ([]string, error) { result := []string{} for _, flag := range flags { name := flag.ExportAs diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index 7fdc8aa23f..40a909b6d5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -343,7 +343,7 @@ func tokenize(input string) func() (*treeNode, error) { consumeUntil := func(cutset string) (string, int) { j := i for ; j < len(runes); j++ { - if strings.IndexRune(cutset, runes[j]) >= 0 { + if strings.ContainsRune(cutset, runes[j]) { break } } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go index 7b1524b52e..63f7a9f6da 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -9,18 +9,18 @@ import ( // ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports // and across the network connection when running in parallel type ReportEntryValue struct { - raw interface{} //unexported to prevent gob from freaking out about unregistered structs + raw any //unexported to prevent gob from freaking out about unregistered structs AsJSON string Representation string } -func WrapEntryValue(value interface{}) ReportEntryValue { +func WrapEntryValue(value any) ReportEntryValue { return ReportEntryValue{ raw: value, } } -func (rev ReportEntryValue) GetRawValue() interface{} { +func (rev ReportEntryValue) GetRawValue() any { return rev.raw } @@ -118,7 +118,7 @@ func (entry ReportEntry) StringRepresentation() string { // If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be // a JSON-decoded {}interface. If you want to reconstitute your original object you can decode the entry.Value.AsJSON // field yourself. 
-func (entry ReportEntry) GetRawValue() interface{} { +func (entry ReportEntry) GetRawValue() any { return entry.Value.GetRawValue() } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 501af2fd99..158ac2fd89 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.22.1" +const VERSION = "2.23.4" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index a20d997cdd..de9c957cc6 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,43 @@ +## 1.38.0 + +### Features +- gstruct handles extra unexported fields [4ee7ed0] + +### Fixes +- support [] in IgnoringTopFunction function signatures (#851) [36bbf72] + +### Maintenance +- Bump golang.org/x/net from 0.40.0 to 0.41.0 (#846) [529d408] +- Fix typo [acd1f55] +- Bump google.golang.org/protobuf from 1.36.5 to 1.36.6 (#835) [bae65a0] +- Bump nokogiri from 1.18.4 to 1.18.8 in /docs (#842) [8dda91f] +- Bump golang.org/x/net from 0.39.0 to 0.40.0 (#843) [212d812] +- Bump github.com/onsi/ginkgo/v2 from 2.23.3 to 2.23.4 (#839) [59bd7f9] +- Bump nokogiri from 1.18.1 to 1.18.4 in /docs (#834) [328c729] +- Bump uri from 1.0.2 to 1.0.3 in /docs (#826) [9a798a1] +- Bump golang.org/x/net from 0.37.0 to 0.39.0 (#841) [04a72c6] + +## 1.37.0 + +### Features +- add To/ToNot/NotTo aliases for AsyncAssertion [5666f98] + +## 1.36.3 + +### Maintenance + +- bump all the things [adb8b49] +- chore: replace `interface{}` with `any` [7613216] +- Bump google.golang.org/protobuf from 1.36.1 to 1.36.5 (#822) [9fe5259] +- remove spurious "toolchain" from go.mod (#819) [a0e85b9] +- Bump golang.org/x/net from 0.33.0 to 0.35.0 (#823) [604a8b1] +- Bump activesupport from 6.0.6.1 to 6.1.7.5 in /docs (#772) [36fbc84] +- Bump github-pages from 231 to 232 in /docs (#778) [ced70d7] +- Bump rexml from 3.2.6 to 3.3.9 in /docs (#788) [c8b4a07] +- Bump github.com/onsi/ginkgo/v2 from 2.22.1 to 2.22.2 (#812) [06431b9] +- Bump webrick from 1.8.1 to 1.9.1 in /docs (#800) [b55a92d] +- Fix typos (#813) [a1d518b] + ## 1.36.2 ### Maintenance @@ -322,7 +362,7 @@ Require Go 1.22+ ### Features -Introducting [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers. +Introducing [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers. This is an RC release for `gcustom`. The external API may be tweaked in response to feedback however it is expected to remain mostly stable. @@ -461,7 +501,7 @@ These improvements are all documented in [Gomega's docs](https://onsi.github.io/ - Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497] - Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5] - Fix for Go 1.18 (#532) [56d2a29] -- Document precendence of timeouts (#533) [b607941] +- Document precedence of timeouts (#533) [b607941] ## 1.18.1 @@ -478,7 +518,7 @@ These improvements are all documented in [Gomega's docs](https://onsi.github.io/ ## Fixes - Gomega now uses ioutil for Go 1.15 and lower (#492) - official support is only for the most recent two major versions of Go but this will unblock users who need to stay on older unsupported versions of Go. 
[c29c1c0] -## Maintenace +## Maintenance - Remove Travis workflow (#491) [72e6040] - Upgrade to Ginkgo 2.0.0 GA [f383637] - chore: fix description of HaveField matcher (#487) [2b4b2c0] @@ -726,7 +766,7 @@ Improvements: - Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout. - `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests. -- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShoudlNot(Receive()) always passes with a closed channel. +- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel. - Added `HavePrefix` and `HaveSuffix` matchers. - `ghttp` can now handle concurrent requests. - Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`. @@ -736,7 +776,7 @@ Improvements: - `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives. - Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher - Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers -- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the alloted time. +- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the allotted time. - Added `gbytes.BufferReader` - this constructs a `gbytes.Buffer` that asynchronously reads the passed-in `io.Reader` into its buffer. Bug Fixes: @@ -781,7 +821,7 @@ New Matchers: Updated Matchers: -- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an objet that satisfies the passed-in matcher. +- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher. - Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed. Misc: diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index 6c1680638b..96f04b2104 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -57,7 +57,7 @@ var Indent = " " var longFormThreshold = 20 -// GomegaStringer allows for custom formating of objects for gomega. +// GomegaStringer allows for custom formatting of objects for gomega. type GomegaStringer interface { // GomegaString will be used to custom format an object. // It does not follow UseStringerRepresentation value and will always be called regardless. 
@@ -73,7 +73,7 @@ If the CustomFormatter does not want to handle the object it should return ("", Strings returned by CustomFormatters are not truncated */ -type CustomFormatter func(value interface{}) (string, bool) +type CustomFormatter func(value any) (string, bool) type CustomFormatterKey uint var customFormatterKey CustomFormatterKey = 1 @@ -125,7 +125,7 @@ If expected is omitted, then the message looks like: */ -func Message(actual interface{}, message string, expected ...interface{}) string { +func Message(actual any, message string, expected ...any) string { if len(expected) == 0 { return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message) } @@ -255,7 +255,7 @@ recursing into the object. Set PrintContextObjects to true to print the content of objects implementing context.Context */ -func Object(object interface{}, indentation uint) string { +func Object(object any, indentation uint) string { indent := strings.Repeat(Indent, int(indentation)) value := reflect.ValueOf(object) commonRepresentation := "" @@ -392,7 +392,7 @@ func formatValue(value reflect.Value, indentation uint) string { } } -func formatString(object interface{}, indentation uint) string { +func formatString(object any, indentation uint) string { if indentation == 1 { s := fmt.Sprintf("%s", object) components := strings.Split(s, "\n") diff --git a/vendor/github.com/onsi/gomega/gbytes/buffer.go b/vendor/github.com/onsi/gomega/gbytes/buffer.go index 6f77b23717..df34e552c6 100644 --- a/vendor/github.com/onsi/gomega/gbytes/buffer.go +++ b/vendor/github.com/onsi/gomega/gbytes/buffer.go @@ -7,7 +7,6 @@ Subsequent matches against the buffer will only operate against data that appear The read cursor is an opaque implementation detail that you cannot access. You should use the Say matcher to sift through the buffer. You can always access the entire buffer's contents with Contents(). - */ package gbytes @@ -29,7 +28,7 @@ type Buffer struct { contents []byte readCursor uint64 lock *sync.Mutex - detectCloser chan interface{} + detectCloser chan any closed bool } @@ -167,19 +166,25 @@ You could do something like: select { case <-buffer.Detect("You are not logged in"): + //log in + case <-buffer.Detect("Success"): + //carry on + case <-time.After(time.Second): - //welp -} + + //welp + } + buffer.CancelDetects() You should always call CancelDetects after using Detect. This will close any channels that have not detected and clean up the goroutines that were spawned to support them. Finally, you can pass detect a format string followed by variadic arguments. This will construct the regexp using fmt.Sprintf. */ -func (b *Buffer) Detect(desired string, args ...interface{}) chan bool { +func (b *Buffer) Detect(desired string, args ...any) chan bool { formattedRegexp := desired if len(args) > 0 { formattedRegexp = fmt.Sprintf(desired, args...) @@ -190,7 +195,7 @@ func (b *Buffer) Detect(desired string, args ...interface{}) chan bool { defer b.lock.Unlock() if b.detectCloser == nil { - b.detectCloser = make(chan interface{}) + b.detectCloser = make(chan any) } closer := b.detectCloser diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go index 0763f5e2d5..2861a9c20f 100644 --- a/vendor/github.com/onsi/gomega/gbytes/say_matcher.go +++ b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go @@ -9,7 +9,7 @@ import ( "github.com/onsi/gomega/format" ) -//Objects satisfying the BufferProvider can be used with the Say matcher. 
+// Objects satisfying the BufferProvider can be used with the Say matcher. type BufferProvider interface { Buffer() *Buffer } @@ -37,7 +37,7 @@ In such cases, Say simply operates on the *gbytes.Buffer returned by Buffer() If the buffer is closed, the Say matcher will tell Eventually to abort. */ -func Say(expected string, args ...interface{}) *sayMatcher { +func Say(expected string, args ...any) *sayMatcher { if len(args) > 0 { expected = fmt.Sprintf(expected, args...) } @@ -51,7 +51,7 @@ type sayMatcher struct { receivedSayings []byte } -func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) { +func (m *sayMatcher) buffer(actual any) (*Buffer, bool) { var buffer *Buffer switch x := actual.(type) { @@ -66,7 +66,7 @@ func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) { return buffer, true } -func (m *sayMatcher) Match(actual interface{}) (success bool, err error) { +func (m *sayMatcher) Match(actual any) (success bool, err error) { buffer, ok := m.buffer(actual) if !ok { return false, fmt.Errorf("Say must be passed a *gbytes.Buffer or BufferProvider. Got:\n%s", format.Object(actual, 1)) @@ -78,7 +78,7 @@ func (m *sayMatcher) Match(actual interface{}) (success bool, err error) { return didSay, nil } -func (m *sayMatcher) FailureMessage(actual interface{}) (message string) { +func (m *sayMatcher) FailureMessage(actual any) (message string) { return fmt.Sprintf( "Got stuck at:\n%s\nWaiting for:\n%s", format.IndentString(string(m.receivedSayings), 1), @@ -86,7 +86,7 @@ func (m *sayMatcher) FailureMessage(actual interface{}) (message string) { ) } -func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *sayMatcher) NegatedFailureMessage(actual any) (message string) { return fmt.Sprintf( "Saw:\n%s\nWhich matches the unexpected:\n%s", format.IndentString(string(m.receivedSayings), 1), @@ -94,7 +94,7 @@ func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) ) } -func (m *sayMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (m *sayMatcher) MatchMayChangeInTheFuture(actual any) bool { switch x := actual.(type) { case *Buffer: return !x.Closed() diff --git a/vendor/github.com/onsi/gomega/gcustom/make_matcher.go b/vendor/github.com/onsi/gomega/gcustom/make_matcher.go index 5372fa441e..ca556ceb00 100644 --- a/vendor/github.com/onsi/gomega/gcustom/make_matcher.go +++ b/vendor/github.com/onsi/gomega/gcustom/make_matcher.go @@ -12,7 +12,7 @@ import ( "github.com/onsi/gomega/format" ) -var interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() +var interfaceType = reflect.TypeOf((*any)(nil)).Elem() var errInterface = reflect.TypeOf((*error)(nil)).Elem() var defaultTemplate = template.Must(ParseTemplate("{{if .Failure}}Custom matcher failed for:{{else}}Custom matcher succeeded (but was expected to fail) for:{{end}}\n{{.FormattedActual}}")) @@ -191,7 +191,7 @@ func (c CustomGomegaMatcher) WithTemplate(templ string, data ...any) CustomGomeg /* WithPrecompiledTemplate returns a CustomGomegaMatcher configured to use the passed-in template. The template should be precompiled with gcustom.ParseTemplate(). -As with WithTemplate() you can provide a single pice of additional data as an optional argument. This is accessed in the template via {{.Data}} +As with WithTemplate() you can provide a single piece of additional data as an optional argument. 
This is accessed in the template via {{.Data}} */ func (c CustomGomegaMatcher) WithPrecompiledTemplate(templ *template.Template, data ...any) CustomGomegaMatcher { c.templateMessage = templ diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go index 6e70de68d5..11fa004b72 100644 --- a/vendor/github.com/onsi/gomega/gexec/exit_matcher.go +++ b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go @@ -43,7 +43,7 @@ type Exiter interface { ExitCode() int } -func (m *exitMatcher) Match(actual interface{}) (success bool, err error) { +func (m *exitMatcher) Match(actual any) (success bool, err error) { exiter, ok := actual.(Exiter) if !ok { return false, fmt.Errorf("Exit must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1)) @@ -61,14 +61,14 @@ func (m *exitMatcher) Match(actual interface{}) (success bool, err error) { return m.exitCode == m.actualExitCode, nil } -func (m *exitMatcher) FailureMessage(actual interface{}) (message string) { +func (m *exitMatcher) FailureMessage(actual any) (message string) { if m.actualExitCode == -1 { return "Expected process to exit. It did not." } return format.Message(m.actualExitCode, "to match exit code:", m.exitCode) } -func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *exitMatcher) NegatedFailureMessage(actual any) (message string) { if m.actualExitCode == -1 { return "you really shouldn't be able to see this!" } else { @@ -79,7 +79,7 @@ func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) } } -func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (m *exitMatcher) MatchMayChangeInTheFuture(actual any) bool { session, ok := actual.(*Session) if ok { return session.ExitCode() == -1 diff --git a/vendor/github.com/onsi/gomega/gexec/session.go b/vendor/github.com/onsi/gomega/gexec/session.go index be4877ac90..1e381df1c3 100644 --- a/vendor/github.com/onsi/gomega/gexec/session.go +++ b/vendor/github.com/onsi/gomega/gexec/session.go @@ -140,7 +140,7 @@ will wait for the command to exit then return the entirety of Out's contents. Wait uses eventually under the hood and accepts the same timeout/polling intervals that eventually does. */ -func (s *Session) Wait(timeout ...interface{}) *Session { +func (s *Session) Wait(timeout ...any) *Session { EventuallyWithOffset(1, s, timeout...).Should(Exit()) return s } @@ -225,7 +225,7 @@ The timeout specified is applied to each process killed. If any of the processes already exited, KillAndWait returns silently. */ -func KillAndWait(timeout ...interface{}) { +func KillAndWait(timeout ...any) { trackedSessionsMutex.Lock() defer trackedSessionsMutex.Unlock() for _, session := range trackedSessions { @@ -240,7 +240,7 @@ The timeout specified is applied to each process killed. If any of the processes already exited, TerminateAndWait returns silently. 
*/ -func TerminateAndWait(timeout ...interface{}) { +func TerminateAndWait(timeout ...any) { trackedSessionsMutex.Lock() defer trackedSessionsMutex.Unlock() for _, session := range trackedSessions { diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 9a028f3f36..1f03e1f228 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.36.2" +const GOMEGA_VERSION = "1.38.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -178,7 +178,7 @@ func ensureDefaultGomegaIsConfigured() { // All subsequent arguments will be required to be nil/zero. // // This is convenient if you want to make an assertion on a method/function that returns -// a value and an error - a common patter in Go. +// a value and an error - a common pattern in Go. // // For example, given a function with signature: // @@ -191,7 +191,7 @@ func ensureDefaultGomegaIsConfigured() { // Will succeed only if `MyAmazingThing()` returns `(3, nil)` // // Ω and Expect are identical -func Ω(actual interface{}, extra ...interface{}) Assertion { +func Ω(actual any, extra ...any) Assertion { ensureDefaultGomegaIsConfigured() return Default.Ω(actual, extra...) } @@ -217,7 +217,7 @@ func Ω(actual interface{}, extra ...interface{}) Assertion { // Will succeed only if `MyAmazingThing()` returns `(3, nil)` // // Expect and Ω are identical -func Expect(actual interface{}, extra ...interface{}) Assertion { +func Expect(actual any, extra ...any) Assertion { ensureDefaultGomegaIsConfigured() return Default.Expect(actual, extra...) } @@ -233,7 +233,7 @@ func Expect(actual interface{}, extra ...interface{}) Assertion { // This is most useful in helper functions that make assertions. If you want Gomega's // error message to refer to the calling line in the test (as opposed to the line in the helper function) // set the first argument of `ExpectWithOffset` appropriately. -func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion { +func ExpectWithOffset(offset int, actual any, extra ...any) Assertion { ensureDefaultGomegaIsConfigured() return Default.ExpectWithOffset(offset, actual, extra...) } @@ -319,19 +319,19 @@ you an also use Eventually().WithContext(ctx) to pass in the context. Passed-in Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17)) }, SpecTimeout(time.Second)) -Either way the context pasesd to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. +Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. By default, when a context is passed to Eventually *without* an explicit timeout, Gomega will rely solely on the context's cancellation to determine when to stop polling. If you want to specify a timeout in addition to the context you can do so using the .WithTimeout() method. For example: Eventually(client.FetchCount).WithContext(ctx).WithTimeout(10*time.Second).Should(BeNumerically(">=", 17)) -now either the context cacnellation or the timeout will cause Eventually to stop polling. 
+now either the context cancellation or the timeout will cause Eventually to stop polling. If, instead, you would like to opt out of this behavior and have Gomega's default timeouts govern Eventuallys that take a context you can call: EnforceDefaultTimeoutsWhenUsingContexts() -in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if eitehr the context is cancelled or the default timeout elapses. +in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if either the context is cancelled or the default timeout elapses. **Category 3: Making assertions _in_ the function passed into Eventually** @@ -390,7 +390,7 @@ is equivalent to Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) */ -func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { +func Eventually(actualOrCtx any, args ...any) AsyncAssertion { ensureDefaultGomegaIsConfigured() return Default.Eventually(actualOrCtx, args...) } @@ -404,7 +404,7 @@ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { // `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are // the same as `Eventually(...).WithOffset(...).WithTimeout` or // `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`. -func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion { +func EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion { ensureDefaultGomegaIsConfigured() return Default.EventuallyWithOffset(offset, actualOrCtx, args...) } @@ -424,7 +424,7 @@ Consistently is useful in cases where you want to assert that something *does no This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received. */ -func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { +func Consistently(actualOrCtx any, args ...any) AsyncAssertion { ensureDefaultGomegaIsConfigured() return Default.Consistently(actualOrCtx, args...) } @@ -435,13 +435,13 @@ func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { // // `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and // optional `WithTimeout` and `WithPolling`. -func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion { +func ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion { ensureDefaultGomegaIsConfigured() return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...) } /* -StopTrying can be used to signal to Eventually and Consistentlythat they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal. +StopTrying can be used to signal to Eventually and Consistently that they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal. You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution. 
diff --git a/vendor/github.com/onsi/gomega/gstruct/elements.go b/vendor/github.com/onsi/gomega/gstruct/elements.go index b5e5ef2e45..4a6114ab8f 100644 --- a/vendor/github.com/onsi/gomega/gstruct/elements.go +++ b/vendor/github.com/onsi/gomega/gstruct/elements.go @@ -14,16 +14,17 @@ import ( "github.com/onsi/gomega/types" ) -//MatchAllElements succeeds if every element of a slice matches the element matcher it maps to -//through the id function, and every element matcher is matched. -// idFn := func(element interface{}) string { -// return fmt.Sprintf("%v", element) -// } +// MatchAllElements succeeds if every element of a slice matches the element matcher it maps to +// through the id function, and every element matcher is matched. // -// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{ -// "a": Equal("a"), -// "b": Equal("b"), -// })) +// idFn := func(element any) string { +// return fmt.Sprintf("%v", element) +// } +// +// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{ +// "a": Equal("a"), +// "b": Equal("b"), +// })) func MatchAllElements(identifier Identifier, elements Elements) types.GomegaMatcher { return &ElementsMatcher{ Identifier: identifier, @@ -31,16 +32,17 @@ func MatchAllElements(identifier Identifier, elements Elements) types.GomegaMatc } } -//MatchAllElementsWithIndex succeeds if every element of a slice matches the element matcher it maps to -//through the id with index function, and every element matcher is matched. -// idFn := func(index int, element interface{}) string { -// return strconv.Itoa(index) -// } +// MatchAllElementsWithIndex succeeds if every element of a slice matches the element matcher it maps to +// through the id with index function, and every element matcher is matched. +// +// idFn := func(index int, element any) string { +// return strconv.Itoa(index) +// } // -// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{ -// "0": Equal("a"), -// "1": Equal("b"), -// })) +// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{ +// "0": Equal("a"), +// "1": Equal("b"), +// })) func MatchAllElementsWithIndex(identifier IdentifierWithIndex, elements Elements) types.GomegaMatcher { return &ElementsMatcher{ Identifier: identifier, @@ -48,22 +50,23 @@ func MatchAllElementsWithIndex(identifier IdentifierWithIndex, elements Elements } } -//MatchElements succeeds if each element of a slice matches the element matcher it maps to -//through the id function. It can ignore extra elements and/or missing elements. -// idFn := func(element interface{}) string { -// return fmt.Sprintf("%v", element) -// } +// MatchElements succeeds if each element of a slice matches the element matcher it maps to +// through the id function. It can ignore extra elements and/or missing elements. 
// -// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{ -// "a": Equal("a"), -// "b": Equal("b"), -// })) -// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{ -// "a": Equal("a"), -// "b": Equal("b"), -// "c": Equal("c"), -// "d": Equal("d"), -// })) +// idFn := func(element any) string { +// return fmt.Sprintf("%v", element) +// } +// +// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{ +// "a": Equal("a"), +// "b": Equal("b"), +// })) +// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{ +// "a": Equal("a"), +// "b": Equal("b"), +// "c": Equal("c"), +// "d": Equal("d"), +// })) func MatchElements(identifier Identifier, options Options, elements Elements) types.GomegaMatcher { return &ElementsMatcher{ Identifier: identifier, @@ -74,22 +77,23 @@ func MatchElements(identifier Identifier, options Options, elements Elements) ty } } -//MatchElementsWithIndex succeeds if each element of a slice matches the element matcher it maps to -//through the id with index function. It can ignore extra elements and/or missing elements. -// idFn := func(index int, element interface{}) string { -// return strconv.Itoa(index) -// } +// MatchElementsWithIndex succeeds if each element of a slice matches the element matcher it maps to +// through the id with index function. It can ignore extra elements and/or missing elements. +// +// idFn := func(index int, element any) string { +// return strconv.Itoa(index) +// } // -// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{ -// "0": Equal("a"), -// "1": Equal("b"), -// })) -// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{ -// "0": Equal("a"), -// "1": Equal("b"), -// "2": Equal("c"), -// "3": Equal("d"), -// })) +// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{ +// "0": Equal("a"), +// "1": Equal("b"), +// })) +// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{ +// "0": Equal("a"), +// "1": Equal("b"), +// "2": Equal("c"), +// "3": Equal("d"), +// })) func MatchElementsWithIndex(identifier IdentifierWithIndex, options Options, elements Elements) types.GomegaMatcher { return &ElementsMatcher{ Identifier: identifier, @@ -124,35 +128,35 @@ type ElementsMatcher struct { type Elements map[string]types.GomegaMatcher // Function for identifying (mapping) elements. -type Identifier func(element interface{}) string +type Identifier func(element any) string -// Calls the underlying fucntion with the provided params. +// Calls the underlying function with the provided params. // Identifier drops the index. -func (i Identifier) WithIndexAndElement(index int, element interface{}) string { +func (i Identifier) WithIndexAndElement(index int, element any) string { return i(element) } // Uses the index and element to generate an element name -type IdentifierWithIndex func(index int, element interface{}) string +type IdentifierWithIndex func(index int, element any) string -// Calls the underlying fucntion with the provided params. +// Calls the underlying function with the provided params. // IdentifierWithIndex uses the index. 
-func (i IdentifierWithIndex) WithIndexAndElement(index int, element interface{}) string { +func (i IdentifierWithIndex) WithIndexAndElement(index int, element any) string { return i(index, element) } -// Interface for identifing the element +// Interface for identifying the element type Identify interface { - WithIndexAndElement(i int, element interface{}) string + WithIndexAndElement(i int, element any) string } // IndexIdentity is a helper function for using an index as // the key in the element map -func IndexIdentity(index int, _ interface{}) string { +func IndexIdentity(index int, _ any) string { return strconv.Itoa(index) } -func (m *ElementsMatcher) Match(actual interface{}) (success bool, err error) { +func (m *ElementsMatcher) Match(actual any) (success bool, err error) { if reflect.TypeOf(actual).Kind() != reflect.Slice { return false, fmt.Errorf("%v is type %T, expected slice", actual, actual) } @@ -164,7 +168,7 @@ func (m *ElementsMatcher) Match(actual interface{}) (success bool, err error) { return true, nil } -func (m *ElementsMatcher) matchElements(actual interface{}) (errs []error) { +func (m *ElementsMatcher) matchElements(actual any) (errs []error) { // Provide more useful error messages in the case of a panic. defer func() { if err := recover(); err != nil { @@ -217,12 +221,12 @@ func (m *ElementsMatcher) matchElements(actual interface{}) (errs []error) { return errs } -func (m *ElementsMatcher) FailureMessage(actual interface{}) (message string) { +func (m *ElementsMatcher) FailureMessage(actual any) (message string) { failure := errorsutil.AggregateError(m.failures) return format.Message(actual, fmt.Sprintf("to match elements: %v", failure)) } -func (m *ElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *ElementsMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to match elements") } diff --git a/vendor/github.com/onsi/gomega/gstruct/fields.go b/vendor/github.com/onsi/gomega/gstruct/fields.go index faf07b1a25..2f4685fc48 100644 --- a/vendor/github.com/onsi/gomega/gstruct/fields.go +++ b/vendor/github.com/onsi/gomega/gstruct/fields.go @@ -8,61 +8,65 @@ import ( "reflect" "runtime/debug" "strings" + "unicode" "github.com/onsi/gomega/format" errorsutil "github.com/onsi/gomega/gstruct/errors" "github.com/onsi/gomega/types" ) -//MatchAllFields succeeds if every field of a struct matches the field matcher associated with -//it, and every element matcher is matched. -// actual := struct{ -// A int -// B []bool -// C string -// }{ -// A: 5, -// B: []bool{true, false}, -// C: "foo", -// } +// MatchAllFields succeeds if every field of a struct matches the field matcher associated with +// it, and every element matcher is matched. // -// Expect(actual).To(MatchAllFields(Fields{ -// "A": Equal(5), -// "B": ConsistOf(true, false), -// "C": Equal("foo"), -// })) +// actual := struct{ +// A int +// B []bool +// C string +// }{ +// A: 5, +// B: []bool{true, false}, +// C: "foo", +// } +// +// Expect(actual).To(MatchAllFields(Fields{ +// "A": Equal(5), +// "B": ConsistOf(true, false), +// "C": Equal("foo"), +// })) func MatchAllFields(fields Fields) types.GomegaMatcher { return &FieldsMatcher{ Fields: fields, } } -//MatchFields succeeds if each element of a struct matches the field matcher associated with -//it. It can ignore extra fields and/or missing fields. 
-// actual := struct{ -// A int -// B []bool -// C string -// }{ -// A: 5, -// B: []bool{true, false}, -// C: "foo", -// } +// MatchFields succeeds if each element of a struct matches the field matcher associated with +// it. It can ignore extra fields and/or missing fields. +// +// actual := struct{ +// A int +// B []bool +// C string +// }{ +// A: 5, +// B: []bool{true, false}, +// C: "foo", +// } // -// Expect(actual).To(MatchFields(IgnoreExtras, Fields{ -// "A": Equal(5), -// "B": ConsistOf(true, false), -// })) -// Expect(actual).To(MatchFields(IgnoreMissing, Fields{ -// "A": Equal(5), -// "B": ConsistOf(true, false), -// "C": Equal("foo"), -// "D": Equal("extra"), -// })) +// Expect(actual).To(MatchFields(IgnoreExtras, Fields{ +// "A": Equal(5), +// "B": ConsistOf(true, false), +// })) +// Expect(actual).To(MatchFields(IgnoreMissing, Fields{ +// "A": Equal(5), +// "B": ConsistOf(true, false), +// "C": Equal("foo"), +// "D": Equal("extra"), +// })) func MatchFields(options Options, fields Fields) types.GomegaMatcher { return &FieldsMatcher{ Fields: fields, IgnoreExtras: options&IgnoreExtras != 0, + IgnoreUnexportedExtras: options&IgnoreUnexportedExtras != 0, IgnoreMissing: options&IgnoreMissing != 0, } } @@ -73,6 +77,8 @@ type FieldsMatcher struct { // Whether to ignore extra elements or consider it an error. IgnoreExtras bool + // Whether to ignore unexported extra elements or consider it an error. + IgnoreUnexportedExtras bool // Whether to ignore missing elements or consider it an error. IgnoreMissing bool @@ -83,7 +89,7 @@ type FieldsMatcher struct { // Field name to matcher. type Fields map[string]types.GomegaMatcher -func (m *FieldsMatcher) Match(actual interface{}) (success bool, err error) { +func (m *FieldsMatcher) Match(actual any) (success bool, err error) { if reflect.TypeOf(actual).Kind() != reflect.Struct { return false, fmt.Errorf("%v is type %T, expected struct", actual, actual) } @@ -95,7 +101,15 @@ func (m *FieldsMatcher) Match(actual interface{}) (success bool, err error) { return true, nil } -func (m *FieldsMatcher) matchFields(actual interface{}) (errs []error) { +func isExported(fieldName string) bool { + if fieldName == "" { + return false + } + r := []rune(fieldName)[0] + return unicode.IsUpper(r) +} + +func (m *FieldsMatcher) matchFields(actual any) (errs []error) { val := reflect.ValueOf(actual) typ := val.Type() fields := map[string]bool{} @@ -114,13 +128,21 @@ func (m *FieldsMatcher) matchFields(actual interface{}) (errs []error) { matcher, expected := m.Fields[fieldName] if !expected { + if m.IgnoreUnexportedExtras && !isExported(fieldName) { + return nil + } if !m.IgnoreExtras { return fmt.Errorf("unexpected field %s: %+v", fieldName, actual) } return nil } - field := val.Field(i).Interface() + var field any + if _, isIgnoreMatcher := matcher.(*IgnoreMatcher) ; isIgnoreMatcher { + field = struct {}{} // the matcher does not care about the actual value + } else { + field = val.Field(i).Interface() + } match, err := matcher.Match(field) if err != nil { @@ -147,7 +169,7 @@ func (m *FieldsMatcher) matchFields(actual interface{}) (errs []error) { return errs } -func (m *FieldsMatcher) FailureMessage(actual interface{}) (message string) { +func (m *FieldsMatcher) FailureMessage(actual any) (message string) { failures := make([]string, len(m.failures)) for i := range m.failures { failures[i] = m.failures[i].Error() @@ -156,7 +178,7 @@ func (m *FieldsMatcher) FailureMessage(actual interface{}) (message string) { fmt.Sprintf("to match fields: {\n%v\n}\n", 
strings.Join(failures, "\n"))) } -func (m *FieldsMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *FieldsMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to match fields") } diff --git a/vendor/github.com/onsi/gomega/gstruct/ignore.go b/vendor/github.com/onsi/gomega/gstruct/ignore.go index 4396573e44..c8238d58fd 100644 --- a/vendor/github.com/onsi/gomega/gstruct/ignore.go +++ b/vendor/github.com/onsi/gomega/gstruct/ignore.go @@ -6,17 +6,19 @@ import ( "github.com/onsi/gomega/types" ) -//Ignore ignores the actual value and always succeeds. -// Expect(nil).To(Ignore()) -// Expect(true).To(Ignore()) +// Ignore ignores the actual value and always succeeds. +// +// Expect(nil).To(Ignore()) +// Expect(true).To(Ignore()) func Ignore() types.GomegaMatcher { return &IgnoreMatcher{true} } -//Reject ignores the actual value and always fails. It can be used in conjunction with IgnoreMissing -//to catch problematic elements, or to verify tests are running. -// Expect(nil).NotTo(Reject()) -// Expect(true).NotTo(Reject()) +// Reject ignores the actual value and always fails. It can be used in conjunction with IgnoreMissing +// to catch problematic elements, or to verify tests are running. +// +// Expect(nil).NotTo(Reject()) +// Expect(true).NotTo(Reject()) func Reject() types.GomegaMatcher { return &IgnoreMatcher{false} } @@ -26,14 +28,14 @@ type IgnoreMatcher struct { Succeed bool } -func (m *IgnoreMatcher) Match(actual interface{}) (bool, error) { +func (m *IgnoreMatcher) Match(actual any) (bool, error) { return m.Succeed, nil } -func (m *IgnoreMatcher) FailureMessage(_ interface{}) (message string) { +func (m *IgnoreMatcher) FailureMessage(_ any) (message string) { return "Unconditional failure" } -func (m *IgnoreMatcher) NegatedFailureMessage(_ interface{}) (message string) { +func (m *IgnoreMatcher) NegatedFailureMessage(_ any) (message string) { return "Unconditional success" } diff --git a/vendor/github.com/onsi/gomega/gstruct/keys.go b/vendor/github.com/onsi/gomega/gstruct/keys.go index 56aed4bab7..807a450b6f 100644 --- a/vendor/github.com/onsi/gomega/gstruct/keys.go +++ b/vendor/github.com/onsi/gomega/gstruct/keys.go @@ -41,9 +41,9 @@ type KeysMatcher struct { failures []error } -type Keys map[interface{}]types.GomegaMatcher +type Keys map[any]types.GomegaMatcher -func (m *KeysMatcher) Match(actual interface{}) (success bool, err error) { +func (m *KeysMatcher) Match(actual any) (success bool, err error) { if reflect.TypeOf(actual).Kind() != reflect.Map { return false, fmt.Errorf("%v is type %T, expected map", actual, actual) } @@ -55,9 +55,9 @@ func (m *KeysMatcher) Match(actual interface{}) (success bool, err error) { return true, nil } -func (m *KeysMatcher) matchKeys(actual interface{}) (errs []error) { +func (m *KeysMatcher) matchKeys(actual any) (errs []error) { actualValue := reflect.ValueOf(actual) - keys := map[interface{}]bool{} + keys := map[any]bool{} for _, keyValue := range actualValue.MapKeys() { key := keyValue.Interface() keys[key] = true @@ -108,7 +108,7 @@ func (m *KeysMatcher) matchKeys(actual interface{}) (errs []error) { return errs } -func (m *KeysMatcher) FailureMessage(actual interface{}) (message string) { +func (m *KeysMatcher) FailureMessage(actual any) (message string) { failures := make([]string, len(m.failures)) for i := range m.failures { failures[i] = m.failures[i].Error() @@ -117,7 +117,7 @@ func (m *KeysMatcher) FailureMessage(actual interface{}) (message string) { fmt.Sprintf("to 
match keys: {\n%v\n}\n", strings.Join(failures, "\n"))) } -func (m *KeysMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *KeysMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to match keys") } diff --git a/vendor/github.com/onsi/gomega/gstruct/pointer.go b/vendor/github.com/onsi/gomega/gstruct/pointer.go index cc828a3251..54c9557fec 100644 --- a/vendor/github.com/onsi/gomega/gstruct/pointer.go +++ b/vendor/github.com/onsi/gomega/gstruct/pointer.go @@ -10,10 +10,11 @@ import ( "github.com/onsi/gomega/types" ) -//PointTo applies the given matcher to the value pointed to by actual. It fails if the pointer is -//nil. -// actual := 5 -// Expect(&actual).To(PointTo(Equal(5))) +// PointTo applies the given matcher to the value pointed to by actual. It fails if the pointer is +// nil. +// +// actual := 5 +// Expect(&actual).To(PointTo(Equal(5))) func PointTo(matcher types.GomegaMatcher) types.GomegaMatcher { return &PointerMatcher{ Matcher: matcher, @@ -27,7 +28,7 @@ type PointerMatcher struct { failure string } -func (m *PointerMatcher) Match(actual interface{}) (bool, error) { +func (m *PointerMatcher) Match(actual any) (bool, error) { val := reflect.ValueOf(actual) // return error if actual type is not a pointer @@ -49,10 +50,10 @@ func (m *PointerMatcher) Match(actual interface{}) (bool, error) { return match, err } -func (m *PointerMatcher) FailureMessage(_ interface{}) (message string) { +func (m *PointerMatcher) FailureMessage(_ any) (message string) { return m.failure } -func (m *PointerMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *PointerMatcher) NegatedFailureMessage(actual any) (message string) { return m.Matcher.NegatedFailureMessage(actual) } diff --git a/vendor/github.com/onsi/gomega/gstruct/types.go b/vendor/github.com/onsi/gomega/gstruct/types.go index 48cbbe8f66..a5f6c390bd 100644 --- a/vendor/github.com/onsi/gomega/gstruct/types.go +++ b/vendor/github.com/onsi/gomega/gstruct/types.go @@ -1,6 +1,6 @@ package gstruct -//Options is the type for options passed to some matchers. +// Options is the type for options passed to some matchers. type Options int const ( @@ -9,7 +9,11 @@ const ( //IgnoreMissing tells the matcher to ignore missing elements or fields, rather than triggering a failure. IgnoreMissing //AllowDuplicates tells the matcher to permit multiple members of the slice to produce the same ID when - //considered by the indentifier function. All members that map to a given key must still match successfully + //considered by the identifier function. All members that map to a given key must still match successfully //with the matcher that is provided for that key. AllowDuplicates + //IgnoreUnexportedExtras tells the matcher to ignore extra unexported fields, rather than triggering a failure. + //it is not possible to check the value of unexported fields, so this option is only useful when you want to + //check every exported fields, but you don't care about extra unexported fields. 
+ IgnoreUnexportedExtras ) diff --git a/vendor/github.com/onsi/gomega/internal/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion.go index 08356a610b..cc846e7ce7 100644 --- a/vendor/github.com/onsi/gomega/internal/assertion.go +++ b/vendor/github.com/onsi/gomega/internal/assertion.go @@ -9,19 +9,19 @@ import ( ) type Assertion struct { - actuals []interface{} // actual value plus all extra values - actualIndex int // value to pass to the matcher - vet vetinari // the vet to call before calling Gomega matcher + actuals []any // actual value plus all extra values + actualIndex int // value to pass to the matcher + vet vetinari // the vet to call before calling Gomega matcher offset int g *Gomega } // ...obligatory discworld reference, as "vetineer" doesn't sound ... quite right. -type vetinari func(assertion *Assertion, optionalDescription ...interface{}) bool +type vetinari func(assertion *Assertion, optionalDescription ...any) bool -func NewAssertion(actualInput interface{}, g *Gomega, offset int, extra ...interface{}) *Assertion { +func NewAssertion(actualInput any, g *Gomega, offset int, extra ...any) *Assertion { return &Assertion{ - actuals: append([]interface{}{actualInput}, extra...), + actuals: append([]any{actualInput}, extra...), actualIndex: 0, vet: (*Assertion).vetActuals, offset: offset, @@ -44,37 +44,37 @@ func (assertion *Assertion) Error() types.Assertion { } } -func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...) } -func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) } -func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...) } -func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) } -func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) 
} -func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { +func (assertion *Assertion) buildDescription(optionalDescription ...any) string { switch len(optionalDescription) { case 0: return "" @@ -86,7 +86,7 @@ func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" } -func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { +func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...any) bool { actualInput := assertion.actuals[assertion.actualIndex] matches, err := matcher.Match(actualInput) assertion.g.THelper() @@ -113,7 +113,7 @@ func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool // vetActuals vets the actual values, with the (optional) exception of a // specific value, such as the first value in case non-error assertions, or the // last value in case of Error()-based assertions. -func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool { +func (assertion *Assertion) vetActuals(optionalDescription ...any) bool { success, message := vetActuals(assertion.actuals, assertion.actualIndex) if success { return true @@ -129,7 +129,7 @@ func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool // the final error value is non-zero. Otherwise, it doesn't vet the actual // values, as these are allowed to take on any values unless there is a non-zero // error value. -func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool { +func (assertion *Assertion) vetError(optionalDescription ...any) bool { if err := assertion.actuals[assertion.actualIndex]; err != nil { // Go error result idiom: all other actual values must be zero values. return assertion.vetActuals(optionalDescription...) @@ -139,7 +139,7 @@ func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool { // vetActuals vets a slice of actual values, optionally skipping a particular // value slice element, such as the first or last value slice element. 
-func vetActuals(actuals []interface{}, skipIndex int) (bool, string) { +func vetActuals(actuals []any, skipIndex int) (bool, string) { for i, actual := range actuals { if i == skipIndex { continue diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index 8b4cd1f5b7..a3a646e4ad 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -69,8 +69,8 @@ type AsyncAssertion struct { asyncType AsyncAssertionType actualIsFunc bool - actual interface{} - argsToForward []interface{} + actual any + argsToForward []any timeoutInterval time.Duration pollingInterval time.Duration @@ -80,7 +80,7 @@ type AsyncAssertion struct { g *Gomega } -func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion { +func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput any, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion { out := &AsyncAssertion{ asyncType: asyncType, timeoutInterval: timeoutInterval, @@ -129,7 +129,7 @@ func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAss return assertion } -func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion { +func (assertion *AsyncAssertion) WithArguments(argsToForward ...any) types.AsyncAssertion { assertion.argsToForward = argsToForward return assertion } @@ -139,19 +139,31 @@ func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssert return assertion } -func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Asynchronous assertion", optionalDescription...) return assertion.match(matcher, true, optionalDescription...) } -func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *AsyncAssertion) To(matcher types.GomegaMatcher, optionalDescription ...any) bool { + return assertion.Should(matcher, optionalDescription...) +} + +func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Asynchronous assertion", optionalDescription...) return assertion.match(matcher, false, optionalDescription...) } -func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { +func (assertion *AsyncAssertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...any) bool { + return assertion.ShouldNot(matcher, optionalDescription...) +} + +func (assertion *AsyncAssertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...any) bool { + return assertion.ShouldNot(matcher, optionalDescription...) +} + +func (assertion *AsyncAssertion) buildDescription(optionalDescription ...any) string { switch len(optionalDescription) { case 0: return "" @@ -163,7 +175,7 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) 
+ "\n" } -func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) { +func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (any, error) { if len(values) == 0 { return nil, &asyncPolledActualError{ message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType), @@ -224,7 +236,7 @@ func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvid if numProvided == 1 { have = "has" } - return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the corect set of arguments. + return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the correct set of arguments. You can learn more at https://onsi.github.io/gomega/#eventually `, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType) @@ -237,9 +249,9 @@ You can learn more at https://onsi.github.io/gomega/#eventually `, assertion.asyncType, reason) } -func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) { +func (assertion *AsyncAssertion) buildActualPoller() (func() (any, error), error) { if !assertion.actualIsFunc { - return func() (interface{}, error) { return assertion.actual, nil }, nil + return func() (any, error) { return assertion.actual, nil }, nil } actualValue := reflect.ValueOf(assertion.actual) actualType := reflect.TypeOf(assertion.actual) @@ -301,7 +313,7 @@ func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1") } - return func() (actual interface{}, err error) { + return func() (actual any, err error) { var values []reflect.Value assertionFailure = nil defer func() { @@ -354,14 +366,14 @@ func (assertion *AsyncAssertion) afterPolling() <-chan time.Time { } } -func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool { +func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value any) bool { if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) { return false } return true } -func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) { +func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value any) (matches bool, err error) { defer func() { if e := recover(); e != nil { if _, isAsyncError := AsPollingSignalError(e); isAsyncError { @@ -377,13 +389,13 @@ func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value return } -func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { +func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...any) bool { timer := time.Now() timeout := assertion.afterTimeout() lock := sync.Mutex{} var matches, hasLastValidActual bool - var actual, lastValidActual interface{} + var actual, lastValidActual any var actualErr, matcherErr error var oracleMatcherSaysStop bool diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go index 2e026c336f..1019deb88e 100644 --- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go +++ 
b/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -49,7 +49,7 @@ func durationFromEnv(key string, defaultDuration time.Duration) time.Duration { return duration } -func toDuration(input interface{}) (time.Duration, error) { +func toDuration(input any) (time.Duration, error) { duration, ok := input.(time.Duration) if ok { return duration, nil diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go index c6e2fcc0ed..66dfe7d041 100644 --- a/vendor/github.com/onsi/gomega/internal/gomega.go +++ b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -40,45 +40,45 @@ func (g *Gomega) ConfigureWithT(t types.GomegaTestingT) *Gomega { return g } -func (g *Gomega) Ω(actual interface{}, extra ...interface{}) types.Assertion { +func (g *Gomega) Ω(actual any, extra ...any) types.Assertion { return g.ExpectWithOffset(0, actual, extra...) } -func (g *Gomega) Expect(actual interface{}, extra ...interface{}) types.Assertion { +func (g *Gomega) Expect(actual any, extra ...any) types.Assertion { return g.ExpectWithOffset(0, actual, extra...) } -func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) types.Assertion { +func (g *Gomega) ExpectWithOffset(offset int, actual any, extra ...any) types.Assertion { return NewAssertion(actual, g, offset, extra...) } -func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) Eventually(actualOrCtx any, args ...any) types.AsyncAssertion { return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...) } -func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx any, args ...any) types.AsyncAssertion { return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...) } -func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) Consistently(actualOrCtx any, args ...any) types.AsyncAssertion { return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...) } -func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) types.AsyncAssertion { return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...) 
} -func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx any, args ...any) types.AsyncAssertion { baseOffset := 3 timeoutInterval := -time.Duration(1) pollingInterval := -time.Duration(1) - intervals := []interface{}{} + intervals := []any{} var ctx context.Context actual := actualOrCtx startingIndex := 0 if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 { - // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration + // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argument **and** the second argument is not a parseable duration // this is due to an unfortunate ambiguity in early version of Gomega in which multi-type durations are allowed after the actual if _, err := toDuration(args[0]); err != nil { ctx = actualOrCtx.(context.Context) diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go index 3a4f7ddd90..450c403330 100644 --- a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go +++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go @@ -100,7 +100,7 @@ func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration { return s.duration } -func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) { +func AsPollingSignalError(actual any) (*PollingSignalErrorImpl, bool) { if actual == nil { return nil, false } diff --git a/vendor/github.com/onsi/gomega/internal/vetoptdesc.go b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go index f295876417..b748de41f1 100644 --- a/vendor/github.com/onsi/gomega/internal/vetoptdesc.go +++ b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go @@ -10,7 +10,7 @@ import ( // Gomega matcher at the beginning it panics. This allows for rendering Gomega // matchers as part of an optional Description, as long as they're not in the // first slot. -func vetOptionalDescription(assertion string, optionalDescription ...interface{}) { +func vetOptionalDescription(assertion string, optionalDescription ...any) { if len(optionalDescription) == 0 { return } diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 7ef27dc9c9..10b6693fd6 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -12,7 +12,7 @@ import ( // Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about // types when performing comparisons. // It is an error for both actual and expected to be nil. Use BeNil() instead. -func Equal(expected interface{}) types.GomegaMatcher { +func Equal(expected any) types.GomegaMatcher { return &matchers.EqualMatcher{ Expected: expected, } @@ -22,7 +22,7 @@ func Equal(expected interface{}) types.GomegaMatcher { // This is done by converting actual to have the type of expected before // attempting equality with reflect.DeepEqual. // It is an error for actual and expected to be nil. Use BeNil() instead. 
-func BeEquivalentTo(expected interface{}) types.GomegaMatcher { +func BeEquivalentTo(expected any) types.GomegaMatcher { return &matchers.BeEquivalentToMatcher{ Expected: expected, } @@ -31,7 +31,7 @@ func BeEquivalentTo(expected interface{}) types.GomegaMatcher { // BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison. // You can pass cmp.Option as options. // It is an error for actual and expected to be nil. Use BeNil() instead. -func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher { +func BeComparableTo(expected any, opts ...cmp.Option) types.GomegaMatcher { return &matchers.BeComparableToMatcher{ Expected: expected, Options: opts, @@ -41,7 +41,7 @@ func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatche // BeIdenticalTo uses the == operator to compare actual with expected. // BeIdenticalTo is strict about types when performing comparisons. // It is an error for both actual and expected to be nil. Use BeNil() instead. -func BeIdenticalTo(expected interface{}) types.GomegaMatcher { +func BeIdenticalTo(expected any) types.GomegaMatcher { return &matchers.BeIdenticalToMatcher{ Expected: expected, } @@ -139,7 +139,7 @@ func Succeed() types.GomegaMatcher { // Error interface // // The optional second argument is a description of the error function, if used. This is required when passing a function but is ignored in all other cases. -func MatchError(expected interface{}, functionErrorDescription ...any) types.GomegaMatcher { +func MatchError(expected any, functionErrorDescription ...any) types.GomegaMatcher { return &matchers.MatchErrorMatcher{ Expected: expected, FuncErrDescription: functionErrorDescription, @@ -202,11 +202,11 @@ func BeClosed() types.GomegaMatcher { // Expect(myThing.IsValid()).Should(BeTrue()) // // Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received, -// you can pass a pointer to a variable of the approriate type first, and second a matcher: +// you can pass a pointer to a variable of the appropriate type first, and second a matcher: // // var myThing thing // Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar"))) -func Receive(args ...interface{}) types.GomegaMatcher { +func Receive(args ...any) types.GomegaMatcher { return &matchers.ReceiveMatcher{ Args: args, } @@ -224,7 +224,7 @@ func Receive(args ...interface{}) types.GomegaMatcher { // // Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with). // Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends. -func BeSent(arg interface{}) types.GomegaMatcher { +func BeSent(arg any) types.GomegaMatcher { return &matchers.BeSentMatcher{ Arg: arg, } @@ -233,7 +233,7 @@ func BeSent(arg interface{}) types.GomegaMatcher { // MatchRegexp succeeds if actual is a string or stringer that matches the // passed-in regexp. Optional arguments can be provided to construct a regexp // via fmt.Sprintf(). 
-func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher { +func MatchRegexp(regexp string, args ...any) types.GomegaMatcher { return &matchers.MatchRegexpMatcher{ Regexp: regexp, Args: args, @@ -243,7 +243,7 @@ func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher { // ContainSubstring succeeds if actual is a string or stringer that contains the // passed-in substring. Optional arguments can be provided to construct the substring // via fmt.Sprintf(). -func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher { +func ContainSubstring(substr string, args ...any) types.GomegaMatcher { return &matchers.ContainSubstringMatcher{ Substr: substr, Args: args, @@ -253,7 +253,7 @@ func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher { // HavePrefix succeeds if actual is a string or stringer that contains the // passed-in string as a prefix. Optional arguments can be provided to construct // via fmt.Sprintf(). -func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher { +func HavePrefix(prefix string, args ...any) types.GomegaMatcher { return &matchers.HavePrefixMatcher{ Prefix: prefix, Args: args, @@ -263,7 +263,7 @@ func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher { // HaveSuffix succeeds if actual is a string or stringer that contains the // passed-in string as a suffix. Optional arguments can be provided to construct // via fmt.Sprintf(). -func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher { +func HaveSuffix(suffix string, args ...any) types.GomegaMatcher { return &matchers.HaveSuffixMatcher{ Suffix: suffix, Args: args, @@ -273,7 +273,7 @@ func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher { // MatchJSON succeeds if actual is a string or stringer of JSON that matches // the expected JSON. The JSONs are decoded and the resulting objects are compared via // reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. -func MatchJSON(json interface{}) types.GomegaMatcher { +func MatchJSON(json any) types.GomegaMatcher { return &matchers.MatchJSONMatcher{ JSONToMatch: json, } @@ -282,7 +282,7 @@ func MatchJSON(json interface{}) types.GomegaMatcher { // MatchXML succeeds if actual is a string or stringer of XML that matches // the expected XML. The XMLs are decoded and the resulting objects are compared via // reflect.DeepEqual so things like whitespaces shouldn't matter. -func MatchXML(xml interface{}) types.GomegaMatcher { +func MatchXML(xml any) types.GomegaMatcher { return &matchers.MatchXMLMatcher{ XMLToMatch: xml, } @@ -291,7 +291,7 @@ func MatchXML(xml interface{}) types.GomegaMatcher { // MatchYAML succeeds if actual is a string or stringer of YAML that matches // the expected YAML. The YAML's are decoded and the resulting objects are compared via // reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. 
-func MatchYAML(yaml interface{}) types.GomegaMatcher { +func MatchYAML(yaml any) types.GomegaMatcher { return &matchers.MatchYAMLMatcher{ YAMLToMatch: yaml, } @@ -338,7 +338,7 @@ func BeZero() types.GomegaMatcher { // // var findings []string // Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubString("Bar", &findings))) -func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher { +func ContainElement(element any, result ...any) types.GomegaMatcher { return &matchers.ContainElementMatcher{ Element: element, Result: result, @@ -358,7 +358,7 @@ func ContainElement(element interface{}, result ...interface{}) types.GomegaMatc // Expect(2).Should(BeElementOf(1, 2)) // // Actual must be typed. -func BeElementOf(elements ...interface{}) types.GomegaMatcher { +func BeElementOf(elements ...any) types.GomegaMatcher { return &matchers.BeElementOfMatcher{ Elements: elements, } @@ -368,7 +368,7 @@ func BeElementOf(elements ...interface{}) types.GomegaMatcher { // BeKeyOf() always uses Equal() to perform the match between actual and the map keys. // // Expect("foo").Should(BeKeyOf(map[string]bool{"foo": true, "bar": false})) -func BeKeyOf(element interface{}) types.GomegaMatcher { +func BeKeyOf(element any) types.GomegaMatcher { return &matchers.BeKeyOfMatcher{ Map: element, } @@ -388,14 +388,14 @@ func BeKeyOf(element interface{}) types.GomegaMatcher { // // Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"})) // -// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule. -func ConsistOf(elements ...interface{}) types.GomegaMatcher { +// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []any are different types - hence the need for this special rule. +func ConsistOf(elements ...any) types.GomegaMatcher { return &matchers.ConsistOfMatcher{ Elements: elements, } } -// HaveExactElements succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. +// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter. // By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: // // Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar")) @@ -403,7 +403,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher { // Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo"))) // // Actual must be an array or slice. -func HaveExactElements(elements ...interface{}) types.GomegaMatcher { +func HaveExactElements(elements ...any) types.GomegaMatcher { return &matchers.HaveExactElementsMatcher{ Elements: elements, } @@ -417,7 +417,7 @@ func HaveExactElements(elements ...interface{}) types.GomegaMatcher { // // Actual must be an array, slice or map. // For maps, ContainElements searches through the map's values. 
-func ContainElements(elements ...interface{}) types.GomegaMatcher { +func ContainElements(elements ...any) types.GomegaMatcher { return &matchers.ContainElementsMatcher{ Elements: elements, } @@ -432,7 +432,7 @@ func ContainElements(elements ...interface{}) types.GomegaMatcher { // // Actual must be an array, slice or map. // For maps, HaveEach searches through the map's values. -func HaveEach(element interface{}) types.GomegaMatcher { +func HaveEach(element any) types.GomegaMatcher { return &matchers.HaveEachMatcher{ Element: element, } @@ -443,7 +443,7 @@ func HaveEach(element interface{}) types.GomegaMatcher { // matcher can be passed in instead: // // Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`))) -func HaveKey(key interface{}) types.GomegaMatcher { +func HaveKey(key any) types.GomegaMatcher { return &matchers.HaveKeyMatcher{ Key: key, } @@ -455,7 +455,7 @@ func HaveKey(key interface{}) types.GomegaMatcher { // // Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar")) // Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar")) -func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { +func HaveKeyWithValue(key any, value any) types.GomegaMatcher { return &matchers.HaveKeyWithValueMatcher{ Key: key, Value: value, @@ -483,7 +483,7 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { // Expect(book).To(HaveField("Title", ContainSubstring("Les")) // Expect(book).To(HaveField("Author.FirstName", Equal("Victor")) // Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900)) -func HaveField(field string, expected interface{}) types.GomegaMatcher { +func HaveField(field string, expected any) types.GomegaMatcher { return &matchers.HaveFieldMatcher{ Field: field, Expected: expected, @@ -535,7 +535,7 @@ func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher { // Expect(1.0).Should(BeNumerically(">=", 1.0)) // Expect(1.0).Should(BeNumerically("<", 3)) // Expect(1.0).Should(BeNumerically("<=", 1.0)) -func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher { +func BeNumerically(comparator string, compareTo ...any) types.GomegaMatcher { return &matchers.BeNumericallyMatcher{ Comparator: comparator, CompareTo: compareTo, @@ -562,7 +562,7 @@ func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Dura // Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type // Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type // Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) -func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher { +func BeAssignableToTypeOf(expected any) types.GomegaMatcher { return &matchers.AssignableToTypeOfMatcher{ Expected: expected, } @@ -581,7 +581,7 @@ func Panic() types.GomegaMatcher { // matcher can be passed in instead: // // Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`))) -func PanicWith(expected interface{}) types.GomegaMatcher { +func PanicWith(expected any) types.GomegaMatcher { return &matchers.PanicMatcher{Expected: expected} } @@ -610,7 +610,7 @@ func BeADirectory() types.GomegaMatcher { // Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200 // Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found" // 
Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204 -func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher { +func HaveHTTPStatus(expected ...any) types.GomegaMatcher { return &matchers.HaveHTTPStatusMatcher{Expected: expected} } @@ -618,7 +618,7 @@ func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher { // Actual must be either a *http.Response or *httptest.ResponseRecorder. // Expected must be a string header name, followed by a header value which // can be a string, or another matcher. -func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatcher { +func HaveHTTPHeaderWithValue(header string, value any) types.GomegaMatcher { return &matchers.HaveHTTPHeaderWithValueMatcher{ Header: header, Value: value, @@ -628,7 +628,7 @@ func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatch // HaveHTTPBody matches if the body matches. // Actual must be either a *http.Response or *httptest.ResponseRecorder. // Expected must be either a string, []byte, or other matcher -func HaveHTTPBody(expected interface{}) types.GomegaMatcher { +func HaveHTTPBody(expected any) types.GomegaMatcher { return &matchers.HaveHTTPBodyMatcher{Expected: expected} } @@ -687,15 +687,15 @@ func Not(matcher types.GomegaMatcher) types.GomegaMatcher { // Expect(1).To(WithTransform(failingplus1, Equal(2))) // // And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. -func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher { +func WithTransform(transform any, matcher types.GomegaMatcher) types.GomegaMatcher { return matchers.NewWithTransformMatcher(transform, matcher) } // Satisfy matches the actual value against the `predicate` function. -// The given predicate must be a function of one paramter that returns bool. +// The given predicate must be a function of one parameter that returns bool. // // var isEven = func(i int) bool { return i%2 == 0 } // Expect(2).To(Satisfy(isEven)) -func Satisfy(predicate interface{}) types.GomegaMatcher { +func Satisfy(predicate any) types.GomegaMatcher { return matchers.NewSatisfyMatcher(predicate) } diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go index 6bd826adc5..db48e90b37 100644 --- a/vendor/github.com/onsi/gomega/matchers/and.go +++ b/vendor/github.com/onsi/gomega/matchers/and.go @@ -14,7 +14,7 @@ type AndMatcher struct { firstFailedMatcher types.GomegaMatcher } -func (m *AndMatcher) Match(actual interface{}) (success bool, err error) { +func (m *AndMatcher) Match(actual any) (success bool, err error) { m.firstFailedMatcher = nil for _, matcher := range m.Matchers { success, err := matcher.Match(actual) @@ -26,16 +26,16 @@ func (m *AndMatcher) Match(actual interface{}) (success bool, err error) { return true, nil } -func (m *AndMatcher) FailureMessage(actual interface{}) (message string) { +func (m *AndMatcher) FailureMessage(actual any) (message string) { return m.firstFailedMatcher.FailureMessage(actual) } -func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *AndMatcher) NegatedFailureMessage(actual any) (message string) { // not the most beautiful list of matchers, but not bad either... 
return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers)) } -func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (m *AndMatcher) MatchMayChangeInTheFuture(actual any) bool { /* Example with 3 matchers: A, B, C diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go index be48395201..a100e5c07e 100644 --- a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go @@ -10,10 +10,10 @@ import ( ) type AssignableToTypeOfMatcher struct { - Expected interface{} + Expected any } -func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *AssignableToTypeOfMatcher) Match(actual any) (success bool, err error) { if actual == nil && matcher.Expected == nil { return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") } else if matcher.Expected == nil { @@ -28,10 +28,10 @@ func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success boo return actualType.AssignableTo(expectedType), nil } -func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string { +func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual any) string { return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected)) } -func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string { +func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual any) string { return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected)) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go index 93d4497c70..1d82360484 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go @@ -24,11 +24,11 @@ func (t notADirectoryError) Error() string { } type BeADirectoryMatcher struct { - expected interface{} + expected any err error } -func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeADirectoryMatcher) Match(actual any) (success bool, err error) { actualFilename, ok := actual.(string) if !ok { return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path") @@ -47,10 +47,10 @@ func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err return true, nil } -func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeADirectoryMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err)) } -func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not be a directory") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go index 8fefc4deb7..3e53d6285b 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go @@ 
-24,11 +24,11 @@ func (t notARegularFileError) Error() string { } type BeARegularFileMatcher struct { - expected interface{} + expected any err error } -func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeARegularFileMatcher) Match(actual any) (success bool, err error) { actualFilename, ok := actual.(string) if !ok { return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path") @@ -47,10 +47,10 @@ func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, e return true, nil } -func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeARegularFileMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err)) } -func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not be a regular file") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go index e2bdd28113..04f156db39 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go @@ -10,10 +10,10 @@ import ( ) type BeAnExistingFileMatcher struct { - expected interface{} + expected any } -func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeAnExistingFileMatcher) Match(actual any) (success bool, err error) { actualFilename, ok := actual.(string) if !ok { return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path") @@ -31,10 +31,10 @@ func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, return true, nil } -func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeAnExistingFileMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to exist") } -func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to exist") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go index f13c24490f..4319dde455 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go @@ -12,7 +12,7 @@ import ( type BeClosedMatcher struct { } -func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeClosedMatcher) Match(actual any) (success bool, err error) { if !isChan(actual) { return false, fmt.Errorf("BeClosed matcher expects a channel. 
Got:\n%s", format.Object(actual, 1)) } @@ -39,10 +39,10 @@ func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err err return closed, nil } -func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeClosedMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be closed") } -func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeClosedMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "to be open") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 4e3897858c..532fc37449 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -9,11 +9,11 @@ import ( ) type BeComparableToMatcher struct { - Expected interface{} + Expected any Options cmp.Options } -func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, matchErr error) { +func (matcher *BeComparableToMatcher) Match(actual any) (success bool, matchErr error) { if actual == nil && matcher.Expected == nil { return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") } @@ -40,10 +40,10 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m return cmp.Equal(actual, matcher.Expected, matcher.Options...), nil } -func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeComparableToMatcher) FailureMessage(actual any) (message string) { return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...)) } -func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be comparable to", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go index 9ee75a5d51..406fe54843 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go @@ -10,10 +10,10 @@ import ( ) type BeElementOfMatcher struct { - Elements []interface{} + Elements []any } -func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeElementOfMatcher) Match(actual any) (success bool, err error) { if reflect.TypeOf(actual) == nil { return false, fmt.Errorf("BeElement matcher expects actual to be typed") } @@ -34,10 +34,10 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err return false, lastError } -func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeElementOfMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be an element of", presentable(matcher.Elements)) } -func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be an element of", 
presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go index bd7f0b96e7..e9e0644f32 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -13,7 +13,7 @@ import ( type BeEmptyMatcher struct { } -func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeEmptyMatcher) Match(actual any) (success bool, err error) { // short-circuit the iterator case, as we only need to see the first // element, if any. if miter.IsIter(actual) { @@ -34,10 +34,10 @@ func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err erro return length == 0, nil } -func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeEmptyMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be empty") } -func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be empty") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go index 263627f408..37b3080ba7 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go @@ -10,10 +10,10 @@ import ( ) type BeEquivalentToMatcher struct { - Expected interface{} + Expected any } -func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeEquivalentToMatcher) Match(actual any) (success bool, err error) { if actual == nil && matcher.Expected == nil { return false, fmt.Errorf("Both actual and expected must not be nil.") } @@ -27,10 +27,10 @@ func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, e return reflect.DeepEqual(convertedActual, matcher.Expected), nil } -func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeEquivalentToMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be equivalent to", matcher.Expected) } -func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be equivalent to", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go index 8ee2b1c51e..55e869515a 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go @@ -12,7 +12,7 @@ type BeFalseMatcher struct { Reason string } -func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeFalseMatcher) Match(actual any) (success bool, err error) { if !isBool(actual) { return false, fmt.Errorf("Expected a boolean. 
Got:\n%s", format.Object(actual, 1)) } @@ -20,7 +20,7 @@ func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err erro return actual == false, nil } -func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeFalseMatcher) FailureMessage(actual any) (message string) { if matcher.Reason == "" { return format.Message(actual, "to be false") } else { @@ -28,7 +28,7 @@ func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message strin } } -func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeFalseMatcher) NegatedFailureMessage(actual any) (message string) { if matcher.Reason == "" { return format.Message(actual, "not to be false") } else { diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go index 631ce11e33..579aa41b31 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go +++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go @@ -10,10 +10,10 @@ import ( ) type BeIdenticalToMatcher struct { - Expected interface{} + Expected any } -func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, matchErr error) { +func (matcher *BeIdenticalToMatcher) Match(actual any) (success bool, matchErr error) { if actual == nil && matcher.Expected == nil { return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") } @@ -30,10 +30,10 @@ func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, ma return actual == matcher.Expected, nil } -func (matcher *BeIdenticalToMatcher) FailureMessage(actual interface{}) string { +func (matcher *BeIdenticalToMatcher) FailureMessage(actual any) string { return format.Message(actual, "to be identical to", matcher.Expected) } -func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual interface{}) string { +func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual any) string { return format.Message(actual, "not to be identical to", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go index 449a291ef9..3fff3df784 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go @@ -8,10 +8,10 @@ import ( ) type BeKeyOfMatcher struct { - Map interface{} + Map any } -func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeKeyOfMatcher) Match(actual any) (success bool, err error) { if !isMap(matcher.Map) { return false, fmt.Errorf("BeKeyOf matcher needs expected to be a map type") } @@ -36,10 +36,10 @@ func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err erro return false, lastError } -func (matcher *BeKeyOfMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeKeyOfMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be a key of", presentable(valuesOf(matcher.Map))) } -func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be a key of", presentable(valuesOf(matcher.Map))) } diff --git 
a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go index 551d99d747..cab37f4f95 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go @@ -7,14 +7,14 @@ import "github.com/onsi/gomega/format" type BeNilMatcher struct { } -func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeNilMatcher) Match(actual any) (success bool, err error) { return isNil(actual), nil } -func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeNilMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be nil") } -func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeNilMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be nil") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go index 100735de32..7e6ce154e1 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go @@ -11,18 +11,18 @@ import ( type BeNumericallyMatcher struct { Comparator string - CompareTo []interface{} + CompareTo []any } -func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeNumericallyMatcher) FailureMessage(actual any) (message string) { return matcher.FormatFailureMessage(actual, false) } -func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual any) (message string) { return matcher.FormatFailureMessage(actual, true) } -func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, negated bool) (message string) { +func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual any, negated bool) (message string) { if len(matcher.CompareTo) == 1 { message = fmt.Sprintf("to be %s", matcher.Comparator) } else { @@ -34,7 +34,7 @@ func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, ne return format.Message(actual, message, matcher.CompareTo[0]) } -func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeNumericallyMatcher) Match(actual any) (success bool, err error) { if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 { return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1)) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go index cf582a3fcb..14ffbf6c4c 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go @@ -10,11 +10,11 @@ import ( ) type BeSentMatcher struct { - Arg interface{} + Arg any channelClosed bool } -func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeSentMatcher) Match(actual any) (success bool, err error) { if !isChan(actual) { return false, fmt.Errorf("BeSent expects a channel. 
Got:\n%s", format.Object(actual, 1)) } @@ -56,15 +56,15 @@ func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error return didSend, nil } -func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeSentMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to send:", matcher.Arg) } -func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeSentMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to send:", matcher.Arg) } -func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual any) bool { if !isChan(actual) { return false } diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go index dec4db024e..edb647c6f2 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go @@ -15,17 +15,17 @@ type BeTemporallyMatcher struct { Threshold []time.Duration } -func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeTemporallyMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo) } -func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo) } -func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) { +func (matcher *BeTemporallyMatcher) Match(actual any) (bool, error) { // predicate to test for time.Time type - isTime := func(t interface{}) bool { + isTime := func(t any) bool { _, ok := t.(time.Time) return ok } diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go index 3576aac884..a010bec5ad 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go @@ -12,7 +12,7 @@ type BeTrueMatcher struct { Reason string } -func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeTrueMatcher) Match(actual any) (success bool, err error) { if !isBool(actual) { return false, fmt.Errorf("Expected a boolean. 
Got:\n%s", format.Object(actual, 1)) } @@ -20,7 +20,7 @@ func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error return actual.(bool), nil } -func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeTrueMatcher) FailureMessage(actual any) (message string) { if matcher.Reason == "" { return format.Message(actual, "to be true") } else { @@ -28,7 +28,7 @@ func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string } } -func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeTrueMatcher) NegatedFailureMessage(actual any) (message string) { if matcher.Reason == "" { return format.Message(actual, "not to be true") } else { diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go index 26196f168f..f5f5d7f7d7 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go @@ -9,7 +9,7 @@ import ( type BeZeroMatcher struct { } -func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeZeroMatcher) Match(actual any) (success bool, err error) { if actual == nil { return true, nil } @@ -19,10 +19,10 @@ func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error } -func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeZeroMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be zero-valued") } -func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeZeroMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be zero-valued") } diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go index a111881825..05c751b664 100644 --- a/vendor/github.com/onsi/gomega/matchers/consist_of.go +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -12,12 +12,12 @@ import ( ) type ConsistOfMatcher struct { - Elements []interface{} - missingElements []interface{} - extraElements []interface{} + Elements []any + missingElements []any + extraElements []any } -func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *ConsistOfMatcher) Match(actual any) (success bool, err error) { if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map/iter.Seq/iter.Seq2. 
Got:\n%s", format.Object(actual, 1)) } @@ -35,19 +35,19 @@ func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err er return true, nil } - var missingMatchers []interface{} + var missingMatchers []any matcher.extraElements, missingMatchers = bipartiteGraph.FreeLeftRight(edges) matcher.missingElements = equalMatchersToElements(missingMatchers) return false, nil } -func neighbours(value, matcher interface{}) (bool, error) { +func neighbours(value, matcher any) (bool, error) { match, err := matcher.(omegaMatcher).Match(value) return match && err == nil, nil } -func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { +func equalMatchersToElements(matchers []any) (elements []any) { for _, matcher := range matchers { if equalMatcher, ok := matcher.(*EqualMatcher); ok { elements = append(elements, equalMatcher.Expected) @@ -60,7 +60,7 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { return } -func flatten(elems []interface{}) []interface{} { +func flatten(elems []any) []any { if len(elems) != 1 || !(isArrayOrSlice(elems[0]) || (miter.IsIter(elems[0]) && !miter.IsSeq2(elems[0]))) { @@ -77,14 +77,14 @@ func flatten(elems []interface{}) []interface{} { } value := reflect.ValueOf(elems[0]) - flattened := make([]interface{}, value.Len()) + flattened := make([]any, value.Len()) for i := 0; i < value.Len(); i++ { flattened[i] = value.Index(i).Interface() } return flattened } -func matchers(expectedElems []interface{}) (matchers []interface{}) { +func matchers(expectedElems []any) (matchers []any) { for _, e := range flatten(expectedElems) { if e == nil { matchers = append(matchers, &BeNilMatcher{}) @@ -97,11 +97,11 @@ func matchers(expectedElems []interface{}) (matchers []interface{}) { return } -func presentable(elems []interface{}) interface{} { +func presentable(elems []any) any { elems = flatten(elems) if len(elems) == 0 { - return []interface{}{} + return []any{} } sv := reflect.ValueOf(elems) @@ -125,9 +125,9 @@ func presentable(elems []interface{}) interface{} { return ss.Interface() } -func valuesOf(actual interface{}) []interface{} { +func valuesOf(actual any) []any { value := reflect.ValueOf(actual) - values := []interface{}{} + values := []any{} if miter.IsIter(actual) { if miter.IsSeq2(actual) { miter.IterateKV(actual, func(k, v reflect.Value) bool { @@ -154,7 +154,7 @@ func valuesOf(actual interface{}) []interface{} { return values } -func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *ConsistOfMatcher) FailureMessage(actual any) (message string) { message = format.Message(actual, "to consist of", presentable(matcher.Elements)) message = appendMissingElements(message, matcher.missingElements) if len(matcher.extraElements) > 0 { @@ -164,7 +164,7 @@ func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message str return } -func appendMissingElements(message string, missingElements []interface{}) string { +func appendMissingElements(message string, missingElements []any) string { if len(missingElements) == 0 { return message } @@ -172,6 +172,6 @@ func appendMissingElements(message string, missingElements []interface{}) string format.Object(presentable(missingElements), 1)) } -func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to consist of", presentable(matcher.Elements)) } diff --git 
a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go index 830239c7b6..8337a5261c 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -12,11 +12,11 @@ import ( ) type ContainElementMatcher struct { - Element interface{} - Result []interface{} + Element any + Result []any } -func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *ContainElementMatcher) Match(actual any) (success bool, err error) { if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { return false, fmt.Errorf("ContainElement matcher expects an array/slice/map/iterator. Got:\n%s", format.Object(actual, 1)) } @@ -132,14 +132,14 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e var lastError error if !miter.IsIter(actual) { - var valueAt func(int) interface{} + var valueAt func(int) any var foundAt func(int) // We're dealing with an array/slice/map, so in all cases we can iterate // over the elements in actual using indices (that can be considered // keys in case of maps). if isMap(actual) { keys := value.MapKeys() - valueAt = func(i int) interface{} { + valueAt = func(i int) any { return value.MapIndex(keys[i]).Interface() } if result.Kind() != reflect.Invalid { @@ -150,7 +150,7 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e } } } else { - valueAt = func(i int) interface{} { + valueAt = func(i int) any { return value.Index(i).Interface() } if result.Kind() != reflect.Invalid { @@ -251,7 +251,7 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e } // pick up any findings the test is interested in as it specified a non-nil - // result reference. However, the expection always is that there are at + // result reference. However, the expectation always is that there are at // least one or multiple findings. So, if a result is expected, but we had // no findings, then this is an error. 
findings := getFindings() @@ -284,10 +284,10 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e return true, nil } -func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *ContainElementMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to contain element matching", matcher.Element) } -func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *ContainElementMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to contain element matching", matcher.Element) } diff --git a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go index d9fcb8b804..ce3041892b 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go @@ -9,11 +9,11 @@ import ( ) type ContainElementsMatcher struct { - Elements []interface{} - missingElements []interface{} + Elements []any + missingElements []any } -func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *ContainElementsMatcher) Match(actual any) (success bool, err error) { if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { return false, fmt.Errorf("ContainElements matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) } @@ -35,11 +35,11 @@ func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, return false, nil } -func (matcher *ContainElementsMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *ContainElementsMatcher) FailureMessage(actual any) (message string) { message = format.Message(actual, "to contain elements", presentable(matcher.Elements)) return appendMissingElements(message, matcher.missingElements) } -func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to contain elements", presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go index e725f8c275..d9980ee26b 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go @@ -11,10 +11,10 @@ import ( type ContainSubstringMatcher struct { Substr string - Args []interface{} + Args []any } -func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *ContainSubstringMatcher) Match(actual any) (success bool, err error) { actualString, ok := toString(actual) if !ok { return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. 
Got:\n%s", format.Object(actual, 1)) @@ -31,10 +31,10 @@ func (matcher *ContainSubstringMatcher) stringToMatch() string { return stringToMatch } -func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *ContainSubstringMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to contain substring", matcher.stringToMatch()) } -func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to contain substring", matcher.stringToMatch()) } diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go index befb7bdfd8..4ad166157a 100644 --- a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go @@ -9,10 +9,10 @@ import ( ) type EqualMatcher struct { - Expected interface{} + Expected any } -func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *EqualMatcher) Match(actual any) (success bool, err error) { if actual == nil && matcher.Expected == nil { return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") } @@ -27,7 +27,7 @@ func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) return reflect.DeepEqual(actual, matcher.Expected), nil } -func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *EqualMatcher) FailureMessage(actual any) (message string) { actualString, actualOK := actual.(string) expectedString, expectedOK := matcher.Expected.(string) if actualOK && expectedOK { @@ -37,6 +37,6 @@ func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) return format.Message(actual, "to equal", matcher.Expected) } -func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *EqualMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to equal", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go index 9856752f13..a4fcfc425a 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go @@ -12,7 +12,7 @@ type HaveCapMatcher struct { Count int } -func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveCapMatcher) Match(actual any) (success bool, err error) { length, ok := capOf(actual) if !ok { return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. 
Got:\n%s", format.Object(actual, 1)) @@ -21,10 +21,10 @@ func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err erro return length == matcher.Count, nil } -func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveCapMatcher) FailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count) } -func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveCapMatcher) NegatedFailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go index 4111f2b867..4c45063bd8 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go @@ -9,10 +9,10 @@ import ( ) type HaveEachMatcher struct { - Element interface{} + Element any } -func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveEachMatcher) Match(actual any) (success bool, err error) { if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { return false, fmt.Errorf("HaveEach matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) @@ -61,14 +61,14 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err format.Object(actual, 1)) } - var valueAt func(int) interface{} + var valueAt func(int) any if isMap(actual) { keys := value.MapKeys() - valueAt = func(i int) interface{} { + valueAt = func(i int) any { return value.MapIndex(keys[i]).Interface() } } else { - valueAt = func(i int) interface{} { + valueAt = func(i int) any { return value.Index(i).Interface() } } @@ -89,11 +89,11 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err } // FailureMessage returns a suitable failure message. -func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveEachMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to contain element matching", matcher.Element) } // NegatedFailureMessage returns a suitable negated failure message. 
-func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveEachMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to contain element matching", matcher.Element) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go index 23799f1c6f..8b2d297c57 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go +++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go @@ -14,13 +14,13 @@ type mismatchFailure struct { } type HaveExactElementsMatcher struct { - Elements []interface{} + Elements []any mismatchFailures []mismatchFailure missingIndex int extraIndex int } -func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveExactElementsMatcher) Match(actual any) (success bool, err error) { matcher.resetState() if isMap(actual) || miter.IsSeq2(actual) { @@ -108,7 +108,7 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool return success, nil } -func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveExactElementsMatcher) FailureMessage(actual any) (message string) { message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements)) if matcher.missingIndex > 0 { message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex) @@ -125,7 +125,7 @@ func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (mes return } -func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to contain elements", presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go index b57018745f..a5a028e9a6 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go @@ -11,7 +11,7 @@ type HaveExistingFieldMatcher struct { Field string } -func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveExistingFieldMatcher) Match(actual any) (success bool, err error) { // we don't care about the field's actual value, just about any error in // trying to find the field (or method). 
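The extractField helper, patched just below in have_field.go, resolves dotted paths and method names; that is what gives HaveField and HaveExistingField their string-based field syntax. A sketch (the pod type and values here are hypothetical):

    type pod struct {
        Status struct{ Phase string }
    }
    p := pod{}
    p.Status.Phase = "Running"
    // Dotted paths traverse nested structs; a "Name()" segment would call a
    // method. A plain expected value is wrapped in Equal by expectedMatcher().
    g.Expect(p).To(HaveField("Status.Phase", "Running"))
    g.Expect(p).To(HaveExistingField("Status.Phase"))
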
_, err = extractField(actual, matcher.Field, "HaveExistingField") @@ -27,10 +27,10 @@ func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool return false, err } -func (matcher *HaveExistingFieldMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveExistingFieldMatcher) FailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nto have field '%s'", format.Object(actual, 1), matcher.Field) } -func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nnot to have field '%s'", format.Object(actual, 1), matcher.Field) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go index 293457e85e..d9fbeaf752 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_field.go +++ b/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -17,7 +17,7 @@ func (e missingFieldError) Error() string { return string(e) } -func extractField(actual interface{}, field string, matchername string) (any, error) { +func extractField(actual any, field string, matchername string) (any, error) { fields := strings.SplitN(field, ".", 2) actualValue := reflect.ValueOf(actual) @@ -68,7 +68,7 @@ func extractField(actual interface{}, field string, matchername string) (any, er type HaveFieldMatcher struct { Field string - Expected interface{} + Expected any } func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher { @@ -80,7 +80,7 @@ func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher { return expectedMatcher } -func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveFieldMatcher) Match(actual any) (success bool, err error) { extractedField, err := extractField(actual, matcher.Field, "HaveField") if err != nil { return false, err @@ -89,7 +89,7 @@ func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err er return matcher.expectedMatcher().Match(extractedField) } -func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveFieldMatcher) FailureMessage(actual any) (message string) { extractedField, err := extractField(actual, matcher.Field, "HaveField") if err != nil { // this really shouldn't happen @@ -101,7 +101,7 @@ func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message str return message } -func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual any) (message string) { extractedField, err := extractField(actual, matcher.Field, "HaveField") if err != nil { // this really shouldn't happen diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go index d14d9e5fc6..2d561b9a22 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go @@ -11,12 +11,12 @@ import ( ) type HaveHTTPBodyMatcher struct { - Expected interface{} - cachedResponse interface{} + Expected any + cachedResponse any cachedBody []byte } -func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) { +func (matcher *HaveHTTPBodyMatcher) Match(actual any) (bool, error) { body, err := matcher.body(actual) if err 
!= nil { return false, err @@ -34,7 +34,7 @@ func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) { } } -func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual any) (message string) { body, err := matcher.body(actual) if err != nil { return fmt.Sprintf("failed to read body: %s", err) @@ -52,7 +52,7 @@ func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message } } -func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual any) (message string) { body, err := matcher.body(actual) if err != nil { return fmt.Sprintf("failed to read body: %s", err) @@ -73,7 +73,7 @@ func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (m // body returns the body. It is cached because once we read it in Match() // the Reader is closed and it is not readable again in FailureMessage() // or NegatedFailureMessage() -func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) { +func (matcher *HaveHTTPBodyMatcher) body(actual any) ([]byte, error) { if matcher.cachedResponse == actual && matcher.cachedBody != nil { return matcher.cachedBody, nil } diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go index c256f452e8..756722659b 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go @@ -11,10 +11,10 @@ import ( type HaveHTTPHeaderWithValueMatcher struct { Header string - Value interface{} + Value any } -func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual any) (success bool, err error) { headerValue, err := matcher.extractHeader(actual) if err != nil { return false, err @@ -28,7 +28,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (succes return headerMatcher.Match(headerValue) } -func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}) string { +func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual any) string { headerValue, err := matcher.extractHeader(actual) if err != nil { panic(err) // protected by Match() @@ -43,7 +43,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{} return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff) } -func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual any) (message string) { headerValue, err := matcher.extractHeader(actual) if err != nil { panic(err) // protected by Match() @@ -69,7 +69,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) getSubMatcher() (types.GomegaMatc } } -func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual interface{}) (string, error) { +func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual any) (string, error) { switch r := actual.(type) { case *http.Response: return r.Header.Get(matcher.Header), nil diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go index 0f66e46ece..8b25b3a9f9 
100644 --- a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go @@ -12,10 +12,10 @@ import ( ) type HaveHTTPStatusMatcher struct { - Expected []interface{} + Expected []any } -func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveHTTPStatusMatcher) Match(actual any) (success bool, err error) { var resp *http.Response switch a := actual.(type) { case *http.Response: @@ -48,11 +48,11 @@ func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, e return false, nil } -func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "to have HTTP status", matcher.expectedString()) } -func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "not to have HTTP status", matcher.expectedString()) } @@ -64,7 +64,7 @@ func (matcher *HaveHTTPStatusMatcher) expectedString() string { return strings.Join(lines, "\n") } -func formatHttpResponse(input interface{}) string { +func formatHttpResponse(input any) string { var resp *http.Response switch r := input.(type) { case *http.Response: diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go index b62ee93cb0..9e16dcf5d6 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -11,10 +11,10 @@ import ( ) type HaveKeyMatcher struct { - Key interface{} + Key any } -func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveKeyMatcher) Match(actual any) (success bool, err error) { if !isMap(actual) && !miter.IsSeq2(actual) { return false, fmt.Errorf("HaveKey matcher expects a map/iter.Seq2. 
Got:%s", format.Object(actual, 1)) } @@ -52,7 +52,7 @@ func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err erro return false, nil } -func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveKeyMatcher) FailureMessage(actual any) (message string) { switch matcher.Key.(type) { case omegaMatcher: return format.Message(actual, "to have key matching", matcher.Key) @@ -61,7 +61,7 @@ func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message strin } } -func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual any) (message string) { switch matcher.Key.(type) { case omegaMatcher: return format.Message(actual, "not to have key matching", matcher.Key) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go index 3d608f63eb..1c53f1e56a 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -11,11 +11,11 @@ import ( ) type HaveKeyWithValueMatcher struct { - Key interface{} - Value interface{} + Key any + Value any } -func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveKeyWithValueMatcher) Match(actual any) (success bool, err error) { if !isMap(actual) && !miter.IsSeq2(actual) { return false, fmt.Errorf("HaveKeyWithValue matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1)) } @@ -70,7 +70,7 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, return false, nil } -func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual any) (message string) { str := "to have {key: value}" if _, ok := matcher.Key.(omegaMatcher); ok { str += " matching" @@ -78,12 +78,12 @@ func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (mess str += " matching" } - expect := make(map[interface{}]interface{}, 1) + expect := make(map[any]any, 1) expect[matcher.Key] = matcher.Value return format.Message(actual, str, expect) } -func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual any) (message string) { kStr := "not to have key" if _, ok := matcher.Key.(omegaMatcher); ok { kStr = "not to have key matching" diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go index ca25713fe2..c334d4c0aa 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go @@ -10,7 +10,7 @@ type HaveLenMatcher struct { Count int } -func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveLenMatcher) Match(actual any) (success bool, err error) { length, ok := lengthOf(actual) if !ok { return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice/iterator. 
Got:\n%s", format.Object(actual, 1)) @@ -19,10 +19,10 @@ func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err erro return length == matcher.Count, nil } -func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveLenMatcher) FailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count) } -func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveLenMatcher) NegatedFailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go index 22a1b67306..a240f1a1c7 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -11,7 +11,7 @@ import ( type HaveOccurredMatcher struct { } -func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveOccurredMatcher) Match(actual any) (success bool, err error) { // is purely nil? if actual == nil { return false, nil @@ -26,10 +26,10 @@ func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err return !isNil(actual), nil } -func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveOccurredMatcher) FailureMessage(actual any) (message string) { return fmt.Sprintf("Expected an error to have occurred. Got:\n%s", format.Object(actual, 1)) } -func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual any) (message string) { return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred") } diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go index 1d8e80270b..7987d41f7b 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go @@ -8,10 +8,10 @@ import ( type HavePrefixMatcher struct { Prefix string - Args []interface{} + Args []any } -func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HavePrefixMatcher) Match(actual any) (success bool, err error) { actualString, ok := toString(actual) if !ok { return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. 
Got:\n%s", format.Object(actual, 1)) @@ -27,10 +27,10 @@ func (matcher *HavePrefixMatcher) prefix() string { return matcher.Prefix } -func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HavePrefixMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to have prefix", matcher.prefix()) } -func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to have prefix", matcher.prefix()) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go index 40a3526eb2..2aa4ceacbc 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go @@ -8,10 +8,10 @@ import ( type HaveSuffixMatcher struct { Suffix string - Args []interface{} + Args []any } -func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveSuffixMatcher) Match(actual any) (success bool, err error) { actualString, ok := toString(actual) if !ok { return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) @@ -27,10 +27,10 @@ func (matcher *HaveSuffixMatcher) suffix() string { return matcher.Suffix } -func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveSuffixMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to have suffix", matcher.suffix()) } -func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to have suffix", matcher.suffix()) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_value.go b/vendor/github.com/onsi/gomega/matchers/have_value.go index f672528357..4c39e0db00 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_value.go +++ b/vendor/github.com/onsi/gomega/matchers/have_value.go @@ -12,10 +12,10 @@ const maxIndirections = 31 type HaveValueMatcher struct { Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value. - resolvedActual interface{} // the ("resolved") value. + resolvedActual any // the ("resolved") value. } -func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) { +func (m *HaveValueMatcher) Match(actual any) (bool, error) { val := reflect.ValueOf(actual) for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- { // return an error if value isn't valid. 
Please note that we cannot @@ -45,10 +45,10 @@ func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) { return false, errors.New(format.Message(actual, "too many indirections")) } -func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) { +func (m *HaveValueMatcher) FailureMessage(_ any) (message string) { return m.Matcher.FailureMessage(m.resolvedActual) } -func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) { +func (m *HaveValueMatcher) NegatedFailureMessage(_ any) (message string) { return m.Matcher.NegatedFailureMessage(m.resolvedActual) } diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go index c539dd389c..f9d313772f 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -71,14 +71,14 @@ func (matcher *MatchErrorMatcher) Match(actual any) (success bool, err error) { format.Object(expected, 1)) } -func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *MatchErrorMatcher) FailureMessage(actual any) (message string) { if matcher.isFunc { return format.Message(actual, fmt.Sprintf("to match error function %s", matcher.FuncErrDescription[0])) } return format.Message(actual, "to match error", matcher.Expected) } -func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual any) (message string) { if matcher.isFunc { return format.Message(actual, fmt.Sprintf("not to match error function %s", matcher.FuncErrDescription[0])) } diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go index f962f139ff..331f289abc 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go @@ -9,18 +9,18 @@ import ( ) type MatchJSONMatcher struct { - JSONToMatch interface{} - firstFailurePath []interface{} + JSONToMatch any + firstFailurePath []any } -func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *MatchJSONMatcher) Match(actual any) (success bool, err error) { actualString, expectedString, err := matcher.prettyPrint(actual) if err != nil { return false, err } - var aval interface{} - var eval interface{} + var aval any + var eval any // this is guarded by prettyPrint json.Unmarshal([]byte(actualString), &aval) @@ -30,17 +30,17 @@ func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err er return equal, nil } -func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *MatchJSONMatcher) FailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.prettyPrint(actual) return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath) } -func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.prettyPrint(actual) return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath) } -func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, 
expectedFormatted string, err error) { +func (matcher *MatchJSONMatcher) prettyPrint(actual any) (actualFormatted, expectedFormatted string, err error) { actualString, ok := toString(actual) if !ok { return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go index adac5db6b8..779be683e0 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go @@ -9,10 +9,10 @@ import ( type MatchRegexpMatcher struct { Regexp string - Args []interface{} + Args []any } -func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *MatchRegexpMatcher) Match(actual any) (success bool, err error) { actualString, ok := toString(actual) if !ok { return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1)) @@ -26,11 +26,11 @@ func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err return match, nil } -func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *MatchRegexpMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to match regular expression", matcher.regexp()) } -func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to match regular expression", matcher.regexp()) } diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go index 5c815f5af7..f7dcaf6fdc 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go @@ -15,10 +15,10 @@ import ( ) type MatchXMLMatcher struct { - XMLToMatch interface{} + XMLToMatch any } -func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *MatchXMLMatcher) Match(actual any) (success bool, err error) { actualString, expectedString, err := matcher.formattedPrint(actual) if err != nil { return false, err @@ -37,17 +37,17 @@ func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err err return reflect.DeepEqual(aval, eval), nil } -func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *MatchXMLMatcher) FailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.formattedPrint(actual) return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString) } -func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.formattedPrint(actual) return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString) } -func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) { +func (matcher *MatchXMLMatcher) formattedPrint(actual any) (actualString, expectedString string, err error) { var ok bool actualString, ok = toString(actual) if !ok { diff --git 
a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go index 2cb6b47db9..95057c26cc 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -9,18 +9,18 @@ import ( ) type MatchYAMLMatcher struct { - YAMLToMatch interface{} - firstFailurePath []interface{} + YAMLToMatch any + firstFailurePath []any } -func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *MatchYAMLMatcher) Match(actual any) (success bool, err error) { actualString, expectedString, err := matcher.toStrings(actual) if err != nil { return false, err } - var aval interface{} - var eval interface{} + var aval any + var eval any if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil { return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err) @@ -34,23 +34,23 @@ func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err er return equal, nil } -func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *MatchYAMLMatcher) FailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.toNormalisedStrings(actual) return formattedMessage(format.Message(actualString, "to match YAML of", expectedString), matcher.firstFailurePath) } -func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.toNormalisedStrings(actual) return formattedMessage(format.Message(actualString, "not to match YAML of", expectedString), matcher.firstFailurePath) } -func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { +func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual any) (actualFormatted, expectedFormatted string, err error) { actualString, expectedString, err := matcher.toStrings(actual) return normalise(actualString), normalise(expectedString), err } func normalise(input string) string { - var val interface{} + var val any err := yaml.Unmarshal([]byte(input), &val) if err != nil { panic(err) // unreachable since Match already calls Unmarshal @@ -62,7 +62,7 @@ func normalise(input string) string { return strings.TrimSpace(string(output)) } -func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { +func (matcher *MatchYAMLMatcher) toStrings(actual any) (actualFormatted, expectedFormatted string, err error) { actualString, ok := toString(actual) if !ok { return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. 
Got actual:\n%s", format.Object(actual, 1)) diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go index 78b71910d1..c598b7899a 100644 --- a/vendor/github.com/onsi/gomega/matchers/not.go +++ b/vendor/github.com/onsi/gomega/matchers/not.go @@ -8,7 +8,7 @@ type NotMatcher struct { Matcher types.GomegaMatcher } -func (m *NotMatcher) Match(actual interface{}) (bool, error) { +func (m *NotMatcher) Match(actual any) (bool, error) { success, err := m.Matcher.Match(actual) if err != nil { return false, err @@ -16,14 +16,14 @@ func (m *NotMatcher) Match(actual interface{}) (bool, error) { return !success, nil } -func (m *NotMatcher) FailureMessage(actual interface{}) (message string) { +func (m *NotMatcher) FailureMessage(actual any) (message string) { return m.Matcher.NegatedFailureMessage(actual) // works beautifully } -func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *NotMatcher) NegatedFailureMessage(actual any) (message string) { return m.Matcher.FailureMessage(actual) // works beautifully } -func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (m *NotMatcher) MatchMayChangeInTheFuture(actual any) bool { return types.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value } diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go index 841ae26ab0..6578404b0e 100644 --- a/vendor/github.com/onsi/gomega/matchers/or.go +++ b/vendor/github.com/onsi/gomega/matchers/or.go @@ -14,7 +14,7 @@ type OrMatcher struct { firstSuccessfulMatcher types.GomegaMatcher } -func (m *OrMatcher) Match(actual interface{}) (success bool, err error) { +func (m *OrMatcher) Match(actual any) (success bool, err error) { m.firstSuccessfulMatcher = nil for _, matcher := range m.Matchers { success, err := matcher.Match(actual) @@ -29,16 +29,16 @@ func (m *OrMatcher) Match(actual interface{}) (success bool, err error) { return false, nil } -func (m *OrMatcher) FailureMessage(actual interface{}) (message string) { +func (m *OrMatcher) FailureMessage(actual any) (message string) { // not the most beautiful list of matchers, but not bad either... 
return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers)) } -func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *OrMatcher) NegatedFailureMessage(actual any) (message string) { return m.firstSuccessfulMatcher.NegatedFailureMessage(actual) } -func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (m *OrMatcher) MatchMayChangeInTheFuture(actual any) bool { /* Example with 3 matchers: A, B, C diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go index adc8cee630..8be5a7ccf3 100644 --- a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go @@ -8,11 +8,11 @@ import ( ) type PanicMatcher struct { - Expected interface{} - object interface{} + Expected any + object any } -func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *PanicMatcher) Match(actual any) (success bool, err error) { if actual == nil { return false, fmt.Errorf("PanicMatcher expects a non-nil actual.") } @@ -52,7 +52,7 @@ func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) return } -func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *PanicMatcher) FailureMessage(actual any) (message string) { if matcher.Expected == nil { // We wanted any panic to occur, but none did. return format.Message(actual, "to panic") @@ -91,7 +91,7 @@ func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) } } -func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *PanicMatcher) NegatedFailureMessage(actual any) (message string) { if matcher.Expected == nil { // We didn't want any panic to occur, but one did. return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1))) diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go index 948164eaf8..1d9f61d636 100644 --- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -11,12 +11,12 @@ import ( ) type ReceiveMatcher struct { - Args []interface{} + Args []any receivedValue reflect.Value channelClosed bool } -func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *ReceiveMatcher) Match(actual any) (success bool, err error) { if !isChan(actual) { return false, fmt.Errorf("ReceiveMatcher expects a channel. 
Got:\n%s", format.Object(actual, 1)) } @@ -30,7 +30,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro var subMatcher omegaMatcher var hasSubMatcher bool - var resultReference interface{} + var resultReference any // Valid arg formats are as follows, always with optional POINTER before // optional MATCHER: @@ -115,8 +115,8 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro return false, nil } -func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { - var matcherArg interface{} +func (matcher *ReceiveMatcher) FailureMessage(actual any) (message string) { + var matcherArg any if len(matcher.Args) > 0 { matcherArg = matcher.Args[len(matcher.Args)-1] } @@ -136,8 +136,8 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin return format.Message(actual, "to receive something."+closedAddendum) } -func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { - var matcherArg interface{} +func (matcher *ReceiveMatcher) NegatedFailureMessage(actual any) (message string) { + var matcherArg any if len(matcher.Args) > 0 { matcherArg = matcher.Args[len(matcher.Args)-1] } @@ -157,7 +157,7 @@ func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (messag return format.Message(actual, "not to receive anything."+closedAddendum) } -func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual any) bool { if !isChan(actual) { return false } diff --git a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go index ec68fe8b62..2adc4825aa 100644 --- a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go @@ -8,13 +8,13 @@ import ( ) type SatisfyMatcher struct { - Predicate interface{} + Predicate any // cached type predicateArgType reflect.Type } -func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher { +func NewSatisfyMatcher(predicate any) *SatisfyMatcher { if predicate == nil { panic("predicate cannot be nil") } @@ -35,7 +35,7 @@ func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher { } } -func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) { +func (m *SatisfyMatcher) Match(actual any) (success bool, err error) { // prepare a parameter to pass to the predicate var param reflect.Value if actual != nil && reflect.TypeOf(actual).AssignableTo(m.predicateArgType) { @@ -57,10 +57,10 @@ func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) { return result[0].Bool(), nil } -func (m *SatisfyMatcher) FailureMessage(actual interface{}) (message string) { +func (m *SatisfyMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to satisfy predicate", m.Predicate) } -func (m *SatisfyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *SatisfyMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "to not satisfy predicate", m.Predicate) } diff --git a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go index 1369c1e87f..30dd58f4a5 100644 --- a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go +++ b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go @@ 
-8,7 +8,7 @@ import ( "strings" ) -func formattedMessage(comparisonMessage string, failurePath []interface{}) string { +func formattedMessage(comparisonMessage string, failurePath []any) string { var diffMessage string if len(failurePath) == 0 { diffMessage = "" @@ -18,7 +18,7 @@ func formattedMessage(comparisonMessage string, failurePath []interface{}) strin return fmt.Sprintf("%s%s", comparisonMessage, diffMessage) } -func formattedFailurePath(failurePath []interface{}) string { +func formattedFailurePath(failurePath []any) string { formattedPaths := []string{} for i := len(failurePath) - 1; i >= 0; i-- { switch p := failurePath[i].(type) { @@ -34,33 +34,33 @@ func formattedFailurePath(failurePath []interface{}) string { return strings.Join(formattedPaths, "") } -func deepEqual(a interface{}, b interface{}) (bool, []interface{}) { - var errorPath []interface{} +func deepEqual(a any, b any) (bool, []any) { + var errorPath []any if reflect.TypeOf(a) != reflect.TypeOf(b) { return false, errorPath } switch a.(type) { - case []interface{}: - if len(a.([]interface{})) != len(b.([]interface{})) { + case []any: + if len(a.([]any)) != len(b.([]any)) { return false, errorPath } - for i, v := range a.([]interface{}) { - elementEqual, keyPath := deepEqual(v, b.([]interface{})[i]) + for i, v := range a.([]any) { + elementEqual, keyPath := deepEqual(v, b.([]any)[i]) if !elementEqual { return false, append(keyPath, i) } } return true, errorPath - case map[interface{}]interface{}: - if len(a.(map[interface{}]interface{})) != len(b.(map[interface{}]interface{})) { + case map[any]any: + if len(a.(map[any]any)) != len(b.(map[any]any)) { return false, errorPath } - for k, v1 := range a.(map[interface{}]interface{}) { - v2, ok := b.(map[interface{}]interface{})[k] + for k, v1 := range a.(map[any]any) { + v2, ok := b.(map[any]any)[k] if !ok { return false, errorPath } @@ -71,13 +71,13 @@ func deepEqual(a interface{}, b interface{}) (bool, []interface{}) { } return true, errorPath - case map[string]interface{}: - if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) { + case map[string]any: + if len(a.(map[string]any)) != len(b.(map[string]any)) { return false, errorPath } - for k, v1 := range a.(map[string]interface{}) { - v2, ok := b.(map[string]interface{})[k] + for k, v1 := range a.(map[string]any) { + v2, ok := b.(map[string]any)[k] if !ok { return false, errorPath } diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go index 327350f7b7..f0b2c4aa66 100644 --- a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go @@ -14,7 +14,7 @@ type formattedGomegaError interface { type SucceedMatcher struct { } -func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *SucceedMatcher) Match(actual any) (success bool, err error) { // is purely nil? 
if actual == nil { return true, nil @@ -29,7 +29,7 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro return isNil(actual), nil } -func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *SucceedMatcher) FailureMessage(actual any) (message string) { var fgErr formattedGomegaError if errors.As(actual.(error), &fgErr) { return fgErr.FormattedGomegaError() @@ -37,6 +37,6 @@ func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message strin return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1)) } -func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *SucceedMatcher) NegatedFailureMessage(actual any) (message string) { return "Expected failure, but got no error." } diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go index 830e308274..0d78779d47 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go @@ -11,7 +11,7 @@ type BipartiteGraph struct { Edges EdgeSet } -func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) { +func NewBipartiteGraph(leftValues, rightValues []any, neighbours func(any, any) (bool, error)) (*BipartiteGraph, error) { left := NodeOrderedSet{} for i, v := range leftValues { left = append(left, Node{ID: i, Value: v}) @@ -41,7 +41,7 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in // FreeLeftRight returns left node values and right node values // of the BipartiteGraph's nodes which are not part of the given edges. 
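FreeLeftRight is how ConsistOf (earlier in this patch) turns unmatched graph nodes into its missing/extra element reports; the maximum bipartite matching is what makes ConsistOf order-independent while consuming each expected element exactly once. A sketch (data hypothetical):

    // Passes: same elements, any order, each matched exactly once.
    g.Expect([]string{"b", "a"}).To(ConsistOf("a", "b"))
    // ConsistOf("a", "c") would fail here, reporting "c" as missing
    // and "b" as an extra element.
    g.Expect([]string{"a", "b"}).NotTo(ConsistOf("a", "c"))
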
-func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []interface{}) { +func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []any) { for _, node := range bg.Left { if edges.Free(node) { leftValues = append(leftValues, node.Value) diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go index cd597a2f22..66d3578d51 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go @@ -2,7 +2,7 @@ package node type Node struct { ID int - Value interface{} + Value any } type NodeOrderedSet []Node diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go index b9440ac7ad..d020dedc30 100644 --- a/vendor/github.com/onsi/gomega/matchers/type_support.go +++ b/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -20,16 +20,16 @@ import ( ) type omegaMatcher interface { - Match(actual interface{}) (success bool, err error) - FailureMessage(actual interface{}) (message string) - NegatedFailureMessage(actual interface{}) (message string) + Match(actual any) (success bool, err error) + FailureMessage(actual any) (message string) + NegatedFailureMessage(actual any) (message string) } -func isBool(a interface{}) bool { +func isBool(a any) bool { return reflect.TypeOf(a).Kind() == reflect.Bool } -func isNumber(a interface{}) bool { +func isNumber(a any) bool { if a == nil { return false } @@ -37,22 +37,22 @@ func isNumber(a interface{}) bool { return reflect.Int <= kind && kind <= reflect.Float64 } -func isInteger(a interface{}) bool { +func isInteger(a any) bool { kind := reflect.TypeOf(a).Kind() return reflect.Int <= kind && kind <= reflect.Int64 } -func isUnsignedInteger(a interface{}) bool { +func isUnsignedInteger(a any) bool { kind := reflect.TypeOf(a).Kind() return reflect.Uint <= kind && kind <= reflect.Uint64 } -func isFloat(a interface{}) bool { +func isFloat(a any) bool { kind := reflect.TypeOf(a).Kind() return reflect.Float32 <= kind && kind <= reflect.Float64 } -func toInteger(a interface{}) int64 { +func toInteger(a any) int64 { if isInteger(a) { return reflect.ValueOf(a).Int() } else if isUnsignedInteger(a) { @@ -63,7 +63,7 @@ func toInteger(a interface{}) int64 { panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) } -func toUnsignedInteger(a interface{}) uint64 { +func toUnsignedInteger(a any) uint64 { if isInteger(a) { return uint64(reflect.ValueOf(a).Int()) } else if isUnsignedInteger(a) { @@ -74,7 +74,7 @@ func toUnsignedInteger(a interface{}) uint64 { panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) } -func toFloat(a interface{}) float64 { +func toFloat(a any) float64 { if isInteger(a) { return float64(reflect.ValueOf(a).Int()) } else if isUnsignedInteger(a) { @@ -85,26 +85,26 @@ func toFloat(a interface{}) float64 { panic(fmt.Sprintf("Expected a number! 
Got <%T> %#v", a, a)) } -func isError(a interface{}) bool { +func isError(a any) bool { _, ok := a.(error) return ok } -func isChan(a interface{}) bool { +func isChan(a any) bool { if isNil(a) { return false } return reflect.TypeOf(a).Kind() == reflect.Chan } -func isMap(a interface{}) bool { +func isMap(a any) bool { if a == nil { return false } return reflect.TypeOf(a).Kind() == reflect.Map } -func isArrayOrSlice(a interface{}) bool { +func isArrayOrSlice(a any) bool { if a == nil { return false } @@ -116,14 +116,14 @@ func isArrayOrSlice(a interface{}) bool { } } -func isString(a interface{}) bool { +func isString(a any) bool { if a == nil { return false } return reflect.TypeOf(a).Kind() == reflect.String } -func toString(a interface{}) (string, bool) { +func toString(a any) (string, bool) { aString, isString := a.(string) if isString { return aString, true @@ -147,7 +147,7 @@ func toString(a interface{}) (string, bool) { return "", false } -func lengthOf(a interface{}) (int, bool) { +func lengthOf(a any) (int, bool) { if a == nil { return 0, false } @@ -169,7 +169,7 @@ func lengthOf(a interface{}) (int, bool) { return 0, false } } -func capOf(a interface{}) (int, bool) { +func capOf(a any) (int, bool) { if a == nil { return 0, false } @@ -181,7 +181,7 @@ func capOf(a interface{}) (int, bool) { } } -func isNil(a interface{}) bool { +func isNil(a any) bool { if a == nil { return true } diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go index 6f743b1b32..6231c3b476 100644 --- a/vendor/github.com/onsi/gomega/matchers/with_transform.go +++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go @@ -9,20 +9,20 @@ import ( type WithTransformMatcher struct { // input - Transform interface{} // must be a function of one parameter that returns one value and an optional error + Transform any // must be a function of one parameter that returns one value and an optional error Matcher types.GomegaMatcher // cached value transformArgType reflect.Type // state - transformedValue interface{} + transformedValue any } // reflect.Type for error var errorT = reflect.TypeOf((*error)(nil)).Elem() -func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher { +func NewWithTransformMatcher(transform any, matcher types.GomegaMatcher) *WithTransformMatcher { if transform == nil { panic("transform function cannot be nil") } @@ -43,7 +43,7 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) } } -func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { +func (m *WithTransformMatcher) Match(actual any) (bool, error) { // prepare a parameter to pass to the Transform function var param reflect.Value if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) { @@ -72,15 +72,15 @@ func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { return m.Matcher.Match(m.transformedValue) } -func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) { +func (m *WithTransformMatcher) FailureMessage(_ any) (message string) { return m.Matcher.FailureMessage(m.transformedValue) } -func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) { +func (m *WithTransformMatcher) NegatedFailureMessage(_ any) (message string) { return m.Matcher.NegatedFailureMessage(m.transformedValue) } -func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool { +func (m *WithTransformMatcher) 
MatchMayChangeInTheFuture(_ any) bool { // TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.) // // Querying the next matcher is fine if the transformer always will return the same value. diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index 30f2beed30..685a46f373 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -10,20 +10,20 @@ type GomegaFailHandler func(message string, callerSkip ...int) // A simple *testing.T interface wrapper type GomegaTestingT interface { Helper() - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) } -// Gomega represents an object that can perform synchronous and assynchronous assertions with Gomega matchers +// Gomega represents an object that can perform synchronous and asynchronous assertions with Gomega matchers type Gomega interface { - Ω(actual interface{}, extra ...interface{}) Assertion - Expect(actual interface{}, extra ...interface{}) Assertion - ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion + Ω(actual any, extra ...any) Assertion + Expect(actual any, extra ...any) Assertion + ExpectWithOffset(offset int, actual any, extra ...any) Assertion - Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion - EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion + Eventually(actualOrCtx any, args ...any) AsyncAssertion + EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion - Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion - ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion + Consistently(actualOrCtx any, args ...any) AsyncAssertion + ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion SetDefaultEventuallyTimeout(time.Duration) SetDefaultEventuallyPollingInterval(time.Duration) @@ -37,9 +37,9 @@ type Gomega interface { // // For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers type GomegaMatcher interface { - Match(actual interface{}) (success bool, err error) - FailureMessage(actual interface{}) (message string) - NegatedFailureMessage(actual interface{}) (message string) + Match(actual any) (success bool, err error) + FailureMessage(actual any) (message string) + NegatedFailureMessage(actual any) (message string) } /* @@ -52,10 +52,10 @@ For example, a process' exit code can never change. So, gexec's Exit matcher re for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. 
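Because `any` is simply an alias for `interface{}` (Go 1.18+), the signature changes above are source-compatible and custom matchers written against the old signatures keep compiling. As a minimal sketch against the updated GomegaMatcher interface (the matcher and package names are illustrative, not part of this patch):

package matchers_example

import "fmt"

// beEmptyString is a toy matcher implementing the updated GomegaMatcher
// signatures, which now use `any` in place of `interface{}`.
type beEmptyString struct{}

func (beEmptyString) Match(actual any) (bool, error) {
	s, ok := actual.(string)
	if !ok {
		return false, fmt.Errorf("expected a string, got <%T> %#v", actual, actual)
	}
	return s == "", nil
}

func (beEmptyString) FailureMessage(actual any) string {
	return fmt.Sprintf("Expected %#v to be the empty string", actual)
}

func (beEmptyString) NegatedFailureMessage(actual any) string {
	return fmt.Sprintf("Expected %#v not to be the empty string", actual)
}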
*/ type OracleMatcher interface { - MatchMayChangeInTheFuture(actual interface{}) bool + MatchMayChangeInTheFuture(actual any) bool } -func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool { +func MatchMayChangeInTheFuture(matcher GomegaMatcher, value any) bool { oracleMatcher, ok := matcher.(OracleMatcher) if !ok { return true @@ -67,8 +67,13 @@ func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool { // AsyncAssertions are returned by Eventually and Consistently and enable matchers to be polled repeatedly to ensure // they are eventually satisfied type AsyncAssertion interface { - Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool + Should(matcher GomegaMatcher, optionalDescription ...any) bool + ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool + + // equivalent to above + To(matcher GomegaMatcher, optionalDescription ...any) bool + ToNot(matcher GomegaMatcher, optionalDescription ...any) bool + NotTo(matcher GomegaMatcher, optionalDescription ...any) bool WithOffset(offset int) AsyncAssertion WithTimeout(interval time.Duration) AsyncAssertion @@ -76,18 +81,18 @@ type AsyncAssertion interface { Within(timeout time.Duration) AsyncAssertion ProbeEvery(interval time.Duration) AsyncAssertion WithContext(ctx context.Context) AsyncAssertion - WithArguments(argsToForward ...interface{}) AsyncAssertion + WithArguments(argsToForward ...any) AsyncAssertion MustPassRepeatedly(count int) AsyncAssertion } // Assertions are returned by Ω and Expect and enable assertions against Gomega matchers type Assertion interface { - Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool + Should(matcher GomegaMatcher, optionalDescription ...any) bool + ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool - To(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool - NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool + To(matcher GomegaMatcher, optionalDescription ...any) bool + ToNot(matcher GomegaMatcher, optionalDescription ...any) bool + NotTo(matcher GomegaMatcher, optionalDescription ...any) bool WithOffset(offset int) Assertion diff --git a/vendor/github.com/openshift-kni/k8sreporter/reporter.go b/vendor/github.com/openshift-kni/k8sreporter/reporter.go index f5da811f7f..794eb814d3 100644 --- a/vendor/github.com/openshift-kni/k8sreporter/reporter.go +++ b/vendor/github.com/openshift-kni/k8sreporter/reporter.go @@ -104,13 +104,13 @@ func (r *KubernetesReporter) logPods(dirName string) { if !r.namespaceToLog(pod.Namespace) { continue } - f, err := logFileFor(r.reportPath, dirName, pod.Namespace+"-pods_specs") + f, err := logFileFor(r.reportPath, dirName, pod.Namespace+"_"+pod.Name+"_pods_specs") if err != nil { fmt.Fprintf(os.Stderr, "failed to open pods_specs file: %v\n", dirName) return } defer f.Close() - fmt.Fprintf(f, fileSeparator) + j, err := json.MarshalIndent(pod, "", " ") if err != nil { fmt.Println("Failed to marshal pods", err) @@ -123,7 +123,7 @@ func (r *KubernetesReporter) logPods(dirName string) { func (r *KubernetesReporter) logNodes(dirName string) { f, err := logFileFor(r.reportPath, dirName, "nodes") if err != nil { - fmt.Fprintf(os.Stderr, "failed to open nodes file: %v\n", dirName) + fmt.Fprintf(os.Stderr, "failed to open nodes 
file: %v: %v\n", dirName, err) return } defer f.Close() @@ -153,9 +153,9 @@ func (r *KubernetesReporter) logLogs(since time.Time, dirName string) { if !r.namespaceToLog(pod.Namespace) { continue } - f, err := logFileFor(r.reportPath, dirName, pod.Namespace+"-pods_logs") + f, err := logFileFor(r.reportPath, dirName, pod.Namespace+"_"+pod.Name+"_pods_logs") if err != nil { - fmt.Fprintf(os.Stderr, "failed to open pods_logs file: %v\n", dirName) + fmt.Fprintf(os.Stderr, "failed to open pods_logs file: %v: %v\n", dirName, err) return } defer f.Close() @@ -229,18 +229,18 @@ func (r *KubernetesReporter) logEventsInNamespace(since time.Time, w io.Writer, } func (r *KubernetesReporter) logCustomCR(cr runtimeclient.ObjectList, namespace *string, dirName string) { - f, err := logFileFor(r.reportPath, dirName, "crs") + + fileName := getObjectKind(r.clients.Scheme(), cr) + if namespace != nil { + fileName = fmt.Sprintf("%s_%s", fileName, *namespace) + } + + f, err := logFileFor(r.reportPath, dirName, fileName) if err != nil { fmt.Fprintf(os.Stderr, "failed to open crs file: %v\n", dirName) return } defer f.Close() - fmt.Fprintf(f, fileSeparator) - if namespace != nil { - fmt.Fprintf(f, "Dumping %T in namespace %s\n", cr, *namespace) - } else { - fmt.Fprintf(f, "Dumping %T\n", cr) - } options := []runtimeclient.ListOption{} if namespace != nil { @@ -278,3 +278,22 @@ func cleanDirName(dirName string) string { res = strings.ReplaceAll(res, " ", "_") return res } + +func getObjectKind(typer runtime.ObjectTyper, cr runtimeclient.ObjectList) string { + gvks, _, err := typer.ObjectKinds(cr) + if err != nil { + return "lostfound" + } + + for _, gvk := range gvks { + if len(gvk.Kind) == 0 { + continue + } + if len(gvk.Version) == 0 || gvk.Version == runtime.APIVersionInternal { + continue + } + return gvk.Kind + } + + return "lostfound" +} diff --git a/vendor/github.com/openshift/api/.ci-operator.yaml b/vendor/github.com/openshift/api/.ci-operator.yaml index 7c15f83e3e..461415cbc5 100644 --- a/vendor/github.com/openshift/api/.ci-operator.yaml +++ b/vendor/github.com/openshift/api/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: release namespace: openshift - tag: rhel-9-release-golang-1.23-openshift-4.19 + tag: rhel-9-release-golang-1.24-openshift-4.20 diff --git a/vendor/github.com/openshift/api/.golangci.yaml b/vendor/github.com/openshift/api/.golangci.yaml index 848960e946..c5278d9c8d 100644 --- a/vendor/github.com/openshift/api/.golangci.yaml +++ b/vendor/github.com/openshift/api/.golangci.yaml @@ -1,25 +1,69 @@ -linters-settings: - custom: - kal: - type: "module" - description: KAL is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices. - settings: - linters: - enable: - - "maxlength" - - "nobools" - - "statussubresource" - lintersConfig: - conditions: - isFirstField: Warn - useProtobuf: Ignore - usePatchStrategy: Ignore +version: "2" linters: - disable-all: true + default: none enable: - - kal + - kubeapilinter + settings: + custom: + kubeapilinter: + path: tools/_output/bin/kube-api-linter.so + description: kubeapilinter is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices. 
+ settings: + linters: + enable: + - maxlength + - nobools + - nomaps + - statussubresource + lintersConfig: + conditions: + isFirstField: Warn + usePatchStrategy: Ignore + useProtobuf: Ignore + optionalfields: + pointers: + preference: WhenRequired + policy: SuggestFix + omitEmpty: + # Ignore missing omitempty so that we can omit the omitempty for discoverability. + # Discoverability is for configuration APIs, generally singletons. + # Refer to the API conventions for when to use discoverability (not our default stance). + policy: Ignore + uniqueMarkers: + customMarkers: + - identifier: "openshift:validation:FeatureGateAwareEnum" + attributes: + - featureGate + - requiredFeatureGate + - identifier: "openshift:validation:FeatureGateMaxItems" + attributes: + - featureGate + - requiredFeatureGate + - identifier: "openshift:validation:FeatureGateAwareXValidation" + attributes: + - featureGate + - requiredFeatureGate + - rule + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ issues: # We have a lot of existing issues. # Want to make sure that those adding new fields have an # opportunity to fix them when running the linter locally. max-issues-per-linter: 1000 +formatters: + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/openshift/api/Dockerfile.ocp b/vendor/github.com/openshift/api/Dockerfile.ocp index 8d50096ed0..0a4c98c488 100644 --- a/vendor/github.com/openshift/api/Dockerfile.ocp +++ b/vendor/github.com/openshift/api/Dockerfile.ocp @@ -1,10 +1,10 @@ -FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.24-openshift-4.20 AS builder WORKDIR /go/src/github.com/openshift/api COPY . . ENV GO_PACKAGE github.com/openshift/api RUN make build --warn-undefined-variables -FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 +FROM registry.ci.openshift.org/ocp/4.20:base-rhel9 # copy the built binaries to /usr/bin COPY --from=builder /go/src/github.com/openshift/api/render /usr/bin/ @@ -15,7 +15,7 @@ RUN mkdir -p /usr/share/bootkube/manifests/manifests COPY payload-manifests/crds/* /usr/share/bootkube/manifests/manifests # these are applied by the CVO -COPY manifests /manifests +RUN mkdir -p /manifests COPY payload-manifests/crds/* /manifests COPY payload-manifests/featuregates/* /manifests COPY payload-command/empty-resources /manifests diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index 6982e4efe7..123efe1029 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -4,7 +4,7 @@ all: build update: update-codegen-crds RUNTIME ?= podman -RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.23-openshift-4.19 +RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.24-openshift-4.20 EXCLUDE_DIRS := _output/ dependencymagnet/ hack/ third_party/ tls/ tools/ vendor/ tests/ GO_PACKAGES :=$(addsuffix ...,$(addprefix ./,$(filter-out $(EXCLUDE_DIRS), $(wildcard */)))) diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md index 934bcd3299..071da98c07 100644 --- a/vendor/github.com/openshift/api/README.md +++ b/vendor/github.com/openshift/api/README.md @@ -99,6 +99,8 @@ and then enforces the following rules. 3. 
Every test must be run on every TechPreview platform we have jobs for. (Ask for an exception if your feature doesn't support a variant.) 4. Every test must run at least 14 times on every platform/variant. 5. Every test must pass at least 95% of the time on every platform/variant. +6. Test results are taken from the last 7 days if the test was run at least 14 times during that period. Otherwise, data from the last 14 days is used. +7. Test flakes (even if the test eventually passes on a retry) are considered failures and negatively impact the pass rate. If your FeatureGate lacks automated testing, there is an exception process that allows QE to sign off on the promotion by commenting on the PR. diff --git a/vendor/github.com/openshift/api/apps/v1/generated.proto b/vendor/github.com/openshift/api/apps/v1/generated.proto index 6f50fcaf95..b0a1a1c083 100644 --- a/vendor/github.com/openshift/api/apps/v1/generated.proto +++ b/vendor/github.com/openshift/api/apps/v1/generated.proto @@ -186,34 +186,43 @@ message DeploymentConfigSpec { message DeploymentConfigStatus { // latestVersion is used to determine whether the current deployment associated with a deployment // config is out of sync. + // +optional optional int64 latestVersion = 1; // observedGeneration is the most recent generation observed by the deployment config controller. + // +optional optional int64 observedGeneration = 2; // replicas is the total number of pods targeted by this deployment config. + // +optional optional int32 replicas = 3; // updatedReplicas is the total number of non-terminated pods targeted by this deployment config // that have the desired template spec. + // +optional optional int32 updatedReplicas = 4; // availableReplicas is the total number of available pods targeted by this deployment config. + // +optional optional int32 availableReplicas = 5; // unavailableReplicas is the total number of unavailable pods targeted by this deployment config. + // +optional optional int32 unavailableReplicas = 6; // details are the reasons for the update to this deployment config. // This could be based on a change made by the user or caused by an automatic trigger + // +optional optional DeploymentDetails details = 7; // conditions represents the latest available observations of a deployment config's current state. // +patchMergeKey=type // +patchStrategy=merge + // +optional repeated DeploymentCondition conditions = 8; // Total number of ready pods targeted by this deployment. + // +optional optional int32 readyReplicas = 9; } diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go index 619c30e828..a66ce09ea5 100644 --- a/vendor/github.com/openshift/api/apps/v1/types.go +++ b/vendor/github.com/openshift/api/apps/v1/types.go @@ -304,26 +304,35 @@ type DeploymentTriggerImageChangeParams struct { type DeploymentConfigStatus struct { // latestVersion is used to determine whether the current deployment associated with a deployment // config is out of sync. + // +optional LatestVersion int64 `json:"latestVersion" protobuf:"varint,1,opt,name=latestVersion"` // observedGeneration is the most recent generation observed by the deployment config controller. + // +optional ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,2,opt,name=observedGeneration"` // replicas is the total number of pods targeted by this deployment config. 
+ // +optional Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` // updatedReplicas is the total number of non-terminated pods targeted by this deployment config // that have the desired template spec. + // +optional UpdatedReplicas int32 `json:"updatedReplicas" protobuf:"varint,4,opt,name=updatedReplicas"` // availableReplicas is the total number of available pods targeted by this deployment config. + // +optional AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,5,opt,name=availableReplicas"` // unavailableReplicas is the total number of unavailable pods targeted by this deployment config. + // +optional UnavailableReplicas int32 `json:"unavailableReplicas" protobuf:"varint,6,opt,name=unavailableReplicas"` // details are the reasons for the update to this deployment config. // This could be based on a change made by the user or caused by an automatic trigger + // +optional Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"` // conditions represents the latest available observations of a deployment config's current state. // +patchMergeKey=type // +patchStrategy=merge + // +optional Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"` // Total number of ready pods targeted by this deployment. + // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"` } diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.proto b/vendor/github.com/openshift/api/authorization/v1/generated.proto index f7d7b772a7..326ecf2e64 100644 --- a/vendor/github.com/openshift/api/authorization/v1/generated.proto +++ b/vendor/github.com/openshift/api/authorization/v1/generated.proto @@ -561,10 +561,12 @@ message SubjectRulesReviewSpec { // SubjectRulesReviewStatus is contains the result of a rules check message SubjectRulesReviewStatus { // rules is the list of rules (no particular sort) that are allowed for the subject + // +optional repeated PolicyRule rules = 1; // evaluationError can appear in combination with Rules. It means some error happened during evaluation // that may have prevented additional rules from being populated. + // +optional optional string evaluationError = 2; } diff --git a/vendor/github.com/openshift/api/authorization/v1/types.go b/vendor/github.com/openshift/api/authorization/v1/types.go index bf4071867f..ac9a6759f2 100644 --- a/vendor/github.com/openshift/api/authorization/v1/types.go +++ b/vendor/github.com/openshift/api/authorization/v1/types.go @@ -208,9 +208,11 @@ type SubjectRulesReviewSpec struct { // SubjectRulesReviewStatus is contains the result of a rules check type SubjectRulesReviewStatus struct { // rules is the list of rules (no particular sort) that are allowed for the subject + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,1,rep,name=rules"` // evaluationError can appear in combination with Rules. It means some error happened during evaluation // that may have prevented additional rules from being populated. 
+ // +optional EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,2,opt,name=evaluationError"` } diff --git a/vendor/github.com/openshift/api/build/v1/generated.proto b/vendor/github.com/openshift/api/build/v1/generated.proto index 92ae73426c..7a18a7f027 100644 --- a/vendor/github.com/openshift/api/build/v1/generated.proto +++ b/vendor/github.com/openshift/api/build/v1/generated.proto @@ -163,11 +163,13 @@ message BuildConfigSpec { // BuildConfigStatus contains current state of the build config object. message BuildConfigStatus { // lastVersion is used to inform about number of last triggered build. + // +optional optional int64 lastVersion = 1; // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger. + // +optional repeated ImageChangeTriggerStatus imageChangeTriggers = 2; } @@ -465,54 +467,67 @@ message BuildSpec { message BuildStatus { // phase is the point in the build lifecycle. Possible values are // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". + // +optional optional string phase = 1; // cancelled describes if a cancel event was triggered for the build. + // +optional optional bool cancelled = 2; // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + // +optional optional string reason = 3; // message is a human-readable message indicating details about why the build has this status. + // +optional optional string message = 4; // startTimestamp is a timestamp representing the server time when this Build started // running in a Pod. // It is represented in RFC3339 form and is in UTC. + // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTimestamp = 5; // completionTimestamp is a timestamp representing the server time when this Build was // finished, whether that build failed or succeeded. It reflects the time at which // the Pod running the Build terminated. // It is represented in RFC3339 form and is in UTC. + // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTimestamp = 6; // duration contains time.Duration object describing build time. + // +optional optional int64 duration = 7; // outputDockerImageReference contains a reference to the container image that // will be built by this build. Its value is computed from // Build.Spec.Output.To, and should include the registry address, so that // it can be used to push and pull the image. + // +optional optional string outputDockerImageReference = 8; // config is an ObjectReference to the BuildConfig this Build is based on. + // +optional optional .k8s.io.api.core.v1.ObjectReference config = 9; // output describes the container image the build has produced. + // +optional optional BuildStatusOutput output = 10; // stages contains details about each stage that occurs during the build // including start time, duration (in milliseconds), and the steps that // occured within each stage. + // +optional repeated StageInfo stages = 11; // logSnippet is the last few lines of the build log. This value is only set for builds that failed. + // +optional optional string logSnippet = 12; // conditions represents the latest available observations of a build's current state. 
// +patchMergeKey=type // +patchStrategy=merge + // +optional repeated BuildCondition conditions = 13; } diff --git a/vendor/github.com/openshift/api/build/v1/types.go b/vendor/github.com/openshift/api/build/v1/types.go index 12bf67db1a..b64aacc060 100644 --- a/vendor/github.com/openshift/api/build/v1/types.go +++ b/vendor/github.com/openshift/api/build/v1/types.go @@ -192,54 +192,67 @@ type ImageChangeCause struct { type BuildStatus struct { // phase is the point in the build lifecycle. Possible values are // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". + // +optional Phase BuildPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=BuildPhase"` // cancelled describes if a cancel event was triggered for the build. + // +optional Cancelled bool `json:"cancelled,omitempty" protobuf:"varint,2,opt,name=cancelled"` // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + // +optional Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason,casttype=StatusReason"` // message is a human-readable message indicating details about why the build has this status. + // +optional Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` // startTimestamp is a timestamp representing the server time when this Build started // running in a Pod. // It is represented in RFC3339 form and is in UTC. + // +optional StartTimestamp *metav1.Time `json:"startTimestamp,omitempty" protobuf:"bytes,5,opt,name=startTimestamp"` // completionTimestamp is a timestamp representing the server time when this Build was // finished, whether that build failed or succeeded. It reflects the time at which // the Pod running the Build terminated. // It is represented in RFC3339 form and is in UTC. + // +optional CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty" protobuf:"bytes,6,opt,name=completionTimestamp"` // duration contains time.Duration object describing build time. + // +optional Duration time.Duration `json:"duration,omitempty" protobuf:"varint,7,opt,name=duration,casttype=time.Duration"` // outputDockerImageReference contains a reference to the container image that // will be built by this build. Its value is computed from // Build.Spec.Output.To, and should include the registry address, so that // it can be used to push and pull the image. + // +optional OutputDockerImageReference string `json:"outputDockerImageReference,omitempty" protobuf:"bytes,8,opt,name=outputDockerImageReference"` // config is an ObjectReference to the BuildConfig this Build is based on. + // +optional Config *corev1.ObjectReference `json:"config,omitempty" protobuf:"bytes,9,opt,name=config"` // output describes the container image the build has produced. + // +optional Output BuildStatusOutput `json:"output,omitempty" protobuf:"bytes,10,opt,name=output"` // stages contains details about each stage that occurs during the build // including start time, duration (in milliseconds), and the steps that // occured within each stage. + // +optional Stages []StageInfo `json:"stages,omitempty" protobuf:"bytes,11,opt,name=stages"` // logSnippet is the last few lines of the build log. This value is only set for builds that failed. + // +optional LogSnippet string `json:"logSnippet,omitempty" protobuf:"bytes,12,opt,name=logSnippet"` // conditions represents the latest available observations of a build's current state. 
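The `+optional` markers added throughout these hunks affect only generated artifacts (CRD/OpenAPI schemas and documentation); serialization is still governed by the struct tags. A hypothetical field, for illustration only (not part of this patch), combining the marker with the corresponding tags:

// WidgetStatus is a made-up status type showing the convention the
// hunks above converge on: +optional drives the generated schema,
// while omitempty in the json tag controls serialization.
type WidgetStatus struct {
	// readyReplicas is the number of pods considered ready.
	// +optional
	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,1,opt,name=readyReplicas"`
}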
// +patchMergeKey=type // +patchStrategy=merge + // +optional Conditions []BuildCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,13,rep,name=conditions"` } @@ -1005,11 +1018,13 @@ const ( // BuildConfigStatus contains current state of the build config object. type BuildConfigStatus struct { // lastVersion is used to inform about number of last triggered build. + // +optional LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"` // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger. + // +optional ImageChangeTriggers []ImageChangeTriggerStatus `json:"imageChangeTriggers,omitempty" protobuf:"bytes,2,rep,name=imageChangeTriggers"` } diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go index 61302592ea..eac29a2367 100644 --- a/vendor/github.com/openshift/api/config/v1/register.go +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -72,6 +72,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ImageDigestMirrorSetList{}, &ImageTagMirrorSet{}, &ImageTagMirrorSetList{}, + &ImagePolicy{}, + &ImagePolicyList{}, + &ClusterImagePolicy{}, + &ClusterImagePolicyList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 38322b95d5..e1a98cb267 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -146,7 +146,7 @@ type AuditCustomRule struct { // If unset, the 'Default' profile is used as the default. // // +required - Profile AuditProfileType `json:"profile,omitempty"` + Profile AuditProfileType `json:"profile"` } type APIServerServingCerts struct { @@ -155,6 +155,7 @@ type APIServerServingCerts struct { // the defaultServingCertificate will be used. // +optional // +listType=atomic + // +kubebuilder:validation:MaxItems=32 NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"` } @@ -165,6 +166,7 @@ type APIServerNamedServingCert struct { // Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names. // +optional // +listType=atomic + // +kubebuilder:validation:MaxItems=64 Names []string `json:"names,omitempty"` // servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. 
// The secret must exist in the openshift-config namespace and contain the following required fields: diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index a2af4d6544..004e94723e 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -5,7 +5,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC;ExternalOIDCWithUIDAndExtraClaimMappings,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" // Authentication specifies cluster-wide settings for authentication (like OAuth and // webhook token authenticators). The canonical name of an instance is `cluster`. @@ -90,6 +90,7 @@ type AuthenticationSpec struct { // +listMapKey=name // +kubebuilder:validation:MaxItems=1 // +openshift:enable:FeatureGate=ExternalOIDC + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"` } @@ -107,6 +108,7 @@ type AuthenticationStatus struct { // If the config map or expected key is not found, no metadata is served. // If the specified metadata is not valid, no metadata is served. // The namespace for this config map is openshift-config-managed. 
+ // +optional IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` // oidcClients is where participating operators place the current OIDC client status @@ -117,6 +119,8 @@ type AuthenticationStatus struct { // +listMapKey=componentName // +kubebuilder:validation:MaxItems=20 // +openshift:enable:FeatureGate=ExternalOIDC + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + // +optional OIDCClients []OIDCClientStatus `json:"oidcClients"` } @@ -135,7 +139,7 @@ type AuthenticationList struct { } // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";None;IntegratedOAuth -// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC,enum="";None;IntegratedOAuth;OIDC +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC;ExternalOIDCWithUIDAndExtraClaimMappings,enum="";None;IntegratedOAuth;OIDC type AuthenticationType string const ( @@ -193,32 +197,50 @@ const ( ) type OIDCProvider struct { - // name of the OIDC provider + // name is a required field that configures the unique human-readable identifier + // associated with the identity provider. + // It is used to distinguish between multiple identity providers + // and has no impact on token validation or authentication mechanics. + // + // name must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 // +required Name string `json:"name"` - // issuer describes atributes of the OIDC token issuer + + // issuer is a required field that configures how the platform interacts + // with the identity provider and how tokens issued from the identity provider + // are evaluated by the Kubernetes API server. // // +required Issuer TokenIssuer `json:"issuer"` - // oidcClients contains configuration for the platform's clients that - // need to request tokens from the issuer + // oidcClients is an optional field that configures how on-cluster, + // platform clients should request tokens from the identity provider. + // oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. // // +listType=map // +listMapKey=componentNamespace // +listMapKey=componentName // +kubebuilder:validation:MaxItems=20 + // +optional OIDCClients []OIDCClientConfig `json:"oidcClients"` - // claimMappings describes rules on how to transform information from an - // ID token into a cluster identity + // claimMappings is a required field that configures the rules to be used by + // the Kubernetes API server for translating claims in a JWT token, issued + // by the identity provider, to a cluster identity. + // + // +required ClaimMappings TokenClaimMappings `json:"claimMappings"` - // claimValidationRules are rules that are applied to validate token claims to authenticate users. + // claimValidationRules is an optional field that configures the rules to + // be used by the Kubernetes API server for validating the claims in a JWT + // token issued by the identity provider. + // + // Validation rules are joined via an AND operation. // // +listType=atomic + // +optional ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` } @@ -226,17 +248,22 @@ type OIDCProvider struct { type TokenAudience string type TokenIssuer struct { - // URL is the serving URL of the token issuer. - // Must use the https:// scheme. + // issuerURL is a required field that configures the URL used to issue tokens + // by the identity provider. 
+ // The Kubernetes API server determines how authentication tokens should be handled + // by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. + // + // issuerURL must use the 'https' scheme. // // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` // +required URL string `json:"issuerURL"` - // audiences is an array of audiences that the token was issued for. - // Valid tokens must include at least one of these values in their - // "aud" claim. - // Must be set to exactly one value. + // audiences is a required field that configures the acceptable audiences + // the JWT token, issued by the identity provider, must be issued to. + // At least one of the entries must match the 'aud' claim in the JWT token. + // + // audiences must contain at least one entry and must not exceed ten entries. // // +listType=set // +kubebuilder:validation:MinItems=1 @@ -244,93 +271,293 @@ type TokenIssuer struct { // +required Audiences []TokenAudience `json:"audiences"` - // CertificateAuthority is a reference to a config map in the - // configuration namespace. The .data of the configMap must contain - // the "ca-bundle.crt" key. - // If unset, system trust is used instead. + // issuerCertificateAuthority is an optional field that configures the + // certificate authority, used by the Kubernetes API server, to validate + // the connection to the identity provider when fetching discovery information. + // + // When not specified, the system trust is used. + // + // When specified, it must reference a ConfigMap in the openshift-config + // namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' + // key in the data field of the ConfigMap. + // + // +optional CertificateAuthority ConfigMapNameReference `json:"issuerCertificateAuthority"` } type TokenClaimMappings struct { - // username is a name of the claim that should be used to construct - // usernames for the cluster identity. + // username is a required field that configures how the username of a cluster identity + // should be constructed from the claims in a JWT token issued by the identity provider. // - // Default value: "sub" - Username UsernameClaimMapping `json:"username,omitempty"` + // +required + Username UsernameClaimMapping `json:"username"` - // groups is a name of the claim that should be used to construct - // groups for the cluster identity. - // The referenced claim must use array of strings values. + // groups is an optional field that configures how the groups of a cluster identity + // should be constructed from the claims in a JWT token issued + // by the identity provider. + // When referencing a claim, if the claim is present in the JWT + // token, its value must be a list of groups separated by a comma (','). + // For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. + // + // +optional Groups PrefixedClaimMapping `json:"groups,omitempty"` + + // uid is an optional field for configuring the claim mapping + // used to construct the uid for the cluster identity. + // + // When using uid.claim to specify the claim it must be a single string value. + // When using uid.expression the expression must result in a single string value. + // + // When omitted, this means the user has no opinion and the platform + // is left to choose a default, which is subject to change over time. + // The current default is to use the 'sub' claim. 
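Taken together, the issuer fields documented above might be populated from Go as in the following sketch; all names and values are illustrative, not code from this patch:

package config_example

import configv1 "github.com/openshift/api/config/v1"

// issuer wires up a hypothetical identity provider. When
// CertificateAuthority is left empty, system trust is used instead.
var issuer = configv1.TokenIssuer{
	// issuerURL must use the https scheme.
	URL: "https://login.example.com",
	// at least one audience must match the 'aud' claim of issued JWTs.
	Audiences: []configv1.TokenAudience{"openshift"},
	// ConfigMap in openshift-config holding PEM CAs under 'ca-bundle.crt'.
	CertificateAuthority: configv1.ConfigMapNameReference{Name: "oidc-ca-bundle"},
}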
+ // + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + UID *TokenClaimOrExpressionMapping `json:"uid,omitempty"` + + // extra is an optional field for configuring the mappings + // used to construct the extra attribute for the cluster identity. + // When omitted, no extra attributes will be present on the cluster identity. + // key values for extra mappings must be unique. + // A maximum of 64 extra attribute mappings may be provided. + // + // +optional + // +kubebuilder:validation:MaxItems=64 + // +listType=map + // +listMapKey=key + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + Extra []ExtraMapping `json:"extra,omitempty"` } +// TokenClaimMapping allows specifying a JWT token +// claim to be used when mapping claims from an +// authentication token to cluster identities. type TokenClaimMapping struct { - // claim is a JWT token claim to be used in the mapping + // claim is a required field that configures the JWT token + // claim whose value is assigned to the cluster identity + // field associated with this mapping. // // +required Claim string `json:"claim"` } +// TokenClaimOrExpressionMapping allows specifying either a JWT +// token claim or CEL expression to be used when mapping claims +// from an authentication token to cluster identities. +// +kubebuilder:validation:XValidation:rule="has(self.claim) ? !has(self.expression) : has(self.expression)",message="precisely one of claim or expression must be set" +type TokenClaimOrExpressionMapping struct { + // claim is an optional field for specifying the + // JWT token claim that is used in the mapping. + // The value of this claim will be assigned to + // the field in which this mapping is associated. + // + // Precisely one of claim or expression must be set. + // claim must not be specified when expression is set. + // When specified, claim must be at least 1 character in length + // and must not exceed 256 characters in length. + // + // +optional + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:MinLength=1 + Claim string `json:"claim,omitempty"` + + // expression is an optional field for specifying a + // CEL expression that produces a string value from + // JWT token claims. + // + // CEL expressions have access to the token claims + // through a CEL variable, 'claims'. + // 'claims' is a map of claim names to claim values. + // For example, the 'sub' claim value can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation ('claims.foo.bar'). + // + // Precisely one of claim or expression must be set. + // expression must not be specified when claim is set. + // When specified, expression must be at least 1 character in length + // and must not exceed 4096 characters in length. + // + // +optional + // +kubebuilder:validation:MaxLength=4096 + // +kubebuilder:validation:MinLength=1 + Expression string `json:"expression,omitempty"` +} + +// ExtraMapping allows specifying a key and CEL expression +// to evaluate the keys' value. It is used to create additional +// mappings and attributes added to a cluster identity from +// a provided authentication token. +type ExtraMapping struct { + // key is a required field that specifies the string + // to use as the extra attribute key. + // + // key must be a domain-prefix path (e.g 'example.org/foo'). + // key must not exceed 510 characters in length. + // key must contain the '/' character, separating the domain and path characters. + // key must not be empty. 
+ // + // The domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. + // It must not exceed 253 characters in length. + // It must start and end with an alphanumeric character. + // It must only contain lower case alphanumeric characters and '-' or '.'. + // It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". + // + // The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one + // alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. + // It must not exceed 256 characters in length. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=510 + // +kubebuilder:validation:XValidation:rule="self.contains('/')",message="key must contain the '/' character" + // + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0].matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\")",message="the domain of the key must consist of only lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0].size() <= 253",message="the domain of the key must not exceed 253 characters in length" + // + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'kubernetes.io'",message="the domain 'kubernetes.io' is reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.kubernetes.io')",message="the subdomains '*.kubernetes.io' are reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'k8s.io'",message="the domain 'k8s.io' is reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.k8s.io')",message="the subdomains '*.k8s.io' are reserved for Kubernetes use" + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'openshift.io'",message="the domain 'openshift.io' is reserved for OpenShift use" + // +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.openshift.io')",message="the subdomains '*.openshift.io' are reserved for OpenShift use" + // + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[1].matches('[A-Za-z0-9/\\\\-._~%!$&\\'()*+;=:]+')",message="the path of the key must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, apostrophe, '-', '.', '_', '~', '!', '$', '&', '(', ')', '*', '+', ',', ';', '=', and ':'" + // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[1].size() <= 256",message="the path of the key must not exceed 256 characters in length" + Key string `json:"key"` + + // valueExpression is a required field to specify the CEL expression to extract + // the extra attribute value from a JWT token's claims. + // valueExpression must produce a string or string array value. + // "", [], and null are treated as the extra mapping not being present. + // Empty string values within an array are filtered out. + // + // CEL expressions have access to the token claims + // through a CEL variable, 'claims'. + // 'claims' is a map of claim names to claim values. + // For example, the 'sub' claim value can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation ('claims.foo.bar'). 
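A companion sketch (same illustrative package as the issuer example above) exercising the new uid and extra mappings added behind the ExternalOIDCWithUIDAndExtraClaimMappings feature gate; claim names and the extra key are made up:

// claimMappings maps token claims to cluster identity attributes.
var claimMappings = configv1.TokenClaimMappings{
	// username is taken verbatim from the 'email' claim.
	Username: configv1.UsernameClaimMapping{Claim: "email"},
	// uid is derived from a CEL expression over the token claims;
	// exactly one of claim or expression may be set.
	UID: &configv1.TokenClaimOrExpressionMapping{Expression: "claims.sub"},
	// extra adds a cluster-identity attribute under a domain-prefixed key.
	Extra: []configv1.ExtraMapping{{
		Key:             "example.org/department",
		ValueExpression: "claims.department",
	}},
}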
+ // + // valueExpression must not exceed 4096 characters in length. + // valueExpression must not be empty. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=4096 + ValueExpression string `json:"valueExpression"` +} + +// OIDCClientConfig configures how platform clients +// interact with identity providers as an authentication +// method type OIDCClientConfig struct { - // componentName is the name of the component that is supposed to consume this - // client configuration + // componentName is a required field that specifies the name of the platform + // component being configured to use the identity provider as an authentication mode. + // It is used in combination with componentNamespace as a unique identifier. + // + // componentName must not be an empty string ("") and must not exceed 256 characters in length. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +required ComponentName string `json:"componentName"` - // componentNamespace is the namespace of the component that is supposed to consume this - // client configuration + // componentNamespace is a required field that specifies the namespace in which the + // platform component being configured to use the identity provider as an authentication + // mode is running. + // It is used in combination with componentName as a unique identifier. + // + // componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +required ComponentNamespace string `json:"componentNamespace"` - // clientID is the identifier of the OIDC client from the OIDC provider + // clientID is a required field that configures the client identifier, from + // the identity provider, that the platform component uses for authentication + // requests made to the identity provider. + // The identity provider must accept this identifier for platform components + // to be able to use the identity provider as an authentication mode. + // + // clientID must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 // +required ClientID string `json:"clientID"` - // clientSecret refers to a secret in the `openshift-config` namespace that - // contains the client secret in the `clientSecret` key of the `.data` field + // clientSecret is an optional field that configures the client secret used + // by the platform component when making authentication requests to the identity provider. + // + // When not specified, no client secret will be used when making authentication requests + // to the identity provider. + // + // When specified, clientSecret references a Secret in the 'openshift-config' + // namespace that contains the client secret in the 'clientSecret' key of the '.data' field. + // The client secret will be used when making authentication requests to the identity provider. + // + // Public clients do not require a client secret but private + // clients do require a client secret to work with the identity provider. + // + // +optional ClientSecret SecretNameReference `json:"clientSecret"` - // extraScopes is an optional set of scopes to request tokens with. + // extraScopes is an optional field that configures the extra scopes that should + // be requested by the platform component when making authentication requests to the + // identity provider. 
+ // This is useful if you have configured claim mappings that require specific + // scopes to be requested beyond the standard OIDC scopes. + // + // When omitted, no additional scopes are requested. // // +listType=set + // +optional ExtraScopes []string `json:"extraScopes"` } +// OIDCClientStatus represents the current state +// of platform components and how they interact with +// the configured identity providers. type OIDCClientStatus struct { - // componentName is the name of the component that will consume a client configuration. + // componentName is a required field that specifies the name of the platform + // component using the identity provider as an authentication mode. + // It is used in combination with componentNamespace as a unique identifier. + // + // componentName must not be an empty string ("") and must not exceed 256 characters in length. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +required ComponentName string `json:"componentName"` - // componentNamespace is the namespace of the component that will consume a client configuration. + // componentNamespace is a required field that specifies the namespace in which the + // platform component using the identity provider as an authentication + // mode is running. + // It is used in combination with componentName as a unique identifier. + // + // componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +required ComponentNamespace string `json:"componentNamespace"` - // currentOIDCClients is a list of clients that the component is currently using. + // currentOIDCClients is an optional list of clients that the component is currently using. + // Entries must have unique issuerURL/clientID pairs. // // +listType=map // +listMapKey=issuerURL // +listMapKey=clientID + // +optional CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"` - // consumingUsers is a slice of ServiceAccounts that need to have read - // permission on the `clientSecret` secret. + // consumingUsers is an optional list of ServiceAccounts requiring + // read permissions on the `clientSecret` secret. + // + // consumingUsers must not exceed 5 entries. // // +kubebuilder:validation:MaxItems=5 // +listType=set + // +optional ConsumingUsers []ConsumingUser `json:"consumingUsers"` // conditions are used to communicate the state of the `oidcClients` entry. @@ -347,21 +574,32 @@ type OIDCClientStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty"` } +// OIDCClientReference is a reference to a platform component +// client configuration. type OIDCClientReference struct { - // OIDCName refers to the `name` of the provider from `oidcProviders` + // oidcProviderName is a required reference to the 'name' of the identity provider + // configured in 'oidcProviders' that this client is associated with. + // + // oidcProviderName must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 // +required OIDCProviderName string `json:"oidcProviderName"` - // URL is the serving URL of the token issuer. - // Must use the https:// scheme. + // issuerURL is a required field that specifies the URL of the identity + // provider that this client is configured to make requests against. + // + // issuerURL must use the 'https' scheme.
// // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` // +required IssuerURL string `json:"issuerURL"` - // clientID is the identifier of the OIDC client from the OIDC provider + // clientID is a required field that specifies the client identifier, from + // the identity provider, that the platform component is using for authentication + // requests made to the identity provider. + // + // clientID must not be empty. // // +kubebuilder:validation:MinLength=1 // +required @@ -369,35 +607,61 @@ type OIDCClientReference struct { } // +kubebuilder:validation:XValidation:rule="has(self.prefixPolicy) && self.prefixPolicy == 'Prefix' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" +// +union type UsernameClaimMapping struct { - TokenClaimMapping `json:",inline"` + // claim is a required field that configures the JWT token + // claim whose value is assigned to the cluster identity + // field associated with this mapping. + // + // claim must not be an empty string ("") and must not exceed 256 characters. + // + // +required + // +kubebuilder:validation:MinLength:=1 + // +kubebuilder:validation:MaxLength:=256 + Claim string `json:"claim"` - // prefixPolicy specifies how a prefix should apply. + // prefixPolicy is an optional field that configures how a prefix should be + // applied to the value of the JWT claim specified in the 'claim' field. + // + // Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). // - // By default, claims other than `email` will be prefixed with the issuer URL to - // prevent naming clashes with other plugins. + // When set to 'Prefix', the value specified in the prefix field will be + // prepended to the value of the JWT claim. + // The prefix field must be set when prefixPolicy is 'Prefix'. // - // Set to "NoPrefix" to disable prefixing. + // When set to 'NoPrefix', no prefix will be prepended to the value + // of the JWT claim. // - // Example: - // (1) `prefix` is set to "myoidc:" and `claim` is set to "username". - // If the JWT claim `username` contains value `userA`, the resulting - // mapped value will be "myoidc:userA". - // (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the - // JWT `email` claim contains value "userA@myoidc.tld", the resulting - // mapped value will be "myoidc:userA@myoidc.tld". - // (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, - // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", - // and `claim` is set to: - // (a) "username": the mapped value will be "https://myoidc.tld#userA" - // (b) "email": the mapped value will be "userA@myoidc.tld" + // When omitted, this means no opinion and the platform is left to choose + // any prefixes that are applied which is subject to change over time. + // Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim + // when the claim is not 'email'. 
+ // As an example, consider the following scenario: + // `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, + // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", + // and `claim` is set to: + // - "username": the mapped value will be "https://myoidc.tld#userA" + // - "email": the mapped value will be "userA@myoidc.tld" // // +kubebuilder:validation:Enum={"", "NoPrefix", "Prefix"} + // +optional + // +unionDiscriminator PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy"` + // prefix configures the prefix that should be prepended to the value + // of the JWT claim. + // + // prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. + // + // +optional + // +unionMember Prefix *UsernamePrefix `json:"prefix"` } +// UsernamePrefixPolicy configures how prefixes should be applied +// to values extracted from the JWT claims during the process of mapping +// JWT claims to cluster identity attributes. +// +enum type UsernamePrefixPolicy string var ( @@ -412,26 +676,42 @@ var ( Prefix UsernamePrefixPolicy = "Prefix" ) +// UsernamePrefix configures the string that should +// be used as a prefix for username claim mappings. type UsernamePrefix struct { + // prefixString is a required field that configures the prefix that will + // be applied to cluster identity username attribute + // during the process of mapping JWT claims to cluster identity attributes. + // + // prefixString must not be an empty string (""). + // // +kubebuilder:validation:MinLength=1 // +required PrefixString string `json:"prefixString"` } +// PrefixedClaimMapping configures a claim mapping +// that allows for an optional prefix. type PrefixedClaimMapping struct { TokenClaimMapping `json:",inline"` - // prefix is a string to prefix the value from the token in the result of the - // claim mapping. + // prefix is an optional field that configures the prefix that will be + // applied to the cluster identity attribute during the process of mapping + // JWT claims to cluster identity attributes. // - // By default, no prefixing occurs. + // When omitted (""), no prefix is applied to the cluster identity attribute. // - // Example: if `prefix` is set to "myoidc:"" and the `claim` in JWT contains + // Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains // an array of strings "a", "b" and "c", the mapping will result in an // array of string "myoidc:a", "myoidc:b" and "myoidc:c". + // + // +optional Prefix string `json:"prefix"` } +// TokenValidationRuleType represents the different +// claim validation rule types that can be configured. +// +enum type TokenValidationRuleType string const ( @@ -439,26 +719,45 @@ const ( ) type TokenClaimValidationRule struct { - // type sets the type of the validation rule + // type is an optional field that configures the type of the validation rule. + // + // Allowed values are 'RequiredClaim' and omitted (not provided or an empty string). + // + // When set to 'RequiredClaim', the Kubernetes API server + // will be configured to validate that the incoming JWT + // contains the required claim and that its value matches + // the required value. + // + // Defaults to 'RequiredClaim'. 
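For completeness, an illustrative validation rule of the documented 'RequiredClaim' type (same illustrative package as above; claim name and value are made up):

// rule rejects tokens whose 'hd' claim is not 'example.com'.
var rule = configv1.TokenClaimValidationRule{
	// "RequiredClaim" is both the default and the only allowed value.
	Type: "RequiredClaim",
	RequiredClaim: &configv1.TokenRequiredClaim{
		Claim:         "hd",
		RequiredValue: "example.com",
	},
}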
// // +kubebuilder:validation:Enum={"RequiredClaim"} // +kubebuilder:default="RequiredClaim" Type TokenValidationRuleType `json:"type"` - // requiredClaim allows configuring a required claim name and its expected - // value - RequiredClaim *TokenRequiredClaim `json:"requiredClaim"` + // requiredClaim is an optional field that configures the required claim + // and value that the Kubernetes API server will use to validate if an incoming + // JWT is valid for this identity provider. + // + // +optional + RequiredClaim *TokenRequiredClaim `json:"requiredClaim,omitempty"` } type TokenRequiredClaim struct { - // claim is a name of a required claim. Only claims with string values are - // supported. + // claim is a required field that configures the name of the required claim. + // When taken from the JWT claims, claim must be a string value. + // + // claim must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 // +required Claim string `json:"claim"` - // requiredValue is the required value for the claim. + // requiredValue is a required field that configures the value that 'claim' must + // have when taken from the incoming JWT claims. + // If the value in the JWT claims does not match, the token + // will be rejected for authentication. + // + // requiredValue must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 // +required diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go new file mode 100644 index 0000000000..ca604e05c5 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go @@ -0,0 +1,87 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterImagePolicy holds cluster-wide configuration for image signature verification +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusterimagepolicies,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2310 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=SigstoreImageVerification +// +openshift:compatibility-gen:level=1 +type ClusterImagePolicy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata"` + + // spec contains the configuration for the cluster image policy. + // +required + Spec ClusterImagePolicySpec `json:"spec"` + // status contains the observed state of the resource. + // +optional + Status ClusterImagePolicyStatus `json:"status"` +} + +// CLusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource. +type ClusterImagePolicySpec struct { + // scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2". + // Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. 
For example, docker.io/library/busybox:latest (not busybox:latest). + // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository + // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). + // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. + // This support no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. + // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories + // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. + // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. + // For additional details about the format, please refer to the document explaining the docker transport field, + // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker + // +required + // +kubebuilder:validation:MaxItems=256 + // +listType=set + Scopes []ImageScope `json:"scopes"` + // policy is a required field that contains configuration to allow scopes to be verified, and defines how + // images not matching the verification policy will be treated. + // +required + Policy Policy `json:"policy"` +} + +// +k8s:deepcopy-gen=true +type ClusterImagePolicyStatus struct { + // conditions provide details on the status of this API Resource. + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterImagePolicyList is a list of ClusterImagePolicy resources +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterImagePolicyList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ListMeta `json:"metadata"` + + // items is a list of ClusterImagePolices + // +kubebuilder:validation:MaxItems=1000 + // +required + Items []ClusterImagePolicy `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index 092bebff09..54e1de94ce 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -83,8 +83,8 @@ type ClusterVersionSpec struct { // // +optional Upstream URL `json:"upstream,omitempty"` - // channel is an identifier for explicitly requesting that a non-default - // set of updates be applied to this cluster. The default channel will be + // channel is an identifier for explicitly requesting a non-default set + // of updates to be applied to this cluster. The default channel will // contain stable updates that are appropriate for production clusters. // // +optional @@ -163,6 +163,7 @@ type ClusterVersionStatus struct { VersionHash string `json:"versionHash"` // capabilities describes the state of optional, core cluster components. + // +optional Capabilities ClusterVersionCapabilitiesStatus `json:"capabilities"` // conditions provides information about the cluster version. The condition diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go index 0ccc4a8f85..dc6967bf15 100644 --- a/vendor/github.com/openshift/api/config/v1/types_console.go +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -45,6 +45,7 @@ type ConsoleSpec struct { type ConsoleStatus struct { // The URL for the console. This will be derived from the host for the route that // is created for the console. + // +optional ConsoleURL string `json:"consoleURL"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 0709a75ae8..169e29c5c5 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -112,6 +112,7 @@ type FeatureGateStatus struct { // Only featureGates with .version in the ClusterVersion.status will be present in this list. // +listType=map // +listMapKey=version + // +optional FeatureGates []FeatureGateDetails `json:"featureGates"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_policy.go new file mode 100644 index 0000000000..54bd21adb4 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image_policy.go @@ -0,0 +1,322 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePolicy holds namespace-wide configuration for image signature verification +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
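Before the namespaced ImagePolicy counterpart below, here is a minimal sketch of a ClusterImagePolicy object assembled from the types added in this patch; the Policy, PolicyRootOfTrust, and PublicKey types it references are defined further down in types_image_policy.go. The registry names and PEM contents are placeholders, not working values.

package imagepolicyexample

import (
    configv1 "github.com/openshift/api/config/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Placeholder; keyData must carry an actual PEM public key
// ("-----BEGIN PUBLIC KEY-----" ... "-----END PUBLIC KEY-----").
var cosignPublicKey = []byte("-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----\n")

// clusterPolicy requires sigstore signatures for one registry namespace.
var clusterPolicy = configv1.ClusterImagePolicy{
    ObjectMeta: metav1.ObjectMeta{Name: "mirror-signatures"},
    Spec: configv1.ClusterImagePolicySpec{
        // A scope can be a single repository, a namespace, a registry host,
        // or a "*." wildcard for subdomain matching (at most 256 entries).
        Scopes: []configv1.ImageScope{
            "registry.example.com/team/app",
            "*.mirrors.example.com",
        },
        Policy: configv1.Policy{
            RootOfTrust: configv1.PolicyRootOfTrust{
                PolicyType: configv1.PublicKeyRootOfTrust,
                PublicKey:  &configv1.PublicKey{KeyData: cosignPublicKey},
            },
        },
    },
}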
+// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagepolicies,scope=Namespaced +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2310 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=SigstoreImageVerification +// +openshift:compatibility-gen:level=1 +type ImagePolicy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata"` + + // spec holds user settable values for configuration + // +required + Spec ImagePolicySpec `json:"spec"` + // status contains the observed state of the resource. + // +optional + Status ImagePolicyStatus `json:"status"` +} + +// ImagePolicySpec is the specification of the ImagePolicy CRD. +type ImagePolicySpec struct { + // scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2". + // Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). + // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository + // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). + // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. + // This support no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. + // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories + // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. + // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. + // For additional details about the format, please refer to the document explaining the docker transport field, + // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker + // +required + // +kubebuilder:validation:MaxItems=256 + // +listType=set + Scopes []ImageScope `json:"scopes"` + // policy is a required field that contains configuration to allow scopes to be verified, and defines how + // images not matching the verification policy will be treated. + // +required + Policy Policy `json:"policy"` +} + +// +kubebuilder:validation:XValidation:rule="size(self.split('/')[0].split('.')) == 1 ? 
self.split('/')[0].split('.')[0].split(':')[0] == 'localhost' : true",message="invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" +// +kubebuilder:validation:XValidation:rule=`self.contains('*') ? self.matches('^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$') : true`,message="invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" +// +kubebuilder:validation:XValidation:rule=`!self.contains('*') ? self.matches('^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$') : true`,message="invalid repository namespace or image specification in the image scope" +// +kubebuilder:validation:MaxLength=512 +type ImageScope string + +// Policy defines the verification policy for the items in the scopes list. +type Policy struct { + // rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. + // This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated. + // +required + RootOfTrust PolicyRootOfTrust `json:"rootOfTrust"` + // signedIdentity is an optional field specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec, such as when mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope. + // The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is "MatchRepoDigestOrExact". + // +optional + SignedIdentity *PolicyIdentity `json:"signedIdentity,omitempty"` +} + +// PolicyRootOfTrust defines the root of trust based on the selected policyType. +// +union +// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'PublicKey' ? has(self.publicKey) : !has(self.publicKey)",message="publicKey is required when policyType is PublicKey, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'FulcioCAWithRekor' ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)",message="fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=SigstoreImageVerificationPKI,rule="has(self.policyType) && self.policyType == 'PKI' ? has(self.pki) : !has(self.pki)",message="pki is required when policyType is PKI, and forbidden otherwise" +type PolicyRootOfTrust struct { + // policyType is a required field specifies the type of the policy for verification. This field must correspond to how the policy was generated. + // Allowed values are "PublicKey", "FulcioCAWithRekor", and "PKI". + // When set to "PublicKey", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. + // When set to "FulcioCAWithRekor", the policy is based on the Fulcio certification and incorporates a Rekor verification. 
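The ImageScope format above is easiest to read from examples. The values below are illustrative hostnames chosen to exercise each documented form; the commented-out strings show shapes the CEL validation rejects.

package scopeexample

import configv1 "github.com/openshift/api/config/v1"

var validScopes = []configv1.ImageScope{
    "docker.io/library/busybox:latest", // individual image, fully expanded with a tag
    "docker.io/library/busybox",        // single repository
    "docker.io/library",                // repository namespace
    "quay.io:8443",                     // registry host with a port
    "localhost:5000/dev",               // localhost needs no fully qualified domain
    "*.registry.example.com",           // wildcard, subdomain matching only
}

// Rejected by the validation rules above (shown for contrast):
//   "busybox:latest" - no fully qualified domain name or 'localhost'
//   "example*.*.com" - a wildcard may only appear at the start of the domain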
+ // When set to "PKI", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate. + // +unionDiscriminator + // +required + PolicyType PolicyType `json:"policyType"` + // publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. + // publicKey is required when policyType is PublicKey, and forbidden otherwise. + // +optional + PublicKey *PublicKey `json:"publicKey,omitempty"` + // fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. + // fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise + // For more information about Fulcio and Rekor, please refer to the document at: + // https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor + // +optional + FulcioCAWithRekor *FulcioCAWithRekor `json:"fulcioCAWithRekor,omitempty"` + // pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. + // pki is required when policyType is PKI, and forbidden otherwise. + // +optional + // +openshift:enable:FeatureGate=SigstoreImageVerificationPKI + PKI *PKI `json:"pki,omitempty"` +} + +// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=PublicKey;FulcioCAWithRekor +// +openshift:validation:FeatureGateAwareEnum:featureGate=SigstoreImageVerificationPKI,enum=PublicKey;FulcioCAWithRekor;PKI +type PolicyType string + +const ( + PublicKeyRootOfTrust PolicyType = "PublicKey" + FulcioCAWithRekorRootOfTrust PolicyType = "FulcioCAWithRekor" + PKIRootOfTrust PolicyType = "PKI" +) + +// PublicKey defines the root of trust based on a sigstore public key. +type PublicKey struct { + // keyData is a required field contains inline base64-encoded data for the PEM format public key. + // keyData must be at most 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:MinLength=68 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the keyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the keyData must end with base64 encoding of '-----END PUBLIC KEY-----'." + KeyData []byte `json:"keyData"` + // rekorKeyData is an optional field contains inline base64-encoded data for the PEM format from the Rekor public key. + // rekorKeyData must be at most 8192 characters. + // +optional + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the rekorKeyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the rekorKeyData must end with base64 encoding of '-----END PUBLIC KEY-----'." + RekorKeyData []byte `json:"rekorKeyData,omitempty"` +} + +// FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. 
+type FulcioCAWithRekor struct { + // fulcioCAData is a required field contains inline base64-encoded data for the PEM format fulcio CA. + // fulcioCAData must be at most 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the fulcioCAData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the fulcioCAData must end with base64 encoding of '-----END CERTIFICATE-----'." + FulcioCAData []byte `json:"fulcioCAData"` + // rekorKeyData is a required field contains inline base64-encoded data for the PEM format from the Rekor public key. + // rekorKeyData must be at most 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the rekorKeyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the rekorKeyData must end with base64 encoding of '-----END PUBLIC KEY-----'." + RekorKeyData []byte `json:"rekorKeyData"` + // fulcioSubject is a required field specifies OIDC issuer and the email of the Fulcio authentication configuration. + // +required + FulcioSubject PolicyFulcioSubject `json:"fulcioSubject"` +} + +// PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration. +type PolicyFulcioSubject struct { + // oidcIssuer is a required filed contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length. + // It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. + // When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. + // Example: "https://expected.OIDC.issuer/" + // +required + // +kubebuilder:validation:MaxLength=2048 + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="oidcIssuer must be a valid URL" + OIDCIssuer string `json:"oidcIssuer"` + // signedEmail is a required field holds the email address that the Fulcio certificate is issued for. + // The signedEmail must be a valid email address and at most 320 characters in length. + // Example: "expected-signing-user@example.com" + // +required + // +kubebuilder:validation:MaxLength=320 + // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address" + SignedEmail string `json:"signedEmail"` +} + +// PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates. +type PKI struct { + // caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:MinLength=72 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caRootsData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'." 
+ // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caRootsData must end with base64 encoding of '-----END CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caRootsData must be base64 encoding of valid PEM format data contain the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers." + CertificateAuthorityRootsData []byte `json:"caRootsData"` + // caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. + // caIntermediatesData requires caRootsData to be set. + // +optional + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caIntermediatesData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caIntermediatesData must end with base64 encoding of '-----END CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caIntermediatesData must be base64 encoding of valid PEM format data contain the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers." + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:MinLength=72 + CertificateAuthorityIntermediatesData []byte `json:"caIntermediatesData,omitempty"` + + // pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued. + // +required + PKICertificateSubject PKICertificateSubject `json:"pkiCertificateSubject"` +} + +// PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued. +// +kubebuilder:validation:XValidation:rule="has(self.email) || has(self.hostname)", message="at least one of email or hostname must be set in pkiCertificateSubject" +// +openshift:enable:FeatureGate=SigstoreImageVerificationPKI +type PKICertificateSubject struct { + // email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. + // The email must be a valid email address and at most 320 characters in length. + // +optional + // +kubebuilder:validation:MaxLength:=320 + // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address" + Email string `json:"email,omitempty"` + // hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. + // The hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. + // It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk. 
+ // +optional + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="self.startsWith('*.') ? !format.dns1123Subdomain().validate(self.replace('*.', '', 1)).hasValue() : !format.dns1123Subdomain().validate(self).hasValue()",message="hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.'. It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk." + Hostname string `json:"hostname,omitempty"` +} + +// PolicyIdentity defines image identity the signature claims about the image. When omitted, the default matchPolicy is "MatchRepoDigestOrExact". +// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'ExactRepository') ? has(self.exactRepository) : !has(self.exactRepository)",message="exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'RemapIdentity') ? has(self.remapIdentity) : !has(self.remapIdentity)",message="remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" +// +union +type PolicyIdentity struct { + // matchPolicy is a required filed specifies matching strategy to verify the image identity in the signature against the image scope. + // Allowed values are "MatchRepoDigestOrExact", "MatchRepository", "ExactRepository", "RemapIdentity". When omitted, the default value is "MatchRepoDigestOrExact". + // When set to "MatchRepoDigestOrExact", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. + // When set to "MatchRepository", the identity in the signature must be in the same repository as the image identity. + // When set to "ExactRepository", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by "repository". + // When set to "RemapIdentity", the remapIdentity must be specified. The signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the the image identity matches the specified remapPrefix. + // +unionDiscriminator + // +required + MatchPolicy IdentityMatchPolicy `json:"matchPolicy"` + // exactRepository specifies the repository that must be exactly matched by the identity in the signature. + // exactRepository is required if matchPolicy is set to "ExactRepository". It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity. + // +optional + PolicyMatchExactRepository *PolicyMatchExactRepository `json:"exactRepository,omitempty"` + // remapIdentity specifies the prefix remapping rule for verifying image identity. + // remapIdentity is required if matchPolicy is set to "RemapIdentity". It is used to verify that the signature claims a different registry/repository prefix than the original image. + // +optional + PolicyMatchRemapIdentity *PolicyMatchRemapIdentity `json:"remapIdentity,omitempty"` +} + +// +kubebuilder:validation:MaxLength=512 +// +kubebuilder:validation:XValidation:rule=`self.matches('.*:([\\w][\\w.-]{0,127})$')? 
self.matches('^(localhost:[0-9]+)$'): true`,message="invalid repository or prefix in the signedIdentity, should not include the tag or digest" +// +kubebuilder:validation:XValidation:rule=`self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$')`,message="invalid repository or prefix in the signedIdentity. The repository or prefix must starts with 'localhost' or a valid '.' separated domain. If contains registry paths, the path component names must start with at least one letter or number, with following parts able to be separated by one period, one or two underscore and multiple dashes." +type IdentityRepositoryPrefix string + +type PolicyMatchExactRepository struct { + // repository is the reference of the image identity to be matched. + // repository is required if matchPolicy is set to "ExactRepository". + // The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". For example, docker.io/library/busybox + // +required + Repository IdentityRepositoryPrefix `json:"repository"` +} + +type PolicyMatchRemapIdentity struct { + // prefix is required if matchPolicy is set to "RemapIdentity". + // prefix is the prefix of the image identity to be matched. + // If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). + // This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure. + // The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. + // +required + Prefix IdentityRepositoryPrefix `json:"prefix"` + // signedPrefix is required if matchPolicy is set to "RemapIdentity". + // signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. + // +required + SignedPrefix IdentityRepositoryPrefix `json:"signedPrefix"` +} + +// IdentityMatchPolicy defines the type of matching for "matchPolicy". 
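To illustrate the signedIdentity matching strategies, the sketch below uses the IdentityMatchPolicy constants declared immediately after this comment; the mirror and upstream registry names are placeholders.

package identityexample

import configv1 "github.com/openshift/api/config/v1"

// RemapIdentity: images pulled from the mirror prefix are verified against
// signatures that name the upstream prefix instead.
var remappedIdentity = configv1.PolicyIdentity{
    MatchPolicy: configv1.IdentityMatchPolicyRemapIdentity,
    PolicyMatchRemapIdentity: &configv1.PolicyMatchRemapIdentity{
        Prefix:       "mirror.example.com/upstream",
        SignedPrefix: "registry.example.com",
    },
}

// ExactRepository: the signature must claim exactly this repository.
var exactIdentity = configv1.PolicyIdentity{
    MatchPolicy: configv1.IdentityMatchPolicyExactRepository,
    PolicyMatchExactRepository: &configv1.PolicyMatchExactRepository{
        Repository: "docker.io/library/busybox",
    },
}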
+// +kubebuilder:validation:Enum=MatchRepoDigestOrExact;MatchRepository;ExactRepository;RemapIdentity +type IdentityMatchPolicy string + +const ( + IdentityMatchPolicyMatchRepoDigestOrExact IdentityMatchPolicy = "MatchRepoDigestOrExact" + IdentityMatchPolicyMatchRepository IdentityMatchPolicy = "MatchRepository" + IdentityMatchPolicyExactRepository IdentityMatchPolicy = "ExactRepository" + IdentityMatchPolicyRemapIdentity IdentityMatchPolicy = "RemapIdentity" +) + +// +k8s:deepcopy-gen=true +type ImagePolicyStatus struct { + // conditions provide details on the status of this API Resource. + // condition type 'Pending' indicates that the customer resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid. + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePolicyList is a list of ImagePolicy resources +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImagePolicyList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ListMeta `json:"metadata"` + + // items is a list of ImagePolicies + // +kubebuilder:validation:MaxItems=1000 + // +required + Items []ImagePolicy `json:"items"` +} + +const ( + // ImagePolicyPending indicates that the customer resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid. + ImagePolicyPending = "Pending" + // ImagePolicyApplied indicates that the policy has been applied + ImagePolicyApplied = "Applied" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index e0c46ade11..1fc06418c7 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -62,11 +62,13 @@ type InfrastructureStatus struct { // infrastructureName uniquely identifies a cluster with a human friendly name. // Once set it should not be changed. Must be of max length 27 and must have only // alphanumeric or hyphen characters. + // +optional InfrastructureName string `json:"infrastructureName"` // platform is the underlying infrastructure provider for the cluster. // // Deprecated: Use platformStatus.type instead. + // +optional Platform PlatformType `json:"platform,omitempty"` // platformStatus holds status information specific to the underlying @@ -78,17 +80,20 @@ type InfrastructureStatus struct { // etcd servers and clients. // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery // deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release. + // +optional EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"` // apiServerURL is a valid URI with scheme 'https', address and // optionally a port (defaulting to 443). 
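A small consumer-side sketch of the Pending condition defined for the image policies above; the helper name is hypothetical and meta.IsStatusConditionTrue comes from k8s.io/apimachinery.

package statusexample

import (
    configv1 "github.com/openshift/api/config/v1"
    "k8s.io/apimachinery/pkg/api/meta"
)

// isPending reports whether the policy was overridden by a cluster-wide policy
// or carries an invalid scope, per the Pending condition documented above.
func isPending(p *configv1.ImagePolicy) bool {
    return meta.IsStatusConditionTrue(p.Status.Conditions, configv1.ImagePolicyPending)
}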
apiServerURL can be used by components like the web console // to tell users where to find the Kubernetes API. + // +optional APIServerURL string `json:"apiServerURL"` // apiServerInternalURL is a valid URI with scheme 'https', // address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components // like kubelets, to contact the Kubernetes API server using the // infrastructure provider rather than Kubernetes networking. + // +optional APIServerInternalURL string `json:"apiServerInternalURI"` // controlPlaneTopology expresses the expectations for operands that normally run on control nodes. @@ -99,7 +104,10 @@ type InfrastructureStatus struct { // its components are not visible within the cluster. // +kubebuilder:default=HighlyAvailable // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=HighlyAvailable;SingleReplica;External - // +openshift:validation:FeatureGateAwareEnum:featureGate=HighlyAvailableArbiter;DualReplica,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;DualReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate=HighlyAvailableArbiter,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate=DualReplica,enum=HighlyAvailable;SingleReplica;DualReplica;External + // +openshift:validation:FeatureGateAwareEnum:requiredFeatureGate=HighlyAvailableArbiter;DualReplica,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;DualReplica;External + // +optional ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` // infrastructureTopology expresses the expectations for infrastructure services that do not run on control @@ -111,7 +119,8 @@ type InfrastructureStatus struct { // NOTE: External topology mode is not applicable for this field. // +kubebuilder:default=HighlyAvailable // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica - InfrastructureTopology TopologyMode `json:"infrastructureTopology"` + // +optional + InfrastructureTopology TopologyMode `json:"infrastructureTopology,omitempty"` // cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. // CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. @@ -628,7 +637,7 @@ const ( ) // GCPServiceEndpointName is the name of the GCP Service Endpoint. -// +kubebuilder:validation:Enum=Compute;Container;CloudResourceManager;DNS;File;IAM;ServiceUsage;Storage;TagManager +// +kubebuilder:validation:Enum=Compute;Container;CloudResourceManager;DNS;File;IAM;ServiceUsage;Storage type GCPServiceEndpointName string const ( @@ -655,9 +664,6 @@ const ( // GCPServiceEndpointNameStorage is the name used for the GCP Storage Service endpoint. GCPServiceEndpointNameStorage GCPServiceEndpointName = "Storage" - - // GCPServiceEndpointNameTagManager is the name used for the GCP Tag Manager Service endpoint. - GCPServiceEndpointNameTagManager GCPServiceEndpointName = "TagManager" ) // GCPServiceEndpoint store the configuration of a custom url to @@ -695,8 +701,8 @@ type GCPServiceEndpoint struct { type GCPPlatformSpec struct{} // GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider. 
-// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation" type GCPPlatformStatus struct { // resourceGroupName is the Project ID for new GCP resources created for the cluster. ProjectID string `json:"projectID"` @@ -713,7 +719,6 @@ type GCPPlatformStatus struct { // +listType=map // +listMapKey=key // +optional - // +openshift:enable:FeatureGate=GCPLabelsTags ResourceLabels []GCPResourceLabel `json:"resourceLabels,omitempty"` // resourceTags is a list of additional tags to apply to GCP resources created for the cluster. @@ -724,7 +729,6 @@ type GCPPlatformStatus struct { // +listType=map // +listMapKey=key // +optional - // +openshift:enable:FeatureGate=GCPLabelsTags ResourceTags []GCPResourceTag `json:"resourceTags,omitempty"` // This field was introduced and removed under tech preview. @@ -753,7 +757,7 @@ type GCPPlatformStatus struct { // The maximum number of endpoint overrides allowed is 9. // +listType=map // +listMapKey=name - // +kubebuilder:validation:MaxItems=9 + // +kubebuilder:validation:MaxItems=8 // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="only 1 endpoint override is permitted per GCP service name" // +optional // +openshift:enable:FeatureGate=GCPCustomAPIEndpoints @@ -1006,7 +1010,6 @@ type BareMetalPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"` @@ -1220,7 +1223,6 @@ type OvirtPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *OvirtPlatformLoadBalancer `json:"loadBalancer,omitempty"` } @@ -1557,8 +1559,7 @@ type VSpherePlatformSpec struct { // + If VCenters is not defined use the existing cloud-config configmap defined // + in openshift-config. // +kubebuilder:validation:MinItems=0 - // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 - // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiVCenters,maxItems=3 + // +kubebuilder:validation:MaxItems=3 // +kubebuilder:validation:XValidation:rule="size(self) != size(oldSelf) ? 
size(oldSelf) == 0 && size(self) < 2 : true",message="vcenters cannot be added or removed once set" // +listType=atomic // +optional @@ -1670,7 +1671,6 @@ type VSpherePlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"` @@ -2088,7 +2088,6 @@ type NutanixPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *NutanixPlatformLoadBalancer `json:"loadBalancer,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go index 9492e08a72..f70fe8f440 100644 --- a/vendor/github.com/openshift/api/config/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -150,7 +150,7 @@ type AWSIngressSpec struct { // +unionDiscriminator // +kubebuilder:validation:Enum:=NLB;Classic // +required - Type AWSLBType `json:"type,omitempty"` + Type AWSLBType `json:"type"` } type AWSLBType string diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index 41dc2eb97b..c0d1602b37 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -93,20 +93,25 @@ type NetworkSpec struct { type NetworkStatus struct { // IP address pool to use for pod IPs. // +listType=atomic + // +optional ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` // IP address pool for services. // Currently, we only support a single entry here. // +listType=atomic + // +optional ServiceNetwork []string `json:"serviceNetwork,omitempty"` // networkType is the plugin that is deployed (e.g. OVNKubernetes). + // +optional NetworkType string `json:"networkType,omitempty"` // clusterNetworkMTU is the MTU for inter-pod networking. + // +optional ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"` // migration contains the cluster network migration configuration. + // +optional Migration *NetworkMigration `json:"migration,omitempty"` // conditions represents the observations of a network.config current state. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go index 1fddfa51e5..a4971a20c5 100644 --- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -28,6 +28,7 @@ type OperatorHubSpec struct { type OperatorHubStatus struct { // sources encapsulates the result of applying the configuration for each // hub source + // +optional Sources []HubSourceStatus `json:"sources,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 22bd77fc5b..70edc17699 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -1024,6 +1024,112 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicy) DeepCopyInto(out *ClusterImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicy. +func (in *ClusterImagePolicy) DeepCopy() *ClusterImagePolicy { + if in == nil { + return nil + } + out := new(ClusterImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicyList) DeepCopyInto(out *ClusterImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyList. +func (in *ClusterImagePolicyList) DeepCopy() *ClusterImagePolicyList { + if in == nil { + return nil + } + out := new(ClusterImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicySpec) DeepCopyInto(out *ClusterImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicySpec. 
+func (in *ClusterImagePolicySpec) DeepCopy() *ClusterImagePolicySpec { + if in == nil { + return nil + } + out := new(ClusterImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicyStatus) DeepCopyInto(out *ClusterImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyStatus. +func (in *ClusterImagePolicyStatus) DeepCopy() *ClusterImagePolicyStatus { + if in == nil { + return nil + } + out := new(ClusterImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { *out = *in @@ -2021,6 +2127,22 @@ func (in *ExternalPlatformStatus) DeepCopy() *ExternalPlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping. +func (in *ExtraMapping) DeepCopy() *ExtraMapping { + if in == nil { + return nil + } + out := new(ExtraMapping) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FeatureGate) DeepCopyInto(out *FeatureGate) { *out = *in @@ -2213,6 +2335,33 @@ func (in *FeatureGateTests) DeepCopy() *FeatureGateTests { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FulcioCAWithRekor) DeepCopyInto(out *FulcioCAWithRekor) { + *out = *in + if in.FulcioCAData != nil { + in, out := &in.FulcioCAData, &out.FulcioCAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.FulcioSubject = in.FulcioSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FulcioCAWithRekor. +func (in *FulcioCAWithRekor) DeepCopy() *FulcioCAWithRekor { + if in == nil { + return nil + } + out := new(FulcioCAWithRekor) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) { *out = *in @@ -2906,6 +3055,112 @@ func (in *ImageList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicy) DeepCopyInto(out *ImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicy. 
+func (in *ImagePolicy) DeepCopy() *ImagePolicy { + if in == nil { + return nil + } + out := new(ImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyList) DeepCopyInto(out *ImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyList. +func (in *ImagePolicyList) DeepCopy() *ImagePolicyList { + if in == nil { + return nil + } + out := new(ImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicySpec) DeepCopyInto(out *ImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicySpec. +func (in *ImagePolicySpec) DeepCopy() *ImagePolicySpec { + if in == nil { + return nil + } + out := new(ImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyStatus) DeepCopyInto(out *ImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyStatus. +func (in *ImagePolicyStatus) DeepCopy() *ImagePolicyStatus { + if in == nil { + return nil + } + out := new(ImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { *out = *in @@ -4716,6 +4971,49 @@ func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PKI) DeepCopyInto(out *PKI) { + *out = *in + if in.CertificateAuthorityRootsData != nil { + in, out := &in.CertificateAuthorityRootsData, &out.CertificateAuthorityRootsData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CertificateAuthorityIntermediatesData != nil { + in, out := &in.CertificateAuthorityIntermediatesData, &out.CertificateAuthorityIntermediatesData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.PKICertificateSubject = in.PKICertificateSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKI. +func (in *PKI) DeepCopy() *PKI { + if in == nil { + return nil + } + out := new(PKI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PKICertificateSubject) DeepCopyInto(out *PKICertificateSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKICertificateSubject. +func (in *PKICertificateSubject) DeepCopy() *PKICertificateSubject { + if in == nil { + return nil + } + out := new(PKICertificateSubject) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { *out = *in @@ -4888,6 +5186,133 @@ func (in *PlatformStatus) DeepCopy() *PlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + in.RootOfTrust.DeepCopyInto(&out.RootOfTrust) + if in.SignedIdentity != nil { + in, out := &in.SignedIdentity, &out.SignedIdentity + *out = new(PolicyIdentity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyFulcioSubject) DeepCopyInto(out *PolicyFulcioSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyFulcioSubject. +func (in *PolicyFulcioSubject) DeepCopy() *PolicyFulcioSubject { + if in == nil { + return nil + } + out := new(PolicyFulcioSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyIdentity) DeepCopyInto(out *PolicyIdentity) { + *out = *in + if in.PolicyMatchExactRepository != nil { + in, out := &in.PolicyMatchExactRepository, &out.PolicyMatchExactRepository + *out = new(PolicyMatchExactRepository) + **out = **in + } + if in.PolicyMatchRemapIdentity != nil { + in, out := &in.PolicyMatchRemapIdentity, &out.PolicyMatchRemapIdentity + *out = new(PolicyMatchRemapIdentity) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyIdentity. 
+func (in *PolicyIdentity) DeepCopy() *PolicyIdentity { + if in == nil { + return nil + } + out := new(PolicyIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMatchExactRepository) DeepCopyInto(out *PolicyMatchExactRepository) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchExactRepository. +func (in *PolicyMatchExactRepository) DeepCopy() *PolicyMatchExactRepository { + if in == nil { + return nil + } + out := new(PolicyMatchExactRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMatchRemapIdentity) DeepCopyInto(out *PolicyMatchRemapIdentity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchRemapIdentity. +func (in *PolicyMatchRemapIdentity) DeepCopy() *PolicyMatchRemapIdentity { + if in == nil { + return nil + } + out := new(PolicyMatchRemapIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRootOfTrust) DeepCopyInto(out *PolicyRootOfTrust) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(PublicKey) + (*in).DeepCopyInto(*out) + } + if in.FulcioCAWithRekor != nil { + in, out := &in.FulcioCAWithRekor, &out.FulcioCAWithRekor + *out = new(FulcioCAWithRekor) + (*in).DeepCopyInto(*out) + } + if in.PKI != nil { + in, out := &in.PKI, &out.PKI + *out = new(PKI) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRootOfTrust. +func (in *PolicyRootOfTrust) DeepCopy() *PolicyRootOfTrust { + if in == nil { + return nil + } + out := new(PolicyRootOfTrust) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) { *out = *in @@ -5188,6 +5613,32 @@ func (in *ProxyStatus) DeepCopy() *ProxyStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicKey) DeepCopyInto(out *PublicKey) { + *out = *in + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicKey. +func (in *PublicKey) DeepCopy() *PublicKey { + if in == nil { + return nil + } + out := new(PublicKey) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
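The generated DeepCopyInto/DeepCopy helpers above duplicate nested byte slices and pointers instead of aliasing them. A minimal sketch of what that guarantees for callers, assuming the vendored github.com/openshift/api/config/v1 package is imported as configv1 and using placeholder key bytes:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	orig := &configv1.PublicKey{
		KeyData:      []byte("-----BEGIN PUBLIC KEY-----"), // placeholder PEM data
		RekorKeyData: []byte("-----BEGIN PUBLIC KEY-----"),
	}

	// DeepCopyInto allocates fresh slices for KeyData and RekorKeyData, so
	// mutating the copy cannot corrupt the original object.
	cp := orig.DeepCopy()
	cp.KeyData[0] = '#'

	fmt.Println(string(orig.KeyData)) // still "-----BEGIN PUBLIC KEY-----"
}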
func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { *out = *in @@ -5727,6 +6178,16 @@ func (in *TokenClaimMappings) DeepCopyInto(out *TokenClaimMappings) { *out = *in in.Username.DeepCopyInto(&out.Username) out.Groups = in.Groups + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(TokenClaimOrExpressionMapping) + **out = **in + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make([]ExtraMapping, len(*in)) + copy(*out, *in) + } return } @@ -5740,6 +6201,22 @@ func (in *TokenClaimMappings) DeepCopy() *TokenClaimMappings { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimOrExpressionMapping) DeepCopyInto(out *TokenClaimOrExpressionMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimOrExpressionMapping. +func (in *TokenClaimOrExpressionMapping) DeepCopy() *TokenClaimOrExpressionMapping { + if in == nil { + return nil + } + out := new(TokenClaimOrExpressionMapping) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) { *out = *in @@ -5860,7 +6337,6 @@ func (in *UpdateHistory) DeepCopy() *UpdateHistory { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) { *out = *in - out.TokenClaimMapping = in.TokenClaimMapping if in.Prefix != nil { in, out := &in.Prefix, &out.Prefix *out = new(UsernamePrefix) diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index 6ced2292ac..91881630b8 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -30,6 +30,7 @@ authentications.config.openshift.io: Category: "" FeatureGates: - ExternalOIDC + - ExternalOIDCWithUIDAndExtraClaimMappings FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -65,6 +66,30 @@ builds.config.openshift.io: TopLevelFeatureGates: [] Version: v1 +clusterimagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2310 + CRDName: clusterimagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + - SigstoreImageVerificationPKI + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterImagePolicy + Labels: {} + PluralName: clusterimagepolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1 + clusteroperators.config.openshift.io: Annotations: include.release.openshift.io/self-managed-high-availability: "true" @@ -281,6 +306,30 @@ imagedigestmirrorsets.config.openshift.io: TopLevelFeatureGates: [] Version: v1 +imagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2310 + CRDName: imagepolicies.config.openshift.io + Capability: "" + Category: 
"" + FeatureGates: + - SigstoreImageVerification + - SigstoreImageVerificationPKI + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImagePolicy + Labels: {} + PluralName: imagepolicies + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1 + imagetagmirrorsets.config.openshift.io: Annotations: release.openshift.io/bootstrap-required: "true" @@ -313,17 +362,15 @@ infrastructures.config.openshift.io: Category: "" FeatureGates: - AWSClusterHostedDNS - - BareMetalLoadBalancer - DualReplica - DyanmicServiceEndpointIBMCloud - GCPClusterHostedDNS - GCPCustomAPIEndpoints - - GCPLabelsTags - HighlyAvailableArbiter + - HighlyAvailableArbiter+DualReplica - NutanixMultiSubnets - VSphereHostVMGroupZonal - VSphereMultiNetworks - - VSphereMultiVCenters FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 83d16f09f0..eb78ad7ca6 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -396,12 +396,23 @@ func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { return map_DeprecatedWebhookTokenAuthenticator } +var map_ExtraMapping = map[string]string{ + "": "ExtraMapping allows specifying a key and CEL expression to evaluate the keys' value. It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token.", + "key": "key is a required field that specifies the string to use as the extra attribute key.\n\nkey must be a domain-prefix path (e.g 'example.org/foo'). key must not exceed 510 characters in length. key must contain the '/' character, separating the domain and path characters. key must not be empty.\n\nThe domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. It must not exceed 253 characters in length. It must start and end with an alphanumeric character. It must only contain lower case alphanumeric characters and '-' or '.'. It must not use the reserved domains, or be subdomains of, \"kubernetes.io\", \"k8s.io\", and \"openshift.io\".\n\nThe path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. It must not exceed 256 characters in length.", + "valueExpression": "valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. \"\", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nvalueExpression must not exceed 4096 characters in length. 
valueExpression must not be empty.", +} + +func (ExtraMapping) SwaggerDoc() map[string]string { + return map_ExtraMapping +} + var map_OIDCClientConfig = map[string]string{ - "componentName": "componentName is the name of the component that is supposed to consume this client configuration", - "componentNamespace": "componentNamespace is the namespace of the component that is supposed to consume this client configuration", - "clientID": "clientID is the identifier of the OIDC client from the OIDC provider", - "clientSecret": "clientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", - "extraScopes": "extraScopes is an optional set of scopes to request tokens with.", + "": "OIDCClientConfig configures how platform clients interact with identity providers as an authentication method", + "componentName": "componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier.\n\ncomponentName must not be an empty string (\"\") and must not exceed 256 characters in length.", + "componentNamespace": "componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running. It is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", + "clientID": "clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode.\n\nclientID must not be an empty string (\"\").", + "clientSecret": "clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider.\n\nWhen not specified, no client secret will be used when making authentication requests to the identity provider.\n\nWhen specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field. The client secret will be used when making authentication requests to the identity provider.\n\nPublic clients do not require a client secret but private clients do require a client secret to work with the identity provider.", + "extraScopes": "extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. This is useful if you have configured claim mappings that requires specific scopes to be requested beyond the standard OIDC scopes.\n\nWhen omitted, no additional scopes are requested.", } func (OIDCClientConfig) SwaggerDoc() map[string]string { @@ -409,9 +420,10 @@ func (OIDCClientConfig) SwaggerDoc() map[string]string { } var map_OIDCClientReference = map[string]string{ - "oidcProviderName": "OIDCName refers to the `name` of the provider from `oidcProviders`", - "issuerURL": "URL is the serving URL of the token issuer. 
Must use the https:// scheme.", - "clientID": "clientID is the identifier of the OIDC client from the OIDC provider", + "": "OIDCClientReference is a reference to a platform component client configuration.", + "oidcProviderName": "oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with.\n\noidcProviderName must not be an empty string (\"\").", + "issuerURL": "issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against.\n\nissuerURL must use the 'https' scheme.", + "clientID": "clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider.\n\nclientID must not be empty.", } func (OIDCClientReference) SwaggerDoc() map[string]string { @@ -419,10 +431,11 @@ func (OIDCClientReference) SwaggerDoc() map[string]string { } var map_OIDCClientStatus = map[string]string{ - "componentName": "componentName is the name of the component that will consume a client configuration.", - "componentNamespace": "componentNamespace is the namespace of the component that will consume a client configuration.", - "currentOIDCClients": "currentOIDCClients is a list of clients that the component is currently using.", - "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.", + "": "OIDCClientStatus represents the current state of platform components and how they interact with the configured identity providers.", + "componentName": "componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier.\n\ncomponentName must not be an empty string (\"\") and must not exceed 256 characters in length.", + "componentNamespace": "componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running. It is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", + "currentOIDCClients": "currentOIDCClients is an optional list of clients that the component is currently using. Entries must have unique issuerURL/clientID pairs.", + "consumingUsers": "consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret.\n\nconsumingUsers must not exceed 5 entries.", "conditions": "conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. 
If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", } @@ -431,11 +444,11 @@ func (OIDCClientStatus) SwaggerDoc() map[string]string { } var map_OIDCProvider = map[string]string{ - "name": "name of the OIDC provider", - "issuer": "issuer describes atributes of the OIDC token issuer", - "oidcClients": "oidcClients contains configuration for the platform's clients that need to request tokens from the issuer", - "claimMappings": "claimMappings describes rules on how to transform information from an ID token into a cluster identity", - "claimValidationRules": "claimValidationRules are rules that are applied to validate token claims to authenticate users.", + "name": "name is a required field that configures the unique human-readable identifier associated with the identity provider. It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics.\n\nname must not be an empty string (\"\").", + "issuer": "issuer is a required field that configures how the platform interacts with the identity provider and how tokens issued from the identity provider are evaluated by the Kubernetes API server.", + "oidcClients": "oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs.", + "claimMappings": "claimMappings is a required field that configures the rules to be used by the Kubernetes API server for translating claims in a JWT token, issued by the identity provider, to a cluster identity.", + "claimValidationRules": "claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider.\n\nValidation rules are joined via an AND operation.", } func (OIDCProvider) SwaggerDoc() map[string]string { @@ -443,7 +456,8 @@ func (OIDCProvider) SwaggerDoc() map[string]string { } var map_PrefixedClaimMapping = map[string]string{ - "prefix": "prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", + "": "PrefixedClaimMapping configures a claim mapping that allows for an optional prefix.", + "prefix": "prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes.\n\nWhen omitted (\"\"), no prefix is applied to the cluster identity attribute.\n\nExample: if `prefix` is set to \"myoidc:\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", } func (PrefixedClaimMapping) SwaggerDoc() map[string]string { @@ -451,7 +465,8 @@ func (PrefixedClaimMapping) SwaggerDoc() map[string]string { } var map_TokenClaimMapping = map[string]string{ - "claim": "claim is a JWT token claim to be used in the mapping", + "": "TokenClaimMapping allows specifying a JWT token claim to be used when mapping claims from an authentication token to cluster identities.", + "claim": "claim is a required field that configures the JWT token claim whose 
value is assigned to the cluster identity field associated with this mapping.", } func (TokenClaimMapping) SwaggerDoc() map[string]string { @@ -459,17 +474,29 @@ func (TokenClaimMapping) SwaggerDoc() map[string]string { } var map_TokenClaimMappings = map[string]string{ - "username": "username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", - "groups": "groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.", + "username": "username is a required field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.", + "groups": "groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). For example - '\"example\"' and '\"exampleOne\", \"exampleTwo\", \"exampleThree\"' are valid claim values.", + "uid": "uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity.\n\nWhen using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. The current default is to use the 'sub' claim.", + "extra": "extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. key values for extra mappings must be unique. A maximum of 64 extra attribute mappings may be provided.", } func (TokenClaimMappings) SwaggerDoc() map[string]string { return map_TokenClaimMappings } +var map_TokenClaimOrExpressionMapping = map[string]string{ + "": "TokenClaimOrExpressionMapping allows specifying either a JWT token claim or CEL expression to be used when mapping claims from an authentication token to cluster identities.", + "claim": "claim is an optional field for specifying the JWT token claim that is used in the mapping. The value of this claim will be assigned to the field in which this mapping is associated.\n\nPrecisely one of claim or expression must be set. claim must not be specified when expression is set. When specified, claim must be at least 1 character in length and must not exceed 256 characters in length.", + "expression": "expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nPrecisely one of claim or expression must be set. expression must not be specified when claim is set. 
When specified, expression must be at least 1 character in length and must not exceed 4096 characters in length.", +} + +func (TokenClaimOrExpressionMapping) SwaggerDoc() map[string]string { + return map_TokenClaimOrExpressionMapping +} + var map_TokenClaimValidationRule = map[string]string{ - "type": "type sets the type of the validation rule", - "requiredClaim": "requiredClaim allows configuring a required claim name and its expected value", + "type": "type is an optional field that configures the type of the validation rule.\n\nAllowed values are 'RequiredClaim' and omitted (not provided or an empty string).\n\nWhen set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value.\n\nDefaults to 'RequiredClaim'.", + "requiredClaim": "requiredClaim is an optional field that configures the required claim and value that the Kubernetes API server will use to validate if an incoming JWT is valid for this identity provider.", } func (TokenClaimValidationRule) SwaggerDoc() map[string]string { @@ -477,9 +504,9 @@ func (TokenClaimValidationRule) SwaggerDoc() map[string]string { } var map_TokenIssuer = map[string]string{ - "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", - "audiences": "audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.", - "issuerCertificateAuthority": "CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the \"ca-bundle.crt\" key. If unset, system trust is used instead.", + "issuerURL": "issuerURL is a required field that configures the URL used to issue tokens by the identity provider. The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers.\n\nissuerURL must use the 'https' scheme.", + "audiences": "audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token.\n\naudiences must contain at least one entry and must not exceed ten entries.", + "issuerCertificateAuthority": "issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information.\n\nWhen not specified, the system trust is used.\n\nWhen specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap.", } func (TokenIssuer) SwaggerDoc() map[string]string { @@ -487,8 +514,8 @@ func (TokenIssuer) SwaggerDoc() map[string]string { } var map_TokenRequiredClaim = map[string]string{ - "claim": "claim is a name of a required claim. Only claims with string values are supported.", - "requiredValue": "requiredValue is the required value for the claim.", + "claim": "claim is a required field that configures the name of the required claim. 
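The uid and extra mappings documented above hang off TokenClaimMappings (see the UID and Extra fields in the deepcopy additions earlier in this patch). A hedged sketch of populating them, assuming the Go field names Claim/Expression and Key/ValueExpression mirror the documented JSON keys, with illustrative claim names:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	mappings := configv1.TokenClaimMappings{
		// uid: exactly one of claim or expression may be set; here a CEL
		// expression reads the 'sub' claim through the 'claims' variable.
		UID: &configv1.TokenClaimOrExpressionMapping{
			Expression: "claims.sub",
		},
		// extra: keys must be domain-prefixed and unique; the key and the
		// 'claims.team' expression are illustrative only.
		Extra: []configv1.ExtraMapping{{
			Key:             "example.org/team",
			ValueExpression: "claims.team",
		}},
	}
	fmt.Printf("%+v\n", mappings)
}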
When taken from the JWT claims, claim must be a string value.\n\nclaim must not be an empty string (\"\").", + "requiredValue": "requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims. If the value in the JWT claims does not match, the token will be rejected for authentication.\n\nrequiredValue must not be an empty string (\"\").", } func (TokenRequiredClaim) SwaggerDoc() map[string]string { @@ -496,13 +523,24 @@ func (TokenRequiredClaim) SwaggerDoc() map[string]string { } var map_UsernameClaimMapping = map[string]string{ - "prefixPolicy": "prefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", + "claim": "claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping.\n\nclaim must not be an empty string (\"\") and must not exceed 256 characters.", + "prefixPolicy": "prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field.\n\nAllowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string).\n\nWhen set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. The prefix field must be set when prefixPolicy is 'Prefix'.\n\nWhen set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim.\n\nWhen omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. 
As an example, consider the following scenario:\n `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n - \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n - \"email\": the mapped value will be \"userA@myoidc.tld\"", + "prefix": "prefix configures the prefix that should be prepended to the value of the JWT claim.\n\nprefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise.", } func (UsernameClaimMapping) SwaggerDoc() map[string]string { return map_UsernameClaimMapping } +var map_UsernamePrefix = map[string]string{ + "": "UsernamePrefix configures the string that should be used as a prefix for username claim mappings.", + "prefixString": "prefixString is a required field that configures the prefix that will be applied to cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes.\n\nprefixString must not be an empty string (\"\").", +} + +func (UsernamePrefix) SwaggerDoc() map[string]string { + return map_UsernamePrefix +} + var map_WebhookTokenAuthenticator = map[string]string{ "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator", "kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.", @@ -573,6 +611,45 @@ func (ImageLabel) SwaggerDoc() map[string]string { return map_ImageLabel } +var map_ClusterImagePolicy = map[string]string{ + "": "ClusterImagePolicy holds cluster-wide configuration for image signature verification\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the configuration for the cluster image policy.", + "status": "status contains the observed state of the resource.", +} + +func (ClusterImagePolicy) SwaggerDoc() map[string]string { + return map_ClusterImagePolicy +} + +var map_ClusterImagePolicyList = map[string]string{ + "": "ClusterImagePolicyList is a list of ClusterImagePolicy resources\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ClusterImagePolices", +} + +func (ClusterImagePolicyList) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyList +} + +var map_ClusterImagePolicySpec = map[string]string{ + "": "CLusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.", + "scopes": "scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". 
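For the prefixPolicy behaviour described above, a small sketch of requesting an explicit prefix rather than the default '{issuerURL}#' form; the Claim, PrefixPolicy and PrefixString field names are assumed from the documented JSON keys, while the Prefix pointer field appears in the deepcopy code earlier in this patch:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	username := configv1.UsernameClaimMapping{
		Claim:        "preferred_username", // assumed field name for 'claim'
		PrefixPolicy: "Prefix",             // assumed field name for 'prefixPolicy'
		Prefix:       &configv1.UsernamePrefix{PrefixString: "myoidc:"},
	}
	// With the JWT claim preferred_username=userA, the mapped cluster
	// username becomes "myoidc:userA".
	fmt.Printf("%+v\n", username)
}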
Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. This support no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "policy": "policy is a required field that contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", +} + +func (ClusterImagePolicySpec) SwaggerDoc() map[string]string { + return map_ClusterImagePolicySpec +} + +var map_ClusterImagePolicyStatus = map[string]string{ + "conditions": "conditions provide details on the status of this API Resource.", +} + +func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyStatus +} + var map_ClusterOperator = map[string]string{ "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -702,7 +779,7 @@ var map_ClusterVersionSpec = map[string]string{ "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. 
image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.", - "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters.", + "channel": "channel is an identifier for explicitly requesting a non-default set of updates to be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters.", "capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.", "signatureStores": "signatureStores contains the upstream URIs to verify release signatures and optional reference to a config map by name containing the PEM-encoded CA bundle.\n\nBy default, CVO will use existing signature stores if this property is empty. The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature in these stores in parallel only when local ConfigMaps did not include a valid signature. Validation will fail if none of the signature stores reply with valid signature before timeout. Setting signatureStores will replace the default signature stores with custom signature stores. Default stores can be used with custom signature stores by adding them manually.\n\nA maximum of 32 signature stores may be configured.", "overrides": "overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.", @@ -1137,6 +1214,147 @@ func (ImageDigestMirrors) SwaggerDoc() map[string]string { return map_ImageDigestMirrors } +var map_FulcioCAWithRekor = map[string]string{ + "": "FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.", + "fulcioCAData": "fulcioCAData is a required field contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters. ", + "rekorKeyData": "rekorKeyData is a required field contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. 
", + "fulcioSubject": "fulcioSubject is a required field specifies OIDC issuer and the email of the Fulcio authentication configuration.", +} + +func (FulcioCAWithRekor) SwaggerDoc() map[string]string { + return map_FulcioCAWithRekor +} + +var map_ImagePolicy = map[string]string{ + "": "ImagePolicy holds namespace-wide configuration for image signature verification\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status contains the observed state of the resource.", +} + +func (ImagePolicy) SwaggerDoc() map[string]string { + return map_ImagePolicy +} + +var map_ImagePolicyList = map[string]string{ + "": "ImagePolicyList is a list of ImagePolicy resources\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ImagePolicies", +} + +func (ImagePolicyList) SwaggerDoc() map[string]string { + return map_ImagePolicyList +} + +var map_ImagePolicySpec = map[string]string{ + "": "ImagePolicySpec is the specification of the ImagePolicy CRD.", + "scopes": "scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. This support no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. 
For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "policy": "policy is a required field that contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", +} + +func (ImagePolicySpec) SwaggerDoc() map[string]string { + return map_ImagePolicySpec +} + +var map_ImagePolicyStatus = map[string]string{ + "conditions": "conditions provide details on the status of this API Resource. condition type 'Pending' indicates that the customer resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid.", +} + +func (ImagePolicyStatus) SwaggerDoc() map[string]string { + return map_ImagePolicyStatus +} + +var map_PKI = map[string]string{ + "": "PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates.", + "caRootsData": "caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. ", + "caIntermediatesData": "caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. caIntermediatesData requires caRootsData to be set. ", + "pkiCertificateSubject": "pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", +} + +func (PKI) SwaggerDoc() map[string]string { + return map_PKI +} + +var map_PKICertificateSubject = map[string]string{ + "": "PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", + "email": "email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. The email must be a valid email address and at most 320 characters in length.", + "hostname": "hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. The hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk.", +} + +func (PKICertificateSubject) SwaggerDoc() map[string]string { + return map_PKICertificateSubject +} + +var map_Policy = map[string]string{ + "": "Policy defines the verification policy for the items in the scopes list.", + "rootOfTrust": "rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated.", + "signedIdentity": "signedIdentity is an optional field specifies what image identity the signature claims about the image. 
This is useful when the image identity in the signature differs from the original image spec, such as when mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is \"MatchRepoDigestOrExact\".", +} + +func (Policy) SwaggerDoc() map[string]string { + return map_Policy +} + +var map_PolicyFulcioSubject = map[string]string{ + "": "PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.", + "oidcIssuer": "oidcIssuer is a required filed contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. Example: \"https://expected.OIDC.issuer/\"", + "signedEmail": "signedEmail is a required field holds the email address that the Fulcio certificate is issued for. The signedEmail must be a valid email address and at most 320 characters in length. Example: \"expected-signing-user@example.com\"", +} + +func (PolicyFulcioSubject) SwaggerDoc() map[string]string { + return map_PolicyFulcioSubject +} + +var map_PolicyIdentity = map[string]string{ + "": "PolicyIdentity defines image identity the signature claims about the image. When omitted, the default matchPolicy is \"MatchRepoDigestOrExact\".", + "matchPolicy": "matchPolicy is a required filed specifies matching strategy to verify the image identity in the signature against the image scope. Allowed values are \"MatchRepoDigestOrExact\", \"MatchRepository\", \"ExactRepository\", \"RemapIdentity\". When omitted, the default value is \"MatchRepoDigestOrExact\". When set to \"MatchRepoDigestOrExact\", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. When set to \"MatchRepository\", the identity in the signature must be in the same repository as the image identity. When set to \"ExactRepository\", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by \"repository\". When set to \"RemapIdentity\", the remapIdentity must be specified. The signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the \"prefix\" with the specified “signedPrefix” if the the image identity matches the specified remapPrefix.", + "exactRepository": "exactRepository specifies the repository that must be exactly matched by the identity in the signature. exactRepository is required if matchPolicy is set to \"ExactRepository\". It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity.", + "remapIdentity": "remapIdentity specifies the prefix remapping rule for verifying image identity. remapIdentity is required if matchPolicy is set to \"RemapIdentity\". 
It is used to verify that the signature claims a different registry/repository prefix than the original image.", +} + +func (PolicyIdentity) SwaggerDoc() map[string]string { + return map_PolicyIdentity +} + +var map_PolicyMatchExactRepository = map[string]string{ + "repository": "repository is the reference of the image identity to be matched. repository is required if matchPolicy is set to \"ExactRepository\". The value should be a repository name (by omitting the tag or digest) in a registry implementing the \"Docker Registry HTTP API V2\". For example, docker.io/library/busybox", +} + +func (PolicyMatchExactRepository) SwaggerDoc() map[string]string { + return map_PolicyMatchExactRepository +} + +var map_PolicyMatchRemapIdentity = map[string]string{ + "prefix": "prefix is required if matchPolicy is set to \"RemapIdentity\". prefix is the prefix of the image identity to be matched. If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure. The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.", + "signedPrefix": "signedPrefix is required if matchPolicy is set to \"RemapIdentity\". signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as \"prefix\". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.", +} + +func (PolicyMatchRemapIdentity) SwaggerDoc() map[string]string { + return map_PolicyMatchRemapIdentity +} + +var map_PolicyRootOfTrust = map[string]string{ + "": "PolicyRootOfTrust defines the root of trust based on the selected policyType.", + "policyType": "policyType is a required field specifies the type of the policy for verification. This field must correspond to how the policy was generated. Allowed values are \"PublicKey\", \"FulcioCAWithRekor\", and \"PKI\". When set to \"PublicKey\", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. When set to \"FulcioCAWithRekor\", the policy is based on the Fulcio certification and incorporates a Rekor verification. When set to \"PKI\", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.", + "publicKey": "publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. 
publicKey is required when policyType is PublicKey, and forbidden otherwise.", + "fulcioCAWithRekor": "fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise For more information about Fulcio and Rekor, please refer to the document at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor", + "pki": "pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. pki is required when policyType is PKI, and forbidden otherwise.", +} + +func (PolicyRootOfTrust) SwaggerDoc() map[string]string { + return map_PolicyRootOfTrust +} + +var map_PublicKey = map[string]string{ + "": "PublicKey defines the root of trust based on a sigstore public key.", + "keyData": "keyData is a required field contains inline base64-encoded data for the PEM format public key. keyData must be at most 8192 characters. ", + "rekorKeyData": "rekorKeyData is an optional field contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. ", +} + +func (PublicKey) SwaggerDoc() map[string]string { + return map_PublicKey +} + var map_ImageTagMirrorSet = map[string]string{ "": "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go index e52a2e5c53..77df372d43 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go @@ -134,7 +134,7 @@ type RetentionNumberConfig struct { // the oldest backup will be removed before a new backup is initiated. // +kubebuilder:validation:Minimum=1 // +required - MaxNumberOfBackups int `json:"maxNumberOfBackups,omitempty"` + MaxNumberOfBackups int `json:"maxNumberOfBackups"` } // RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups @@ -144,7 +144,7 @@ type RetentionSizeConfig struct { // the oldest backup will be removed before a new backup is initiated. // +kubebuilder:validation:Minimum=1 // +required - MaxSizeOfBackupsGb int `json:"maxSizeOfBackupsGb,omitempty"` + MaxSizeOfBackupsGb int `json:"maxSizeOfBackupsGb"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go index c276971b5e..c048c64ef1 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go @@ -17,6 +17,8 @@ limitations under the License. 
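Putting the sigstore pieces above together, a hedged sketch of a ClusterImagePolicy that uses the new PKI root of trust (behind the SigstoreImageVerificationPKI gate) together with a RemapIdentity signedIdentity for a mirrored scope. Field names such as PolicyType, MatchPolicy, Email, Prefix and SignedPrefix are assumed from the documented JSON keys; the registries and certificate data are placeholders. The PublicKey and FulcioCAWithRekor variants are analogous: set policyType accordingly and populate the matching pointer field.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pol := configv1.ClusterImagePolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "mirror-byo-pki"},
		Spec: configv1.ClusterImagePolicySpec{
			Scopes: []configv1.ImageScope{"mirror.example.com/openshift"},
			Policy: configv1.Policy{
				// Root of trust: BYO PKI, verified against the listed CA roots
				// and the expected certificate subject.
				RootOfTrust: configv1.PolicyRootOfTrust{
					PolicyType: "PKI",
					PKI: &configv1.PKI{
						CertificateAuthorityRootsData: []byte("<base64-encoded PEM bundle>"),
						PKICertificateSubject: configv1.PKICertificateSubject{
							Email: "signer@example.com",
						},
					},
				},
				// Signatures name the upstream repository, so remap the mirror
				// prefix back to the signed prefix before matching.
				SignedIdentity: &configv1.PolicyIdentity{
					MatchPolicy: "RemapIdentity",
					PolicyMatchRemapIdentity: &configv1.PolicyMatchRemapIdentity{
						Prefix:       "mirror.example.com/openshift",
						SignedPrefix: "quay.io/openshift-release-dev",
					},
				},
			},
		},
	}
	fmt.Println(pol.Name)
}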
package v1alpha1 import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -51,7 +53,7 @@ type ClusterMonitoring struct { Status ClusterMonitoringStatus `json:"status,omitempty"` } -// MonitoringOperatorStatus defines the observed state of MonitoringOperator +// ClusterMonitoringStatus defines the observed state of ClusterMonitoring type ClusterMonitoringStatus struct { } @@ -72,22 +74,32 @@ type ClusterMonitoringList struct { } // ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator -// +required +// +kubebuilder:validation:MinProperties=1 type ClusterMonitoringSpec struct { // userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring. - // +required - UserDefined UserDefinedMonitoring `json:"userDefined"` + // userDefined is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default value is `Disabled`. + // +optional + UserDefined *UserDefinedMonitoring `json:"userDefined,omitempty"` + // alertmanagerConfig allows users to configure how the default Alertmanager instance + // should be deployed in the `openshift-monitoring` namespace. + // alertmanagerConfig is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. + // The current default value is `DefaultConfig`. + // +optional + AlertmanagerConfig *AlertmanagerConfig `json:"alertmanagerConfig,omitempty"` } // UserDefinedMonitoring config for user-defined projects. -// +required type UserDefinedMonitoring struct { // mode defines the different configurations of UserDefinedMonitoring // Valid values are Disabled and NamespaceIsolated // Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. // NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. - // +kubebuilder:validation:Enum:="Disabled";"NamespaceIsolated" + // The current default value is `Disabled`. // +required + // +kubebuilder:validation:Enum=Disabled;NamespaceIsolated Mode UserDefinedMode `json:"mode"` } @@ -101,3 +113,212 @@ const ( // UserDefinedNamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. UserDefinedNamespaceIsolated UserDefinedMode = "NamespaceIsolated" ) + +// alertmanagerConfig provides configuration options for the default Alertmanager instance +// that runs in the `openshift-monitoring` namespace. Use this configuration to control +// whether the default Alertmanager is deployed, how it logs, and how its pods are scheduled. +// +kubebuilder:validation:XValidation:rule="self.deploymentMode == 'CustomConfig' ? has(self.customConfig) : !has(self.customConfig)",message="customConfig is required when deploymentMode is CustomConfig, and forbidden otherwise" +type AlertmanagerConfig struct { + // deploymentMode determines whether the default Alertmanager instance should be deployed + // as part of the monitoring stack. 
+ // Allowed values are Disabled, DefaultConfig, and CustomConfig. + // When set to Disabled, the Alertmanager instance will not be deployed. + // When set to DefaultConfig, the platform will deploy Alertmanager with default settings. + // When set to CustomConfig, the Alertmanager will be deployed with custom configuration. + // + // +unionDiscriminator + // +required + DeploymentMode AlertManagerDeployMode `json:"deploymentMode"` + + // customConfig must be set when deploymentMode is CustomConfig, and must be unset otherwise. + // When set to CustomConfig, the Alertmanager will be deployed with custom configuration. + // +optional + CustomConfig *AlertmanagerCustomConfig `json:"customConfig,omitempty"` +} + +// AlertmanagerCustomConfig represents the configuration for a custom Alertmanager deployment. +// alertmanagerCustomConfig provides configuration options for the default Alertmanager instance +// that runs in the `openshift-monitoring` namespace. Use this configuration to control +// whether the default Alertmanager is deployed, how it logs, and how its pods are scheduled. +// +kubebuilder:validation:MinProperties=1 +type AlertmanagerCustomConfig struct { + // logLevel defines the verbosity of logs emitted by Alertmanager. + // This field allows users to control the amount and severity of logs generated, which can be useful + // for debugging issues or reducing noise in production environments. + // Allowed values are Error, Warn, Info, and Debug. + // When set to Error, only errors will be logged. + // When set to Warn, both warnings and errors will be logged. + // When set to Info, general information, warnings, and errors will all be logged. + // When set to Debug, detailed debugging information will be logged. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. + // The current default value is `Info`. + // +optional + LogLevel LogLevel `json:"logLevel"` + // nodeSelector defines the nodes on which the Pods are scheduled + // nodeSelector is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. + // +optional + // +kubebuilder:validation:MinProperties=1 + // +kubebuilder:validation:MaxProperties=10 + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the compute resource requests and limits for the Alertmanager container. + // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. + // When not specified, defaults are used by the platform. Requests cannot exceed limits. + // This field is optional. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. + // The current default values are: + // resources: + // - name: cpu + // request: 4m + // limit: null + // - name: memory + // request: 40Mi + // limit: null + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + Resources []ContainerResource `json:"resources,omitempty"` + // secrets defines a list of secrets that need to be mounted into the Alertmanager. 
+ // The secrets must reside within the same namespace as the Alertmanager object. + // They will be added as volumes named secret- and mounted at + // /etc/alertmanager/secrets/ within the 'alertmanager' container of + // the Alertmanager Pods. + // + // These secrets can be used to authenticate Alertmanager with endpoint receivers. + // For example, you can use secrets to: + // - Provide certificates for TLS authentication with receivers that require private CA certificates + // - Store credentials for Basic HTTP authentication with receivers that require password-based auth + // - Store any other authentication credentials needed by your alert receivers + // + // This field is optional. + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // Entries in this list must be unique. + // +optional + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=set + Secrets []SecretName `json:"secrets,omitempty"` + // tolerations defines tolerations for the pods. + // tolerations is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // Defaults are empty/unset. + // Maximum length for this list is 10 + // Minimum length for this list is 1 + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // topologySpreadConstraints defines rules for how Alertmanager Pods should be distributed + // across topology domains such as zones, nodes, or other user-defined labels. + // topologySpreadConstraints is optional. + // This helps improve high availability and resource efficiency by avoiding placing + // too many replicas in the same failure domain. + // + // When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. + // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. + // Default is empty list. + // Maximum length for this list is 10. + // Minimum length for this list is 1 + // Entries must have unique topologyKey and whenUnsatisfiable pairs. + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + // +optional + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + // volumeClaimTemplate Defines persistent storage for Alertmanager. Use this setting to + // configure the persistent volume claim, including storage class, volume + // size, and name. + // If omitted, the Pod uses ephemeral storage and alert data will not persist + // across restarts. + // This field is optional. + // +optional + VolumeClaimTemplate *v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` +} + +// AlertManagerDeployMode defines the deployment state of the platform Alertmanager instance. +// +// Possible values: +// - "Disabled": The Alertmanager instance will not be deployed. +// - "DefaultConfig": The Alertmanager instance will be deployed with default settings. +// - "CustomConfig": The Alertmanager instance will be deployed with custom configuration. +// +kubebuilder:validation:Enum=Disabled;DefaultConfig;CustomConfig +type AlertManagerDeployMode string + +const ( + // AlertManagerModeDisabled means the Alertmanager instance will not be deployed. 
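The nodeSelector, tolerations, and topologySpreadConstraints fields above map onto the usual core/v1 scheduling types. A consumer-side sketch with purely illustrative values (the selector, toleration, and spread constraint below are examples, not API defaults):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"

        configv1alpha1 "github.com/openshift/api/config/v1alpha1"
    )

    func main() {
        custom := configv1alpha1.AlertmanagerCustomConfig{
            NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
            Tolerations: []corev1.Toleration{{
                Key:      "node-role.kubernetes.io/infra",
                Operator: corev1.TolerationOpExists,
                Effect:   corev1.TaintEffectNoSchedule,
            }},
            // The listMapKey markers above require unique topologyKey/whenUnsatisfiable pairs.
            TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{
                MaxSkew:           1,
                TopologyKey:       "topology.kubernetes.io/zone",
                WhenUnsatisfiable: corev1.ScheduleAnyway,
            }},
        }
        fmt.Printf("%+v\n", custom)
    }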
+ AlertManagerDeployModeDisabled AlertManagerDeployMode = "Disabled" + // AlertManagerModeDefaultConfig means the Alertmanager instance will be deployed with default settings. + AlertManagerDeployModeDefaultConfig AlertManagerDeployMode = "DefaultConfig" + // AlertManagerModeCustomConfig means the Alertmanager instance will be deployed with custom configuration. + AlertManagerDeployModeCustomConfig AlertManagerDeployMode = "CustomConfig" +) + +// logLevel defines the verbosity of logs emitted by Alertmanager. +// Valid values are Error, Warn, Info and Debug. +// +kubebuilder:validation:Enum=Error;Warn;Info;Debug +type LogLevel string + +const ( + // Error only errors will be logged. + LogLevelError LogLevel = "Error" + // Warn, both warnings and errors will be logged. + LogLevelWarn LogLevel = "Warn" + // Info, general information, warnings, and errors will all be logged. + LogLevelInfo LogLevel = "Info" + // Debug, detailed debugging information will be logged. + LogLevelDebug LogLevel = "Debug" +) + +// ContainerResource defines a single resource requirement for a container. +// +kubebuilder:validation:XValidation:rule="has(self.request) || has(self.limit)",message="at least one of request or limit must be set" +// +kubebuilder:validation:XValidation:rule="!(has(self.request) && has(self.limit)) || quantity(self.limit).compareTo(quantity(self.request)) >= 0",message="limit must be greater than or equal to request" +type ContainerResource struct { + // name of the resource (e.g. "cpu", "memory", "hugepages-2Mi"). + // This field is required. + // name must consist only of alphanumeric characters, `-`, `_` and `.` and must start and end with an alphanumeric character. + // +required + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="!format.qualifiedName().validate(self).hasValue()",message="name must consist only of alphanumeric characters, `-`, `_` and `.` and must start and end with an alphanumeric character" + Name string `json:"name"` + + // request is the minimum amount of the resource required (e.g. "2Mi", "1Gi"). + // This field is optional. + // When limit is specified, request cannot be greater than limit. + // +optional + // +kubebuilder:validation:XIntOrString + // +kubebuilder:validation:MaxLength=20 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:XValidation:rule="isQuantity(self) && quantity(self).isGreaterThan(quantity('0'))",message="request must be a positive, non-zero quantity" + Request resource.Quantity `json:"request,omitempty"` + + // limit is the maximum amount of the resource allowed (e.g. "2Mi", "1Gi"). + // This field is optional. + // When request is specified, limit cannot be less than request. + // The value must be greater than 0 when specified. + // +optional + // +kubebuilder:validation:XIntOrString + // +kubebuilder:validation:MaxLength=20 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:XValidation:rule="isQuantity(self) && quantity(self).isGreaterThan(quantity('0'))",message="limit must be a positive, non-zero quantity" + Limit resource.Quantity `json:"limit,omitempty"` +} + +// SecretName is a type that represents the name of a Secret in the same namespace. +// It must be at most 253 characters in length. +// +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." 
+// +kubebuilder:validation:MaxLength=63 +type SecretName string diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go index 24ff257c93..64a89e4a63 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go @@ -78,7 +78,7 @@ type PolicyRootOfTrust struct { // policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. // "PublicKey" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. // "FulcioCAWithRekor" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification. - // "PKI" is a DevPreview feature that indicates that the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate. + // "PKI" indicates that the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate. // +unionDiscriminator // +required PolicyType PolicyType `json:"policyType"` @@ -134,7 +134,7 @@ type FulcioCAWithRekor struct { RekorKeyData []byte `json:"rekorKeyData"` // fulcioSubject specifies OIDC issuer and the email of the Fulcio authentication configuration. // +required - FulcioSubject PolicyFulcioSubject `json:"fulcioSubject,omitempty"` + FulcioSubject PolicyFulcioSubject `json:"fulcioSubject"` } // PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration. @@ -261,6 +261,7 @@ type ImagePolicyStatus struct { // conditions provide details on the status of this API Resource. // +listType=map // +listMapKey=type + // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go index b605ffcf41..144b173f6b 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go @@ -6,10 +6,86 @@ package v1alpha1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertmanagerConfig) DeepCopyInto(out *AlertmanagerConfig) { + *out = *in + if in.CustomConfig != nil { + in, out := &in.CustomConfig, &out.CustomConfig + *out = new(AlertmanagerCustomConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfig. +func (in *AlertmanagerConfig) DeepCopy() *AlertmanagerConfig { + if in == nil { + return nil + } + out := new(AlertmanagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
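Taken together, the ClusterMonitoring changes above make both spec fields optional pointers and add a discriminated union for Alertmanager. A hypothetical consumer-side sketch of a spec that satisfies the new CEL rule (customConfig is set exactly when deploymentMode is CustomConfig); the concrete values are examples only:

    package main

    import (
        "fmt"

        configv1alpha1 "github.com/openshift/api/config/v1alpha1"
    )

    func main() {
        spec := configv1alpha1.ClusterMonitoringSpec{
            // userDefined is now an optional pointer; omitting it defers to the platform default.
            UserDefined: &configv1alpha1.UserDefinedMonitoring{
                Mode: configv1alpha1.UserDefinedNamespaceIsolated,
            },
            AlertmanagerConfig: &configv1alpha1.AlertmanagerConfig{
                DeploymentMode: configv1alpha1.AlertManagerDeployModeCustomConfig,
                // customConfig is required because deploymentMode is CustomConfig.
                CustomConfig: &configv1alpha1.AlertmanagerCustomConfig{
                    LogLevel: configv1alpha1.LogLevelDebug,
                },
            },
        }
        fmt.Printf("%+v\n", spec)
    }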
+func (in *AlertmanagerCustomConfig) DeepCopyInto(out *AlertmanagerCustomConfig) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ContainerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretName, len(*in)) + copy(*out, *in) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeClaimTemplate != nil { + in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate + *out = new(v1.PersistentVolumeClaim) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerCustomConfig. +func (in *AlertmanagerCustomConfig) DeepCopy() *AlertmanagerCustomConfig { + if in == nil { + return nil + } + out := new(AlertmanagerCustomConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Backup) DeepCopyInto(out *Backup) { *out = *in @@ -192,7 +268,7 @@ func (in *ClusterImagePolicyStatus) DeepCopyInto(out *ClusterImagePolicyStatus) *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -215,7 +291,7 @@ func (in *ClusterMonitoring) DeepCopyInto(out *ClusterMonitoring) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status return } @@ -274,7 +350,16 @@ func (in *ClusterMonitoringList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterMonitoringSpec) DeepCopyInto(out *ClusterMonitoringSpec) { *out = *in - out.UserDefined = in.UserDefined + if in.UserDefined != nil { + in, out := &in.UserDefined, &out.UserDefined + *out = new(UserDefinedMonitoring) + **out = **in + } + if in.AlertmanagerConfig != nil { + in, out := &in.AlertmanagerConfig, &out.AlertmanagerConfig + *out = new(AlertmanagerConfig) + (*in).DeepCopyInto(*out) + } return } @@ -304,6 +389,24 @@ func (in *ClusterMonitoringStatus) DeepCopy() *ClusterMonitoringStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResource) DeepCopyInto(out *ContainerResource) { + *out = *in + out.Request = in.Request.DeepCopy() + out.Limit = in.Limit.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResource. 
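The ContainerResource type above is a simplified, list-based form of Kubernetes ResourceRequirements, validated by CEL: at least one of request or limit must be set, both must be positive, and limit must be at least request. A small sketch with example quantities that performs the same comparison client-side:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"

        configv1alpha1 "github.com/openshift/api/config/v1alpha1"
    )

    func main() {
        cpu := configv1alpha1.ContainerResource{
            Name:    "cpu",
            Request: resource.MustParse("4m"),
            Limit:   resource.MustParse("500m"),
        }
        // Mirrors the API-server CEL rule: limit must be >= request when both are set.
        fmt.Println("limit >= request:", cpu.Limit.Cmp(cpu.Request) >= 0)
    }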
+func (in *ContainerResource) DeepCopy() *ContainerResource { + if in == nil { + return nil + } + out := new(ContainerResource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) { *out = *in @@ -462,7 +565,7 @@ func (in *ImagePolicyStatus) DeepCopyInto(out *ImagePolicyStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go index 504281540b..b6ff150fcb 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -118,6 +118,31 @@ func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string { return map_ClusterImagePolicyStatus } +var map_AlertmanagerConfig = map[string]string{ + "": "alertmanagerConfig provides configuration options for the default Alertmanager instance that runs in the `openshift-monitoring` namespace. Use this configuration to control whether the default Alertmanager is deployed, how it logs, and how its pods are scheduled.", + "deploymentMode": "deploymentMode determines whether the default Alertmanager instance should be deployed as part of the monitoring stack. Allowed values are Disabled, DefaultConfig, and CustomConfig. When set to Disabled, the Alertmanager instance will not be deployed. When set to DefaultConfig, the platform will deploy Alertmanager with default settings. When set to CustomConfig, the Alertmanager will be deployed with custom configuration.", + "customConfig": "customConfig must be set when deploymentMode is CustomConfig, and must be unset otherwise. When set to CustomConfig, the Alertmanager will be deployed with custom configuration.", +} + +func (AlertmanagerConfig) SwaggerDoc() map[string]string { + return map_AlertmanagerConfig +} + +var map_AlertmanagerCustomConfig = map[string]string{ + "": "AlertmanagerCustomConfig represents the configuration for a custom Alertmanager deployment. alertmanagerCustomConfig provides configuration options for the default Alertmanager instance that runs in the `openshift-monitoring` namespace. Use this configuration to control whether the default Alertmanager is deployed, how it logs, and how its pods are scheduled.", + "logLevel": "logLevel defines the verbosity of logs emitted by Alertmanager. This field allows users to control the amount and severity of logs generated, which can be useful for debugging issues or reducing noise in production environments. Allowed values are Error, Warn, Info, and Debug. When set to Error, only errors will be logged. When set to Warn, both warnings and errors will be logged. When set to Info, general information, warnings, and errors will all be logged. When set to Debug, detailed debugging information will be logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. 
The current default value is `Info`.", + "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`.", + "resources": "resources defines the compute resource requests and limits for the Alertmanager container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1.", + "secrets": "secrets defines a list of secrets that need to be mounted into the Alertmanager. The secrets must reside within the same namespace as the Alertmanager object. They will be added as volumes named secret- and mounted at /etc/alertmanager/secrets/ within the 'alertmanager' container of the Alertmanager Pods.\n\nThese secrets can be used to authenticate Alertmanager with endpoint receivers. For example, you can use secrets to: - Provide certificates for TLS authentication with receivers that require private CA certificates - Store credentials for Basic HTTP authentication with receivers that require password-based auth - Store any other authentication credentials needed by your alert receivers\n\nThis field is optional. Maximum length for this list is 10. Minimum length for this list is 1. Entries in this list must be unique.", + "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10 Minimum length for this list is 1", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Alertmanager Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1 Entries must have unique topologyKey and whenUnsatisfiable pairs.", + "volumeClaimTemplate": "volumeClaimTemplate Defines persistent storage for Alertmanager. Use this setting to configure the persistent volume claim, including storage class, volume size, and name. If omitted, the Pod uses ephemeral storage and alert data will not persist across restarts. 
This field is optional.", +} + +func (AlertmanagerCustomConfig) SwaggerDoc() map[string]string { + return map_AlertmanagerCustomConfig +} + var map_ClusterMonitoring = map[string]string{ "": "ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. ClusterMonitoring is the Schema for the Cluster Monitoring Operators API", "metadata": "metadata is the standard object metadata.", @@ -140,8 +165,9 @@ func (ClusterMonitoringList) SwaggerDoc() map[string]string { } var map_ClusterMonitoringSpec = map[string]string{ - "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", - "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring.", + "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", + "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring. userDefined is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default value is `Disabled`.", + "alertmanagerConfig": "alertmanagerConfig allows users to configure how the default Alertmanager instance should be deployed in the `openshift-monitoring` namespace. alertmanagerConfig is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `DefaultConfig`.", } func (ClusterMonitoringSpec) SwaggerDoc() map[string]string { @@ -149,16 +175,27 @@ func (ClusterMonitoringSpec) SwaggerDoc() map[string]string { } var map_ClusterMonitoringStatus = map[string]string{ - "": "MonitoringOperatorStatus defines the observed state of MonitoringOperator", + "": "ClusterMonitoringStatus defines the observed state of ClusterMonitoring", } func (ClusterMonitoringStatus) SwaggerDoc() map[string]string { return map_ClusterMonitoringStatus } +var map_ContainerResource = map[string]string{ + "": "ContainerResource defines a single resource requirement for a container.", + "name": "name of the resource (e.g. \"cpu\", \"memory\", \"hugepages-2Mi\"). This field is required. name must consist only of alphanumeric characters, `-`, `_` and `.` and must start and end with an alphanumeric character.", + "request": "request is the minimum amount of the resource required (e.g. \"2Mi\", \"1Gi\"). This field is optional. When limit is specified, request cannot be greater than limit.", + "limit": "limit is the maximum amount of the resource allowed (e.g. \"2Mi\", \"1Gi\"). This field is optional. When request is specified, limit cannot be less than request. The value must be greater than 0 when specified.", +} + +func (ContainerResource) SwaggerDoc() map[string]string { + return map_ContainerResource +} + var map_UserDefinedMonitoring = map[string]string{ "": "UserDefinedMonitoring config for user-defined projects.", - "mode": "mode defines the different configurations of UserDefinedMonitoring Valid values are Disabled and NamespaceIsolated Disabled disables monitoring for user-defined projects. 
This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.", + "mode": "mode defines the different configurations of UserDefinedMonitoring Valid values are Disabled and NamespaceIsolated Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. The current default value is `Disabled`.", } func (UserDefinedMonitoring) SwaggerDoc() map[string]string { @@ -285,7 +322,7 @@ func (PolicyMatchRemapIdentity) SwaggerDoc() map[string]string { var map_PolicyRootOfTrust = map[string]string{ "": "PolicyRootOfTrust defines the root of trust based on the selected policyType.", - "policyType": "policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. \"PublicKey\" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. \"FulcioCAWithRekor\" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification. \"PKI\" is a DevPreview feature that indicates that the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.", + "policyType": "policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. \"PublicKey\" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. \"FulcioCAWithRekor\" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification. \"PKI\" indicates that the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.", "publicKey": "publicKey defines the root of trust based on a sigstore public key.", "fulcioCAWithRekor": "fulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. 
For more information about Fulcio and Rekor, please refer to the document at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor", "pki": "pki defines the root of trust based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates.", diff --git a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go index bf5e3cb99f..0160a4a242 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go @@ -339,7 +339,7 @@ type ConsolePluginBackend struct { // Service serving certificate. The console backend will proxy the // plugins assets from the Service using the service CA bundle. // +optional - Service *ConsolePluginService `json:"service"` + Service *ConsolePluginService `json:"service,omitempty"` } // ConsolePluginService holds information on Service that is serving @@ -368,7 +368,7 @@ type ConsolePluginService struct { // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9.\-_~!$&'()*+,;=:@\/]*$` // +kubebuilder:default:="/" // +optional - BasePath string `json:"basePath"` + BasePath string `json:"basePath,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/envtest-releases.yaml b/vendor/github.com/openshift/api/envtest-releases.yaml index e3a8c94cfa..e8688e2b06 100644 --- a/vendor/github.com/openshift/api/envtest-releases.yaml +++ b/vendor/github.com/openshift/api/envtest-releases.yaml @@ -51,3 +51,16 @@ releases: envtest-v1.32.1-linux-arm64.tar.gz: hash: 0bc52e6344ae0753715bc39c2878696c72a3129356df484835586165238361c109ad3e1ebd354af8ecdf1026c3a2b98ed225ad0c6dd348cb3ff128a7cfdcc2f8 selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.32.1-linux-arm64.tar.gz + v1.33.2: + envtest-v1.33.2-darwin-amd64.tar.gz: + hash: d032acdc6f365059e20e22de6f78dd298e4cc48b379639c777032a97b7fe6a1d0c933f2a48879506231d57e4be9407ffc56b46485958023a766c4db3f1efb89e + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.33.2-darwin-amd64.tar.gz + envtest-v1.33.2-darwin-arm64.tar.gz: + hash: 51aa29ab015a1d1825e7155137fe30a2aafd311faf63486118d0dd026a9163c65d61e004a42b8e3ff624529734610b4dafdc12220d11d6c159ca9b3622f64497 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.33.2-darwin-arm64.tar.gz + envtest-v1.33.2-linux-amd64.tar.gz: + hash: 4f2749f668c49ac67651ca2c284252eea722aa37389c3808957a0e1a7783adf1c183aa07ff9623357b761b96022828bbd09dff019527032a474d923676835faf + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.33.2-linux-amd64.tar.gz + envtest-v1.33.2-linux-arm64.tar.gz: + hash: 9936eba66fd0170808268da4c0609b7e7d4d1b0de8607b0d3a9091539b4ec881041a9e08e7b4839708b11139bcc850acd34dfc0305ed955cc61fc3fae9da58f5 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.33.2-linux-arm64.tar.gz diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index 0a5775f9c5..927568c858 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -2,98 +2,100 @@ | ------ | --- | --- | --- | --- | --- | --- | | ClusterAPIInstall| | | | | | | | EventedPLEG| | | | | | | -| MachineAPIMigration| | | | | | | | MachineAPIOperatorDisableMachineHealthCheckController| | | | | | | | 
MultiArchInstallAzure| | | | | | | +| ShortCertRotation| | | | | | | +| SigstoreImageVerification| | | | | | | +| NoRegistryClusterOperations| | | | Enabled | | | +| BootImageSkewEnforcement| | | Enabled | Enabled | | | | ClusterVersionOperatorConfiguration| | | Enabled | Enabled | | | | Example2| | | Enabled | Enabled | | | +| ExternalSnapshotMetadata| | | Enabled | Enabled | | | | NewOLMCatalogdAPIV1Metas| | | | Enabled | | Enabled | -| SELinuxChangePolicy| | | Enabled | Enabled | | | +| NewOLMOwnSingleNamespace| | | | Enabled | | Enabled | +| NewOLMPreflightPermissionChecks| | | | Enabled | | Enabled | +| NewOLMWebhookProviderOpenshiftServiceCA| | | | Enabled | | Enabled | | SELinuxMount| | | Enabled | Enabled | | | -| ShortCertRotation| | | Enabled | Enabled | | | -| SigstoreImageVerificationPKI| | | Enabled | Enabled | | | +| VSphereMixedNodeEnv| | | Enabled | Enabled | | | | NewOLM| | Enabled | | Enabled | | Enabled | | AWSClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | +| AWSDedicatedHosts| | | Enabled | Enabled | Enabled | Enabled | +| AWSServiceLBNetworkSecurityGroup| | | Enabled | Enabled | Enabled | Enabled | | AutomatedEtcdBackup| | | Enabled | Enabled | Enabled | Enabled | +| AzureDedicatedHosts| | | Enabled | Enabled | Enabled | Enabled | +| AzureMultiDisk| | | Enabled | Enabled | Enabled | Enabled | | BootcNodeManagement| | | Enabled | Enabled | Enabled | Enabled | -| CPMSMachineNamePrefix| | | Enabled | Enabled | Enabled | Enabled | | ClusterAPIInstallIBMCloud| | | Enabled | Enabled | Enabled | Enabled | | ClusterMonitoringConfig| | | Enabled | Enabled | Enabled | Enabled | -| ConsolePluginContentSecurityPolicy| | | Enabled | Enabled | Enabled | Enabled | | DNSNameResolver| | | Enabled | Enabled | Enabled | Enabled | | DualReplica| | | Enabled | Enabled | Enabled | Enabled | | DyanmicServiceEndpointIBMCloud| | | Enabled | Enabled | Enabled | Enabled | | DynamicResourceAllocation| | | Enabled | Enabled | Enabled | Enabled | | EtcdBackendQuota| | | Enabled | Enabled | Enabled | Enabled | | Example| | | Enabled | Enabled | Enabled | Enabled | +| ExternalOIDCWithUIDAndExtraClaimMappings| | | Enabled | Enabled | Enabled | Enabled | | GCPClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | | GCPCustomAPIEndpoints| | | Enabled | Enabled | Enabled | Enabled | -| GatewayAPI| | | Enabled | Enabled | Enabled | Enabled | -| GatewayAPIController| | | Enabled | Enabled | Enabled | Enabled | | HighlyAvailableArbiter| | | Enabled | Enabled | Enabled | Enabled | +| ImageModeStatusReporting| | | Enabled | Enabled | Enabled | Enabled | | ImageStreamImportMode| | | Enabled | Enabled | Enabled | Enabled | +| ImageVolume| | | Enabled | Enabled | Enabled | Enabled | | IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | Enabled | Enabled | | InsightsConfig| | | Enabled | Enabled | Enabled | Enabled | | InsightsConfigAPI| | | Enabled | Enabled | Enabled | Enabled | | InsightsOnDemandDataGather| | | Enabled | Enabled | Enabled | Enabled | | InsightsRuntimeExtractor| | | Enabled | Enabled | Enabled | Enabled | | KMSEncryptionProvider| | | Enabled | Enabled | Enabled | Enabled | -| MachineAPIProviderOpenStack| | | Enabled | Enabled | Enabled | Enabled | -| MachineConfigNodes| | | Enabled | Enabled | Enabled | Enabled | +| MachineAPIMigration| | | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesAzure| | | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesvSphere| | | Enabled | Enabled | Enabled | Enabled | | MaxUnavailableStatefulSet| | | 
Enabled | Enabled | Enabled | Enabled | -| MetricsCollectionProfiles| | | Enabled | Enabled | Enabled | Enabled | | MinimumKubeletVersion| | | Enabled | Enabled | Enabled | Enabled | | MixedCPUsAllocation| | | Enabled | Enabled | Enabled | Enabled | +| MultiDiskSetup| | | Enabled | Enabled | Enabled | Enabled | +| MutatingAdmissionPolicy| | | Enabled | Enabled | Enabled | Enabled | | NodeSwap| | | Enabled | Enabled | Enabled | Enabled | | NutanixMultiSubnets| | | Enabled | Enabled | Enabled | Enabled | | OVNObservability| | | Enabled | Enabled | Enabled | Enabled | -| OnClusterBuild| | | Enabled | Enabled | Enabled | Enabled | -| PinnedImages| | | Enabled | Enabled | Enabled | Enabled | -| PlatformOperators| | | Enabled | Enabled | Enabled | Enabled | -| ProcMountType| | | Enabled | Enabled | Enabled | Enabled | -| RouteAdvertisements| | | Enabled | Enabled | Enabled | Enabled | -| RouteExternalCertificate| | | Enabled | Enabled | Enabled | Enabled | -| ServiceAccountTokenNodeBinding| | | Enabled | Enabled | Enabled | Enabled | +| PreconfiguredUDNAddresses| | | Enabled | Enabled | Enabled | Enabled | | SignatureStores| | | Enabled | Enabled | Enabled | Enabled | -| SigstoreImageVerification| | | Enabled | Enabled | Enabled | Enabled | +| SigstoreImageVerificationPKI| | | Enabled | Enabled | Enabled | Enabled | +| StoragePerformantSecurityPolicy| | | Enabled | Enabled | Enabled | Enabled | | TranslateStreamCloseWebsocketRequests| | | Enabled | Enabled | Enabled | Enabled | | UpgradeStatus| | | Enabled | Enabled | Enabled | Enabled | -| UserNamespacesPodSecurityStandards| | | Enabled | Enabled | Enabled | Enabled | -| UserNamespacesSupport| | | Enabled | Enabled | Enabled | Enabled | | VSphereConfigurableMaxAllowedBlockVolumesPerNode| | | Enabled | Enabled | Enabled | Enabled | | VSphereHostVMGroupZonal| | | Enabled | Enabled | Enabled | Enabled | | VSphereMultiDisk| | | Enabled | Enabled | Enabled | Enabled | -| VSphereMultiNetworks| | | Enabled | Enabled | Enabled | Enabled | | VolumeAttributesClass| | | Enabled | Enabled | Enabled | Enabled | | VolumeGroupSnapshot| | | Enabled | Enabled | Enabled | Enabled | | ExternalOIDC| Enabled | | Enabled | Enabled | Enabled | Enabled | -| AWSEFSDriverVolumeMetrics| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AdditionalRoutingCapabilities| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| BareMetalLoadBalancer| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | BuildCSIVolumes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| CPMSMachineNamePrefix| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ChunkSizeMiB| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| CloudDualStackNodeIPs| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| DisableKubeletCloudCredentialProviders| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| GCPLabelsTags| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| HardwareSpeed| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ConsolePluginContentSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| GatewayAPI| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| GatewayAPIController| Enabled | Enabled | Enabled | Enabled | 
Enabled | Enabled | | IngressControllerLBSubnetsAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | KMSv1| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| MachineConfigNodes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImagesAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| MultiArchInstallAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| MultiArchInstallGCP| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| MetricsCollectionProfiles| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NetworkDiagnosticsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NetworkSegmentation| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| NodeDisruptionPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| OnClusterBuild| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| PersistentIPsForVirtualization| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| PrivateHostedZoneAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| PinnedImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ProcMountType| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| RouteAdvertisements| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| RouteExternalCertificate| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ServiceAccountTokenNodeBinding| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | SetEIPForNLBIngressController| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| VSphereDriverConfiguration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| VSphereMultiVCenters| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| VSphereStaticIPs| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ValidatingAdmissionPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| UserNamespacesPodSecurityStandards| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| UserNamespacesSupport| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VSphereMultiNetworks| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go index 33acf6a376..bf9a1c93e7 100644 --- a/vendor/github.com/openshift/api/features/features.go +++ b/vendor/github.com/openshift/api/features/features.go @@ -40,24 +40,24 @@ var ( reportProblemsToJiraComponent("Management Console"). contactPerson("jhadvig"). productScope(ocpSpecific). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). enhancementPR("https://github.com/openshift/enhancements/pull/1706"). mustRegister() FeatureGateServiceAccountTokenNodeBinding = newFeatureGate("ServiceAccountTokenNodeBinding"). reportProblemsToJiraComponent("apiserver-auth"). - contactPerson("stlaz"). + contactPerson("ibihim"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/4193"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
+ enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateValidatingAdmissionPolicy = newFeatureGate("ValidatingAdmissionPolicy"). + FeatureGateMutatingAdmissionPolicy = newFeatureGate("MutatingAdmissionPolicy"). reportProblemsToJiraComponent("kube-apiserver"). contactPerson("benluddy"). productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/3488"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3962"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateGatewayAPI = newFeatureGate("GatewayAPI"). @@ -65,7 +65,7 @@ var ( contactPerson("miciah"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateSetEIPForNLBIngressController = newFeatureGate("SetEIPForNLBIngressController"). @@ -100,14 +100,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateMachineAPIProviderOpenStack = newFeatureGate("MachineAPIProviderOpenStack"). - reportProblemsToJiraComponent("openstack"). - contactPerson("egarcia"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateInsightsConfigAPI = newFeatureGate("InsightsConfigAPI"). reportProblemsToJiraComponent("insights"). contactPerson("tremes"). @@ -140,6 +132,14 @@ var ( enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateAzureDedicatedHosts = newFeatureGate("AzureDedicatedHosts"). + reportProblemsToJiraComponent("installer"). + contactPerson("rvanderp3"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1783"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateMaxUnavailableStatefulSet = newFeatureGate("MaxUnavailableStatefulSet"). reportProblemsToJiraComponent("apps"). contactPerson("atiratree"). @@ -155,20 +155,11 @@ var ( enhancementPR("https://github.com/kubernetes/enhancements/issues/3386"). mustRegister() - FeatureGatePrivateHostedZoneAWS = newFeatureGate("PrivateHostedZoneAWS"). - reportProblemsToJiraComponent("Routing"). - contactPerson("miciah"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateSigstoreImageVerification = newFeatureGate("SigstoreImageVerification"). reportProblemsToJiraComponent("node"). contactPerson("sgrunert"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateSigstoreImageVerificationPKI = newFeatureGate("SigstoreImageVerificationPKI"). @@ -176,17 +167,9 @@ var ( contactPerson("QiWang"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1658"). - enableIn(configv1.DevPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
mustRegister() - FeatureGateGCPLabelsTags = newFeatureGate("GCPLabelsTags"). - reportProblemsToJiraComponent("Installer"). - contactPerson("bhb"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateAlibabaPlatform = newFeatureGate("AlibabaPlatform"). reportProblemsToJiraComponent("cloud-provider"). contactPerson("jspeed"). @@ -195,14 +178,6 @@ var ( enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateCloudDualStackNodeIPs = newFeatureGate("CloudDualStackNodeIPs"). - reportProblemsToJiraComponent("machine-config-operator/platform-baremetal"). - contactPerson("mkowalsk"). - productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/3705"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateVSphereHostVMGroupZonal = newFeatureGate("VSphereHostVMGroupZonal"). reportProblemsToJiraComponent("splat"). contactPerson("jcpowermac"). @@ -219,28 +194,12 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateVSphereMultiVCenters = newFeatureGate("VSphereMultiVCenters"). - reportProblemsToJiraComponent("splat"). - contactPerson("vr4manta"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - - FeatureGateVSphereStaticIPs = newFeatureGate("VSphereStaticIPs"). - reportProblemsToJiraComponent("splat"). - contactPerson("rvanderp3"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateRouteExternalCertificate = newFeatureGate("RouteExternalCertificate"). reportProblemsToJiraComponent("router"). - contactPerson("thejasn"). + contactPerson("chiragkyal"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateCPMSMachineNamePrefix = newFeatureGate("CPMSMachineNamePrefix"). @@ -248,7 +207,7 @@ var ( contactPerson("chiragkyal"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1714"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateAdminNetworkPolicy = newFeatureGate("AdminNetworkPolicy"). @@ -280,7 +239,7 @@ var ( contactPerson("jcaamano"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateNetworkLiveMigration = newFeatureGate("NetworkLiveMigration"). @@ -307,14 +266,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateHardwareSpeed = newFeatureGate("HardwareSpeed"). - reportProblemsToJiraComponent("etcd"). - contactPerson("hasbro17"). - productScope(ocpSpecific). 
- enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateBackendQuotaGiB = newFeatureGate("EtcdBackendQuota"). reportProblemsToJiraComponent("etcd"). contactPerson("hasbro17"). @@ -348,12 +299,20 @@ var ( FeatureGateMachineConfigNodes = newFeatureGate("MachineConfigNodes"). reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("cdoern"). + contactPerson("ijanssen"). productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1765"). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateImageModeStatusReporting = newFeatureGate("ImageModeStatusReporting"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("ijanssen"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1809"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateClusterAPIInstall = newFeatureGate("ClusterAPIInstall"). reportProblemsToJiraComponent("Installer"). contactPerson("vincepri"). @@ -401,20 +360,36 @@ var ( enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateDisableKubeletCloudCredentialProviders = newFeatureGate("DisableKubeletCloudCredentialProviders"). - reportProblemsToJiraComponent("cloud-provider"). - contactPerson("jspeed"). - productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/2395"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() + FeatureGateManagedBootImagesvSphere = newFeatureGate("ManagedBootImagesvSphere"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("rsaini"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1496"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateManagedBootImagesAzure = newFeatureGate("ManagedBootImagesAzure"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("djoshy"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1761"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateBootImageSkewEnforcement = newFeatureGate("BootImageSkewEnforcement"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("djoshy"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1761"). + enableIn(configv1.DevPreviewNoUpgrade). + mustRegister() FeatureGateOnClusterBuild = newFeatureGate("OnClusterBuild"). reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("dkhater"). + contactPerson("cheesesashimi"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateBootcNodeManagement = newFeatureGate("BootcNodeManagement"). @@ -443,10 +418,10 @@ var ( FeatureGatePinnedImages = newFeatureGate("PinnedImages"). 
reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("jhernand"). + contactPerson("RishabhSaini"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateUpgradeStatus = newFeatureGate("UpgradeStatus"). @@ -481,6 +456,14 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateExternalSnapshotMetadata = newFeatureGate("ExternalSnapshotMetadata"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("jdobson"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3314"). + enableIn(configv1.DevPreviewNoUpgrade). + mustRegister() + FeatureGateExternalOIDC = newFeatureGate("ExternalOIDC"). reportProblemsToJiraComponent("authentication"). contactPerson("liouk"). @@ -490,6 +473,15 @@ var ( enableForClusterProfile(Hypershift, configv1.Default, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateExternalOIDCWithAdditionalClaimMappings = newFeatureGate("ExternalOIDCWithUIDAndExtraClaimMappings"). + reportProblemsToJiraComponent("authentication"). + contactPerson("bpalmer"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1777"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableForClusterProfile(Hypershift, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateExample = newFeatureGate("Example"). reportProblemsToJiraComponent("cluster-config"). contactPerson("deads"). @@ -506,14 +498,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade). mustRegister() - FeatureGatePlatformOperators = newFeatureGate("PlatformOperators"). - reportProblemsToJiraComponent("olm"). - contactPerson("joe"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateNewOLM = newFeatureGate("NewOLM"). reportProblemsToJiraComponent("olm"). contactPerson("joe"). @@ -530,6 +514,30 @@ var ( enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateNewOLMPreflightPermissionChecks = newFeatureGate("NewOLMPreflightPermissionChecks"). + reportProblemsToJiraComponent("olm"). + contactPerson("tshort"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1768"). + enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNewOLMOwnSingleNamespace = newFeatureGate("NewOLMOwnSingleNamespace"). + reportProblemsToJiraComponent("olm"). + contactPerson("nschieder"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1774"). + enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNewOLMWebhookProviderOpenshiftServiceCA = newFeatureGate("NewOLMWebhookProviderOpenshiftServiceCA"). + reportProblemsToJiraComponent("olm"). + contactPerson("pegoncal"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1799"). + enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
+ mustRegister() + FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather"). reportProblemsToJiraComponent("insights"). contactPerson("tremes"). @@ -538,14 +546,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateBareMetalLoadBalancer = newFeatureGate("BareMetalLoadBalancer"). - reportProblemsToJiraComponent("metal"). - contactPerson("EmilienM"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateInsightsConfig = newFeatureGate("InsightsConfig"). reportProblemsToJiraComponent("insights"). contactPerson("tremes"). @@ -554,27 +554,11 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateNodeDisruptionPolicy = newFeatureGate("NodeDisruptionPolicy"). - reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("jerzhang"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateMetricsCollectionProfiles = newFeatureGate("MetricsCollectionProfiles"). reportProblemsToJiraComponent("Monitoring"). contactPerson("rexagod"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - - FeatureGateVSphereDriverConfiguration = newFeatureGate("VSphereDriverConfiguration"). - reportProblemsToJiraComponent("Storage / Kubernetes External Components"). - contactPerson("rbednar"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -599,16 +583,9 @@ var ( contactPerson("jspeed"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGatePersistentIPsForVirtualization = newFeatureGate("PersistentIPsForVirtualization"). - reportProblemsToJiraComponent("CNV Network"). - contactPerson("mduarted"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateClusterMonitoringConfig = newFeatureGate("ClusterMonitoringConfig"). reportProblemsToJiraComponent("Monitoring"). contactPerson("marioferh"). @@ -617,14 +594,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateMultiArchInstallAWS = newFeatureGate("MultiArchInstallAWS"). - reportProblemsToJiraComponent("Installer"). - contactPerson("r4f4"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateMultiArchInstallAzure = newFeatureGate("MultiArchInstallAzure"). reportProblemsToJiraComponent("Installer"). contactPerson("r4f4"). @@ -632,14 +601,6 @@ var ( enhancementPR(legacyFeatureGateWithoutEnhancement). mustRegister() - FeatureGateMultiArchInstallGCP = newFeatureGate("MultiArchInstallGCP"). - reportProblemsToJiraComponent("Installer"). - contactPerson("r4f4"). - productScope(ocpSpecific). 
- enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateIngressControllerLBSubnetsAWS = newFeatureGate("IngressControllerLBSubnetsAWS"). reportProblemsToJiraComponent("Routing"). contactPerson("miciah"). @@ -648,14 +609,6 @@ var ( enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateAWSEFSDriverVolumeMetrics = newFeatureGate("AWSEFSDriverVolumeMetrics"). - reportProblemsToJiraComponent("Storage / Kubernetes External Components"). - contactPerson("fbertina"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateImageStreamImportMode = newFeatureGate("ImageStreamImportMode"). reportProblemsToJiraComponent("Multi-Arch"). contactPerson("psundara"). @@ -669,15 +622,18 @@ var ( contactPerson("haircommander"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/127"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). mustRegister() + // Note: this feature is perma-alpha, but it is safe and desirable to enable. + // It was an oversight in upstream to not remove the feature gate after the version skew became safe in 1.33. + // See https://github.com/kubernetes/enhancements/tree/d4226c42/keps/sig-node/127-user-namespaces#pod-security-standards-pss-integration FeatureGateUserNamespacesPodSecurityStandards = newFeatureGate("UserNamespacesPodSecurityStandards"). reportProblemsToJiraComponent("Node"). contactPerson("haircommander"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/127"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). mustRegister() FeatureGateProcMountType = newFeatureGate("ProcMountType"). @@ -685,7 +641,7 @@ var ( contactPerson("haircommander"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/4265"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). mustRegister() FeatureGateVSphereMultiNetworks = newFeatureGate("VSphereMultiNetworks"). @@ -693,7 +649,7 @@ var ( contactPerson("rvanderp"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateIngressControllerDynamicConfigurationManager = newFeatureGate("IngressControllerDynamicConfigurationManager"). @@ -763,14 +719,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateSELinuxChangePolicy = newFeatureGate("SELinuxChangePolicy"). - reportProblemsToJiraComponent("Storage / Kubernetes"). - contactPerson("jsafrane"). - productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/1710"). - enableIn(configv1.DevPreviewNoUpgrade). - mustRegister() - FeatureGateSELinuxMount = newFeatureGate("SELinuxMount").
reportProblemsToJiraComponent("Storage / Kubernetes"). contactPerson("jsafrane"). @@ -784,11 +732,8 @@ var ( contactPerson("jaypoulz"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1675"). - // TODO: Do not go GA until jira issue is resolved: https://issues.redhat.com/browse/OCPEDGE-1637 - // Annotations must correctly handle either DualReplica or HighlyAvailableArbiter going GA with - // the other still in TechPreview. - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() FeatureGateGatewayAPIController = newFeatureGate("GatewayAPIController"). reportProblemsToJiraComponent("Routing"). @@ -800,14 +745,13 @@ var ( // A dedicated feature gate now controls the Gateway Controller to distinguish // its production readiness from that of the CRDs. enhancementPR("https://github.com/openshift/enhancements/pull/1756"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureShortCertRotation = newFeatureGate("ShortCertRotation"). reportProblemsToJiraComponent("kube-apiserver"). contactPerson("vrutkovs"). productScope(ocpSpecific). - enableIn(configv1.DevPreviewNoUpgrade). enhancementPR("https://github.com/openshift/enhancements/pull/1670"). mustRegister() @@ -818,4 +762,76 @@ var ( enhancementPR("https://github.com/openshift/enhancements/pull/1748"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + + FeatureGateAzureMultiDisk = newFeatureGate("AzureMultiDisk"). + reportProblemsToJiraComponent("splat"). + contactPerson("jcpowermac"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1779"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateStoragePerformantSecurityPolicy = newFeatureGate("StoragePerformantSecurityPolicy"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("hekumar"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1804"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMultiDiskSetup = newFeatureGate("MultiDiskSetup"). + reportProblemsToJiraComponent("splat"). + contactPerson("jcpowermac"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1805"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAWSDedicatedHosts = newFeatureGate("AWSDedicatedHosts"). + reportProblemsToJiraComponent("Installer"). + contactPerson("faermanj"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1781"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVSphereMixedNodeEnv = newFeatureGate("VSphereMixedNodeEnv"). + reportProblemsToJiraComponent("splat"). + contactPerson("vr4manta"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1772"). + enableIn(configv1.DevPreviewNoUpgrade). + mustRegister() + + FeatureGatePreconfiguredUDNAddresses = newFeatureGate("PreconfiguredUDNAddresses"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("kyrtapz"). 
+ productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1793"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAWSServiceLBNetworkSecurityGroup = newFeatureGate("AWSServiceLBNetworkSecurityGroup"). + reportProblemsToJiraComponent("Cloud Compute / Cloud Controller Manager"). + contactPerson("mtulio"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1802"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateImageVolume = newFeatureGate("ImageVolume"). + reportProblemsToJiraComponent("Node"). + contactPerson("haircommander"). + productScope(kubernetes). + enhancementPR("https://github.com/openshift/enhancements/pull/1792"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNoRegistryClusterOperations = newFeatureGate("NoRegistryClusterOperations"). + reportProblemsToJiraComponent("Installer / Agent based installation"). + contactPerson("andfasano"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1821"). + enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade). + mustRegister() ) diff --git a/vendor/github.com/openshift/api/features/legacyfeaturegates.go b/vendor/github.com/openshift/api/features/legacyfeaturegates.go index 132a3dacb3..ae3f1ecf0f 100644 --- a/vendor/github.com/openshift/api/features/legacyfeaturegates.go +++ b/vendor/github.com/openshift/api/features/legacyfeaturegates.go @@ -17,8 +17,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "AzureWorkloadIdentity", // never add to this list, if you think you have an exception ask @deads2k - "BareMetalLoadBalancer", - // never add to this list, if you think you have an exception ask @deads2k "BootcNodeManagement", // never add to this list, if you think you have an exception ask @deads2k "BuildCSIVolumes", @@ -41,8 +39,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "GCPClusterHostedDNS", // never add to this list, if you think you have an exception ask @deads2k - "GCPLabelsTags", - // never add to this list, if you think you have an exception ask @deads2k "GatewayAPI", // never add to this list, if you think you have an exception ask @deads2k "HardwareSpeed", @@ -93,8 +89,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "NewOLM", // never add to this list, if you think you have an exception ask @deads2k - "NodeDisruptionPolicy", - // never add to this list, if you think you have an exception ask @deads2k "OVNObservability", // never add to this list, if you think you have an exception ask @deads2k "OnClusterBuild", @@ -103,8 +97,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "PinnedImages", // never add to this list, if you think you have an exception ask @deads2k - "PlatformOperators", - // never add to this list, if you think you have an exception ask @deads2k "PrivateHostedZoneAWS", // never add to this list, if you think you have an exception ask @deads2k "RouteAdvertisements", diff --git a/vendor/github.com/openshift/api/image/v1/generated.proto b/vendor/github.com/openshift/api/image/v1/generated.proto index dabdc6d84a..74d8d66c47 100644 --- 
a/vendor/github.com/openshift/api/image/v1/generated.proto +++ b/vendor/github.com/openshift/api/image/v1/generated.proto @@ -31,11 +31,11 @@ message DockerImageReference { optional string iD = 5; } -// Image is an immutable representation of a container image and metadata at a point in time. +// Image is an immutable representation of a container image and its metadata at a point in time. // Images are named by taking a hash of their contents (metadata and content) and any change // in format, content, or metadata results in a new name. The images resource is primarily // for use by cluster administrators and integrations like the cluster image registry - end -// users instead access images via the imagestreamtags or imagestreamimages resources. While +// users, instead, access images via the imagestreamtags or imagestreamimages resources. While // image metadata is stored in the API, any integration that implements the container image // registry API must provide its own storage for the raw manifest data, image config, and // layer contents. @@ -353,12 +353,15 @@ message ImageStreamImportSpec { // ImageStreamImportStatus contains information about the status of an image stream import. message ImageStreamImportStatus { // import is the image stream that was successfully updated or created when 'to' was set. + // +optional optional ImageStream import = 1; // repository is set if spec.repository was set to the outcome of the import + // +optional optional RepositoryImportStatus repository = 2; // images is set with the result of importing spec.images + // +optional repeated ImageImportStatus images = 3; } @@ -436,17 +439,20 @@ message ImageStreamSpec { message ImageStreamStatus { // dockerImageRepository represents the effective location this stream may be accessed at. // May be empty until the server determines where the repository is located + // +optional optional string dockerImageRepository = 1; // publicDockerImageRepository represents the public location from where the image can // be pulled outside the cluster. This field may be empty if the administrator // has not exposed the integrated registry externally. + // +optional optional string publicDockerImageRepository = 3; // tags are a historical record of images associated with each tag. The first entry in the // TagEvent array is the currently tagged image. // +patchMergeKey=tag // +patchStrategy=merge + // +optional repeated NamedTagEventList tags = 2; } @@ -504,7 +510,7 @@ message ImageStreamTagList { // the status history, and the currently referenced image (if any) of the provided // tag. This type replaces the ImageStreamTag by providing a full view of the tag. // ImageTags are returned for every spec or status tag present on the image stream. -// If no tag exists in either form a not found error will be returned by the API. +// If no tag exists in either form, a not found error will be returned by the API. // A create operation will succeed if no spec tag has already been defined and the // spec field is set. Delete will remove both spec and status elements from the // image stream. 
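Note on the +optional markers added in the hunks above: these markers are consumed by the OpenAPI/CRD schema generators and validation tooling, while the wire encoding is still decided solely by the json struct tags. A minimal, self-contained Go sketch of that distinction (illustrative only; the type below is a hypothetical stand-in, not the real ImageStreamStatus):

package main

import (
	"encoding/json"
	"fmt"
)

// imageStreamStatusStandIn is a hypothetical stand-in for ImageStreamStatus.
type imageStreamStatusStandIn struct {
	// No omitempty: an empty value is still written out, matching the
	// real dockerImageRepository tag.
	DockerImageRepository string `json:"dockerImageRepository"`
	// With omitempty: an empty value is dropped from the payload, matching
	// the real publicDockerImageRepository tag.
	PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty"`
}

func main() {
	out, err := json.Marshal(imageStreamStatusStandIn{})
	if err != nil {
		panic(err)
	}
	// Prints: {"dockerImageRepository":""}
	fmt.Println(string(out))
}

Because the json tags are untouched, marking these fields +optional does not change the serialized form of existing objects; it only loosens the generated schema.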
diff --git a/vendor/github.com/openshift/api/image/v1/types.go b/vendor/github.com/openshift/api/image/v1/types.go index d4ee4bff69..e45d6b10da 100644 --- a/vendor/github.com/openshift/api/image/v1/types.go +++ b/vendor/github.com/openshift/api/image/v1/types.go @@ -27,11 +27,11 @@ type ImageList struct { // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Image is an immutable representation of a container image and metadata at a point in time. +// Image is an immutable representation of a container image and its metadata at a point in time. // Images are named by taking a hash of their contents (metadata and content) and any change // in format, content, or metadata results in a new name. The images resource is primarily // for use by cluster administrators and integrations like the cluster image registry - end -// users instead access images via the imagestreamtags or imagestreamimages resources. While +// users, instead, access images via the imagestreamtags or imagestreamimages resources. While // image metadata is stored in the API, any integration that implements the container image // registry API must provide its own storage for the raw manifest data, image config, and // layer contents. @@ -357,15 +357,18 @@ type TagReferencePolicy struct { type ImageStreamStatus struct { // dockerImageRepository represents the effective location this stream may be accessed at. // May be empty until the server determines where the repository is located + // +optional DockerImageRepository string `json:"dockerImageRepository" protobuf:"bytes,1,opt,name=dockerImageRepository"` // publicDockerImageRepository represents the public location from where the image can // be pulled outside the cluster. This field may be empty if the administrator // has not exposed the integrated registry externally. + // +optional PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty" protobuf:"bytes,3,opt,name=publicDockerImageRepository"` // tags are a historical record of images associated with each tag. The first entry in the // TagEvent array is the currently tagged image. // +patchMergeKey=tag // +patchStrategy=merge + // +optional Tags []NamedTagEventList `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"tag" protobuf:"bytes,2,rep,name=tags"` } @@ -512,7 +515,7 @@ type ImageStreamTagList struct { // the status history, and the currently referenced image (if any) of the provided // tag. This type replaces the ImageStreamTag by providing a full view of the tag. // ImageTags are returned for every spec or status tag present on the image stream. -// If no tag exists in either form a not found error will be returned by the API. +// If no tag exists in either form, a not found error will be returned by the API. // A create operation will succeed if no spec tag has already been defined and the // spec field is set. Delete will remove both spec and status elements from the // image stream. @@ -701,10 +704,13 @@ type ImageStreamImportSpec struct { // ImageStreamImportStatus contains information about the status of an image stream import. type ImageStreamImportStatus struct { // import is the image stream that was successfully updated or created when 'to' was set. 
+ // +optional Import *ImageStream `json:"import,omitempty" protobuf:"bytes,1,opt,name=import"` // repository is set if spec.repository was set to the outcome of the import + // +optional Repository *RepositoryImportStatus `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"` // images is set with the result of importing spec.images + // +optional Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"` } diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go index e0720bec77..9671768f6e 100644 --- a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go @@ -25,7 +25,7 @@ func (DockerImageReference) SwaggerDoc() map[string]string { } var map_Image = map[string]string{ - "": "Image is an immutable representation of a container image and metadata at a point in time. Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users instead access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Image is an immutable representation of a container image and its metadata at a point in time. Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users, instead, access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "dockerImageReference": "dockerImageReference is the string that can be used to pull this image.", "dockerImageMetadata": "dockerImageMetadata contains metadata about this image", @@ -284,7 +284,7 @@ func (ImageStreamTagList) SwaggerDoc() map[string]string { } var map_ImageTag = map[string]string{ - "": "ImageTag represents a single tag within an image stream and includes the spec, the status history, and the currently referenced image (if any) of the provided tag. This type replaces the ImageStreamTag by providing a full view of the tag. ImageTags are returned for every spec or status tag present on the image stream. If no tag exists in either form a not found error will be returned by the API. A create operation will succeed if no spec tag has already been defined and the spec field is set. 
Delete will remove both spec and status elements from the image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ImageTag represents a single tag within an image stream and includes the spec, the status history, and the currently referenced image (if any) of the provided tag. This type replaces the ImageStreamTag by providing a full view of the tag. ImageTags are returned for every spec or status tag present on the image stream. If no tag exists in either form, a not found error will be returned by the API. A create operation will succeed if no spec tag has already been defined and the spec field is set. Delete will remove both spec and status elements from the image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.", "status": "status is the status tag details associated with this image stream tag, and it may be null if no push or import has been performed.", diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types.go b/vendor/github.com/openshift/api/imageregistry/v1/types.go index 3dde9d4c3c..e4f5319460 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/types.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/types.go @@ -129,9 +129,11 @@ type ImageRegistryStatus struct { operatorv1.OperatorStatus `json:",inline"` // storageManaged is deprecated, please refer to Storage.managementState + // +required StorageManaged bool `json:"storageManaged"` // storage indicates the current applied storage configuration of the // registry. + // +required Storage ImageRegistryConfigStorage `json:"storage"` } @@ -483,7 +485,7 @@ type EncryptionAlibaba struct { // +kubebuilder:validation:Enum="KMS";"AES256" // +kubebuilder:default="AES256" // +optional - Method AlibabaEncryptionMethod `json:"method"` + Method AlibabaEncryptionMethod `json:"method,omitempty"` // kms (key management service) is an encryption type that holds the struct for KMS KeyID // +optional diff --git a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go index ead8b20771..409ffc64e0 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go +++ b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go @@ -135,7 +135,7 @@ type ControlPlaneMachineSetTemplate struct { // Currently, the only valid value is machines_v1beta1_machine_openshift_io. // +unionDiscriminator // +required - MachineType ControlPlaneMachineSetMachineType `json:"machineType,omitempty"` + MachineType ControlPlaneMachineSetMachineType `json:"machineType"` // OpenShiftMachineV1Beta1Machine defines the template for creating Machines // from the v1beta1.machine.openshift.io API group. @@ -286,7 +286,6 @@ type FailureDomains struct { VSphere []VSphereFailureDomain `json:"vsphere,omitempty"` // openstack configures failure domain information for the OpenStack platform. - // +optional // // + --- // + Unlike other platforms, OpenStack failure domains can be empty. 
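The machine API hunks in this area all follow one pattern: json tags gain or lose omitempty so that they agree with the field's +required or +optional marker (machineType above; storageContainer, adapterType, lun, securityType, and uefiSettings in the hunks that follow). A short sketch of why a required union discriminator should not carry omitempty, using hypothetical stand-in types:

package main

import (
	"encoding/json"
	"fmt"
)

// discriminatorWithOmitempty mimics the old machineType tag.
type discriminatorWithOmitempty struct {
	MachineType string `json:"machineType,omitempty"`
}

// discriminatorRequired mimics the corrected tag without omitempty.
type discriminatorRequired struct {
	MachineType string `json:"machineType"`
}

func main() {
	a, _ := json.Marshal(discriminatorWithOmitempty{})
	b, _ := json.Marshal(discriminatorRequired{})
	fmt.Println(string(a)) // {} -- the unset discriminator silently disappears
	fmt.Println(string(b)) // {"machineType":""} -- the empty value is visible to validation
}

With omitempty, an unset discriminator vanishes from the payload entirely; without it, the explicit empty string reaches server-side validation and can be rejected.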
diff --git a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go index e2ddde2ad7..446082efb3 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go +++ b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go @@ -238,7 +238,7 @@ type NutanixVMStorageConfig struct { // storageContainer refers to the storage_container used by the VM disk. // +optional - StorageContainer *NutanixStorageResourceIdentifier `json:"storageContainer"` + StorageContainer *NutanixStorageResourceIdentifier `json:"storageContainer,omitempty"` } // NutanixDiskDeviceType is the VM disk device type. @@ -286,7 +286,7 @@ type NutanixVMDiskDeviceProperties struct { // If the deviceType is "Disk", the valid adapterType can be "SCSI", "IDE", "PCI", "SATA" or "SPAPR". // If the deviceType is "CDRom", the valid adapterType can be "IDE" or "SATA". // +required - AdapterType NutanixDiskAdapterType `json:"adapterType,omitempty"` + AdapterType NutanixDiskAdapterType `json:"adapterType"` // deviceIndex is the index of the disk address. The valid values are non-negative integers, with the default value 0. // For a Machine VM, the deviceIndex for the disks with the same deviceType.adapterType combination should diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index d69bcd0233..db15df2cc4 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -140,8 +140,10 @@ type BlockDeviceMappingSpec struct { // https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice type EBSBlockDeviceSpec struct { // Indicates whether the EBS volume is deleted on machine termination. + // + // Deprecated: setting this field has no effect. // +optional - DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` + DeprecatedDeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes // may only be attached to machines that support Amazon EBS encryption. // +optional diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go index 760360bd57..2cb43d4313 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go @@ -368,7 +368,7 @@ type DataDisk struct { // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=63 // +required - Lun int32 `json:"lun,omitempty"` + Lun int32 `json:"lun"` // cachingType specifies the caching requirements. // Empty value means no opinion and the platform chooses a default, which is subject to change over time. // Currently the default is CachingTypeNone. @@ -518,7 +518,7 @@ type SecuritySettings struct { // +kubebuilder:validation:Enum=ConfidentialVM;TrustedLaunch // +required // +unionDiscriminator - SecurityType SecurityTypes `json:"securityType,omitempty"` + SecurityType SecurityTypes `json:"securityType"` // confidentialVM specifies the security configuration of the virtual machine. 
// For more information regarding Confidential VMs, please refer to: // https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview @@ -535,14 +535,14 @@ type SecuritySettings struct { type ConfidentialVM struct { // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine. // +required - UEFISettings UEFISettings `json:"uefiSettings,omitempty"` + UEFISettings UEFISettings `json:"uefiSettings"` } // TrustedLaunch defines the UEFI settings for the virtual machine. type TrustedLaunch struct { // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine. // +required - UEFISettings UEFISettings `json:"uefiSettings,omitempty"` + UEFISettings UEFISettings `json:"uefiSettings"` } // UEFISettings specifies the security settings like secure boot and vTPM used while creating the diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go index 9bd3bdd60b..33f472f926 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go @@ -392,6 +392,7 @@ type MachineStatus struct { // conditions defines the current state of the Machine // +listType=map // +listMapKey=type + // +optional Conditions []Condition `json:"conditions,omitempty"` // authoritativeAPI is the API that is authoritative for this resource. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go index 76c79acb0b..00dbebc9eb 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go @@ -89,7 +89,6 @@ type MachineHealthCheckSpec struct { // Expects an unsigned duration string of decimal numbers each with optional // fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". 
- // +optional // +kubebuilder:default:="10m" // +kubebuilder:validation:Pattern="^0|([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" // +kubebuilder:validation:Type:=string @@ -130,10 +129,12 @@ type UnhealthyCondition struct { type MachineHealthCheckStatus struct { // total number of machines counted by this machine health check // +kubebuilder:validation:Minimum=0 + // +optional ExpectedMachines *int `json:"expectedMachines"` // total number of machines counted by this machine health check // +kubebuilder:validation:Minimum=0 + // +optional CurrentHealthy *int `json:"currentHealthy"` // remediationsAllowed is the number of further remediations allowed by this machine health check before diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go index a29977f347..a2343dc398 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go @@ -114,6 +114,7 @@ type MachineTemplateSpec struct { // +openshift:validation:FeatureGateAwareXValidation:featureGate=MachineAPIMigration,rule="!has(oldSelf.synchronizedGeneration) || (has(self.synchronizedGeneration) && self.synchronizedGeneration >= oldSelf.synchronizedGeneration) || (oldSelf.authoritativeAPI == 'Migrating' && self.authoritativeAPI != 'Migrating')",message="synchronizedGeneration must not decrease unless authoritativeAPI is transitioning from Migrating to another value" type MachineSetStatus struct { // replicas is the most recently observed number of replicas. + // +optional Replicas int32 `json:"replicas"` // The number of replicas that have labels matching the labels of the machine template of the MachineSet. // +optional @@ -153,6 +154,7 @@ type MachineSetStatus struct { // conditions defines the current state of the MachineSet // +listType=map // +listMapKey=type + // +optional Conditions []Condition `json:"conditions,omitempty"` // authoritativeAPI is the API that is authoritative for this resource. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go index 812358e89f..b67cb32432 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go @@ -213,7 +213,7 @@ type Condition struct { // This should be when the underlying condition changed. If that is not known, then using the time when // the API field changed is acceptable. // +required - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime metav1.Time `json:"lastTransitionTime"` // The reason for the condition's last transition in CamelCase. // The specific API may choose whether or not this field is considered a guaranteed API. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go index ef8f1a55fe..7763435a9e 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -518,8 +518,8 @@ func (in *DiskSettings) DeepCopy() *DiskSettings { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EBSBlockDeviceSpec) DeepCopyInto(out *EBSBlockDeviceSpec) { *out = *in - if in.DeleteOnTermination != nil { - in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + if in.DeprecatedDeleteOnTermination != nil { + in, out := &in.DeprecatedDeleteOnTermination, &out.DeprecatedDeleteOnTermination *out = new(bool) **out = **in } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index b2e55376fe..2667a0aa24 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -84,7 +84,7 @@ func (BlockDeviceMappingSpec) SwaggerDoc() map[string]string { var map_EBSBlockDeviceSpec = map[string]string{ "": "EBSBlockDeviceSpec describes a block device for an EBS volume. https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice", - "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.", + "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.\n\nDeprecated: setting this field has no effect.", "encrypted": "Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to machines that support Amazon EBS encryption.", "kmsKey": "Indicates the KMS key that should be used to encrypt the Amazon EBS volume.", "iops": "The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the Amazon Elastic Compute Cloud User Guide.\n\nMinimal and maximal IOPS for io1 and gp2 are constrained. 
Please, check https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html for precise boundaries for individual volumes.\n\nCondition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.", diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/register.go b/vendor/github.com/openshift/api/machineconfiguration/v1/register.go index 46cf07a95d..d0a88324f7 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/register.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/register.go @@ -40,6 +40,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &MachineOSBuildList{}, &PinnedImageSet{}, &PinnedImageSetList{}, + &MachineConfigNode{}, + &MachineConfigNodeList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go index 5ba1b9d59a..a6552550a7 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go @@ -222,11 +222,11 @@ type ControllerCertificate struct { // notBefore is the lower boundary for validity // +optional - NotBefore *metav1.Time `json:"notBefore"` + NotBefore *metav1.Time `json:"notBefore,omitempty"` // notAfter is the upper boundary for validity // +optional - NotAfter *metav1.Time `json:"notAfter"` + NotAfter *metav1.Time `json:"notAfter,omitempty"` // bundleFile is the larger bundle a cert comes from // +required @@ -323,7 +323,7 @@ type MachineConfigSpec struct { // config is a Ignition Config object. // +optional - Config runtime.RawExtension `json:"config"` + Config runtime.RawExtension `json:"config,omitempty"` // kernelArguments contains a list of kernel arguments to be added // +listType=atomic @@ -598,7 +598,7 @@ type MachineConfigPoolCondition struct { // status of the condition, one of ('True', 'False', 'Unknown'). // +optional - Status corev1.ConditionStatus `json:"status"` + Status corev1.ConditionStatus `json:"status,omitempty"` // lastTransitionTime is the timestamp corresponding to the last status // change of this condition. @@ -742,7 +742,7 @@ type KubeletConfigCondition struct { // status of the condition, one of True, False, Unknown. // +optional - Status corev1.ConditionStatus `json:"status"` + Status corev1.ConditionStatus `json:"status,omitempty"` // lastTransitionTime is the time of the last update to the current status object. // +optional @@ -878,7 +878,7 @@ type ContainerRuntimeConfigCondition struct { // status of the condition, one of True, False, Unknown. // +optional - Status corev1.ConditionStatus `json:"status"` + Status corev1.ConditionStatus `json:"status,omitempty"` // lastTransitionTime is the time of the last update to the current status object. 
// +nullable diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineconfignode.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineconfignode.go new file mode 100644 index 0000000000..a1d37f0c9a --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineconfignode.go @@ -0,0 +1,245 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=machineconfignodes,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2255 +// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=machine-config,operatorOrdering=01 +// +openshift:enable:FeatureGate=MachineConfigNodes +// +kubebuilder:printcolumn:name="PoolName",type="string",JSONPath=.spec.pool.name,priority=0 +// +kubebuilder:printcolumn:name="DesiredConfig",type="string",JSONPath=.spec.configVersion.desired,priority=0 +// +kubebuilder:printcolumn:name="CurrentConfig",type="string",JSONPath=.status.configVersion.current,priority=0 +// +kubebuilder:printcolumn:name="Updated",type="string",JSONPath=.status.conditions[?(@.type=="Updated")].status,priority=0 +// +kubebuilder:printcolumn:name="UpdatePrepared",type="string",JSONPath=.status.conditions[?(@.type=="UpdatePrepared")].status,priority=1 +// +kubebuilder:printcolumn:name="UpdateExecuted",type="string",JSONPath=.status.conditions[?(@.type=="UpdateExecuted")].status,priority=1 +// +kubebuilder:printcolumn:name="UpdatePostActionComplete",type="string",JSONPath=.status.conditions[?(@.type=="UpdatePostActionComplete")].status,priority=1 +// +kubebuilder:printcolumn:name="UpdateComplete",type="string",JSONPath=.status.conditions[?(@.type=="UpdateComplete")].status,priority=1 +// +kubebuilder:printcolumn:name="Resumed",type="string",JSONPath=.status.conditions[?(@.type=="Resumed")].status,priority=1 +// +kubebuilder:printcolumn:name="UpdatedFilesAndOS",type="string",JSONPath=.status.conditions[?(@.type=="AppliedFilesAndOS")].status,priority=1 +// +kubebuilder:printcolumn:name="CordonedNode",type="string",JSONPath=.status.conditions[?(@.type=="Cordoned")].status,priority=1 +// +kubebuilder:printcolumn:name="DrainedNode",type="string",JSONPath=.status.conditions[?(@.type=="Drained")].status,priority=1 +// +kubebuilder:printcolumn:name="RebootedNode",type="string",JSONPath=.status.conditions[?(@.type=="RebootedNode")].status,priority=1 +// +kubebuilder:printcolumn:name="UncordonedNode",type="string",JSONPath=.status.conditions[?(@.type=="Uncordoned")].status,priority=1 +// +kubebuilder:metadata:labels=openshift.io/operator-managed= + +// MachineConfigNode describes the health of the Machines on the system +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +kubebuilder:validation:XValidation:rule="self.metadata.name == self.spec.node.name",message="spec.node.name should match metadata.name" +type MachineConfigNode struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec describes the configuration of the machine config node. 
+ // +required + Spec MachineConfigNodeSpec `json:"spec"` + + // status describes the last observed state of this machine config node. + // +optional + Status MachineConfigNodeStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineConfigNodeList describes all of the MachineConfigNodes on the system +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type MachineConfigNodeList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata"` + + // items contains a collection of MachineConfigNode resources. + // +kubebuilder:validation:MaxItems=100 + // +optional + Items []MachineConfigNode `json:"items"` +} + +// MCOObjectReference holds information about an object the MCO either owns +// or modifies in some way +type MCOObjectReference struct { + // name is the name of the object being referenced. For example, this can represent a machine + // config pool or node name. + // Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting + // of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end + // with an alphanumeric character, and be at most 253 characters in length. + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +required + Name string `json:"name"` +} + +// MachineConfigNodeSpec describes the MachineConfigNode we are managing. +type MachineConfigNodeSpec struct { + // node contains a reference to the node for this machine config node. + // +required + Node MCOObjectReference `json:"node"` + + // pool contains a reference to the machine config pool that this machine config node's + // referenced node belongs to. + // +required + Pool MCOObjectReference `json:"pool"` + + // configVersion holds the desired config version for the node targeted by this machine config node resource. + // The desired version represents the machine config the node will attempt to update to and gets set before the machine config operator validates + // the new machine config against the current machine config. + // +required + ConfigVersion MachineConfigNodeSpecMachineConfigVersion `json:"configVersion"` +} + +// MachineConfigNodeStatus holds the reported information on a particular machine config node. +type MachineConfigNodeStatus struct { + // conditions represent the observations of a machine config node's current state. Valid types are: + // UpdatePrepared, UpdateExecuted, UpdatePostActionComplete, UpdateComplete, Updated, Resumed, + // Drained, AppliedFilesAndOS, Cordoned, Uncordoned, RebootedNode, NodeDegraded, PinnedImageSetsProgressing, + // and PinnedImageSetsDegraded. + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=20 + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + // observedGeneration represents the generation of the MachineConfigNode object observed by the Machine Config Operator's controller. + // This field is updated when the controller observes a change to the desiredConfig in the configVersion of the machine config node spec.
+ // +kubebuilder:validation:XValidation:rule="self >= oldSelf", message="observedGeneration must not decrease" + // +kubebuilder:validation:Minimum=1 + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // configVersion describes the current and desired machine config version for this node. + // +optional + ConfigVersion *MachineConfigNodeStatusMachineConfigVersion `json:"configVersion,omitempty"` + // pinnedImageSets describes the current and desired pinned image sets for this node. + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=100 + // +optional + PinnedImageSets []MachineConfigNodeStatusPinnedImageSet `json:"pinnedImageSets,omitempty"` +} + +// MachineConfigNodeStatusPinnedImageSet holds information about the current, desired, and failed pinned image sets for the observed machine config node. +// +kubebuilder:validation:XValidation:rule="has(self.desiredGeneration) && has(self.currentGeneration) ? self.desiredGeneration >= self.currentGeneration : true",message="desired generation must be greater than or equal to the current generation" +// +kubebuilder:validation:XValidation:rule="has(self.lastFailedGeneration) && has(self.desiredGeneration) ? self.desiredGeneration >= self.lastFailedGeneration : true",message="desired generation must be greater than or equal to the last failed generation" +// +kubebuilder:validation:XValidation:rule="has(self.lastFailedGeneration) ? has(self.lastFailedGenerationError) : true",message="last failed generation error must be defined on image pull and pin failure" +type MachineConfigNodeStatusPinnedImageSet struct { + // name is the name of the pinned image set. + // Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting + // of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end + // with an alphanumeric character, and be at most 253 characters in length. + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +required + Name string `json:"name"` + // currentGeneration is the generation of the pinned image set that has most recently been successfully pulled and pinned on this node. + // +kubebuilder:validation:XValidation:rule="self >= oldSelf", message="currentGeneration must not decrease" + // +kubebuilder:validation:Minimum=1 + // +optional + CurrentGeneration int32 `json:"currentGeneration,omitempty"` + // desiredGeneration is the generation of the pinned image set that is targeted to be pulled and pinned on this node. + // +kubebuilder:validation:XValidation:rule="self >= oldSelf", message="desiredGeneration must not decrease" + // +kubebuilder:validation:Minimum=1 + // +optional + DesiredGeneration int32 `json:"desiredGeneration,omitempty"` + // lastFailedGeneration is the generation of the most recent pinned image set that failed to be pulled and pinned on this node. + // +kubebuilder:validation:XValidation:rule="self >= oldSelf", message="lastFailedGeneration must not decrease" + // +kubebuilder:validation:Minimum=1 + // +optional + LastFailedGeneration int32 `json:"lastFailedGeneration,omitempty"` + // lastFailedGenerationError is the error explaining why the desired images failed to be pulled and pinned. 
+ // The error is an empty string if the image pull and pin is successful. + // +kubebuilder:validation:MaxLength=32768 + // +optional + LastFailedGenerationError string `json:"lastFailedGenerationError,omitempty"` +} + +// MachineConfigNodeStatusMachineConfigVersion holds the current and desired config versions as last updated in the MCN status. +// When the current and desired versions do not match, the machine config pool is processing an upgrade and the machine config node will +// monitor the upgrade process. +// When the current and desired versions do match, the machine config node will ignore these events given that certain operations +// happen both during the MCO's upgrade mode and the daily operations mode. +type MachineConfigNodeStatusMachineConfigVersion struct { + // current is the name of the machine config currently in use on the node. + // This value is updated once the machine config daemon has completed the update of the configuration for the node. + // This value should match the desired version unless an upgrade is in progress. + // Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting + // of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end + // with an alphanumeric character, and be at most 253 characters in length. + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +optional + Current string `json:"current"` + // desired is the MachineConfig the node wants to upgrade to. + // This value gets set in the machine config node status once the machine config has been validated + // against the current machine config. + // Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting + // of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end + // with an alphanumeric character, and be at most 253 characters in length. + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +required + Desired string `json:"desired"` +} + +// MachineConfigNodeSpecMachineConfigVersion holds the desired config version for the current observed machine config node. +// When Current is not equal to Desired, the MachineConfigOperator is in an upgrade phase and the machine config node will +// take account of upgrade related events. Otherwise, they will be ignored given that certain operations +// happen both during the MCO's upgrade mode and the daily operations mode. +type MachineConfigNodeSpecMachineConfigVersion struct { + // desired is the name of the machine config that the node should be upgraded to. + // This value is set when the machine config pool generates a new version of its rendered configuration. + // When this value is changed, the machine config daemon starts the node upgrade process. + // This value gets set in the machine config node spec once the machine config has been targeted for upgrade and before it is validated.
+ // Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting + // of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end + // with an alphanumeric character, and be at most 253 characters in length. + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +required + Desired string `json:"desired"` +} + +// StateProgress is each possible state for each possible MachineConfigNodeType +// +enum +type StateProgress string + +const ( + // MachineConfigNodeUpdatePrepared describes a machine that is preparing in the daemon to trigger an update + MachineConfigNodeUpdatePrepared StateProgress = "UpdatePrepared" + // MachineConfigNodeUpdateExecuted describes a machine that has executed the body of the upgrade + MachineConfigNodeUpdateExecuted StateProgress = "UpdateExecuted" + // MachineConfigNodeUpdatePostActionComplete describes a machine that has executed its post update action + MachineConfigNodeUpdatePostActionComplete StateProgress = "UpdatePostActionComplete" + // MachineConfigNodeUpdateComplete describes a machine that has completed the core parts of an upgrade + MachineConfigNodeUpdateComplete StateProgress = "UpdateComplete" + // MachineConfigNodeUpdated describes a machine that is fully updated and has a matching desired and current config + MachineConfigNodeUpdated StateProgress = "Updated" + // MachineConfigNodeResumed describes a machine that has resumed normal processes + MachineConfigNodeResumed StateProgress = "Resumed" + // MachineConfigNodeUpdateDrained describes the part of the in progress phase where the node drains + MachineConfigNodeUpdateDrained StateProgress = "Drained" + // MachineConfigNodeUpdateFilesAndOS describes the part of the in progress phase where the node's files and OS config change + MachineConfigNodeUpdateFilesAndOS StateProgress = "AppliedFilesAndOS" + // MachineConfigNodeUpdateCordoned describes the part of the in progress phase where the node cordons + MachineConfigNodeUpdateCordoned StateProgress = "Cordoned" + // MachineConfigNodeUpdateUncordoned describes the part of the completing phase where the node uncordons + MachineConfigNodeUpdateUncordoned StateProgress = "Uncordoned" + // MachineConfigNodeUpdateRebooted describes the part of the post action phase where the node reboots itself + MachineConfigNodeUpdateRebooted StateProgress = "RebootedNode" + // MachineConfigNodeNodeDegraded describes a machine that has failed to update to the desired machine config and is in a degraded state + MachineConfigNodeNodeDegraded StateProgress = "NodeDegraded" + // MachineConfigNodePinnedImageSetsProgressing describes a machine currently progressing to the desired pinned image sets + MachineConfigNodePinnedImageSetsProgressing StateProgress = "PinnedImageSetsProgressing" + // MachineConfigNodePinnedImageSetsDegraded describes a machine that has failed to progress to the desired pinned image sets + MachineConfigNodePinnedImageSetsDegraded StateProgress = "PinnedImageSetsDegraded" +) diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineosbuild.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineosbuild.go index beea7e7dcd..4a1681fb5e 100644 ---
a/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineosbuild.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineosbuild.go @@ -179,10 +179,10 @@ type ObjectReference struct { Group string `json:"group"` // resource of the referent. // This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, - // and should start and end with an alphanumeric character. + // and should start with an alphabetic character and end with an alphanumeric character. // Example: "deployments", "deploymentconfigs", "pods", etc. // +required - // +kubebuilder:validation:XValidation:rule=`!format.dns1123Label().validate(self).hasValue()`,message="the value must consist of only lowercase alphanumeric characters and hyphens" + // +kubebuilder:validation:XValidation:rule=`!format.dns1035Label().validate(self).hasValue()`,message="a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 Resource string `json:"resource"` diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go index 36d4fb0c97..f153cc0237 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go @@ -589,6 +589,22 @@ func (in *KubeletConfigStatus) DeepCopy() *KubeletConfigStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MCOObjectReference) DeepCopyInto(out *MCOObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MCOObjectReference. +func (in *MCOObjectReference) DeepCopy() *MCOObjectReference { + if in == nil { + return nil + } + out := new(MCOObjectReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineConfig) DeepCopyInto(out *MachineConfig) { *out = *in @@ -649,6 +665,167 @@ func (in *MachineConfigList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNode) DeepCopyInto(out *MachineConfigNode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNode. +func (in *MachineConfigNode) DeepCopy() *MachineConfigNode { + if in == nil { + return nil + } + out := new(MachineConfigNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineConfigNode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineConfigNodeList) DeepCopyInto(out *MachineConfigNodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineConfigNode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeList. +func (in *MachineConfigNodeList) DeepCopy() *MachineConfigNodeList { + if in == nil { + return nil + } + out := new(MachineConfigNodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineConfigNodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNodeSpec) DeepCopyInto(out *MachineConfigNodeSpec) { + *out = *in + out.Node = in.Node + out.Pool = in.Pool + out.ConfigVersion = in.ConfigVersion + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeSpec. +func (in *MachineConfigNodeSpec) DeepCopy() *MachineConfigNodeSpec { + if in == nil { + return nil + } + out := new(MachineConfigNodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNodeSpecMachineConfigVersion) DeepCopyInto(out *MachineConfigNodeSpecMachineConfigVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeSpecMachineConfigVersion. +func (in *MachineConfigNodeSpecMachineConfigVersion) DeepCopy() *MachineConfigNodeSpecMachineConfigVersion { + if in == nil { + return nil + } + out := new(MachineConfigNodeSpecMachineConfigVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNodeStatus) DeepCopyInto(out *MachineConfigNodeStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConfigVersion != nil { + in, out := &in.ConfigVersion, &out.ConfigVersion + *out = new(MachineConfigNodeStatusMachineConfigVersion) + **out = **in + } + if in.PinnedImageSets != nil { + in, out := &in.PinnedImageSets, &out.PinnedImageSets + *out = make([]MachineConfigNodeStatusPinnedImageSet, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeStatus. +func (in *MachineConfigNodeStatus) DeepCopy() *MachineConfigNodeStatus { + if in == nil { + return nil + } + out := new(MachineConfigNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNodeStatusMachineConfigVersion) DeepCopyInto(out *MachineConfigNodeStatusMachineConfigVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeStatusMachineConfigVersion. 
+func (in *MachineConfigNodeStatusMachineConfigVersion) DeepCopy() *MachineConfigNodeStatusMachineConfigVersion { + if in == nil { + return nil + } + out := new(MachineConfigNodeStatusMachineConfigVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNodeStatusPinnedImageSet) DeepCopyInto(out *MachineConfigNodeStatusPinnedImageSet) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeStatusPinnedImageSet. +func (in *MachineConfigNodeStatusPinnedImageSet) DeepCopy() *MachineConfigNodeStatusPinnedImageSet { + if in == nil { + return nil + } + out := new(MachineConfigNodeStatusPinnedImageSet) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineConfigPool) DeepCopyInto(out *MachineConfigPool) { *out = *in diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml index 882717e778..c290c60eba 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml @@ -29,16 +29,14 @@ controllerconfigs.machineconfiguration.openshift.io: Category: "" FeatureGates: - AWSClusterHostedDNS - - BareMetalLoadBalancer - DualReplica - DyanmicServiceEndpointIBMCloud - GCPClusterHostedDNS - GCPCustomAPIEndpoints - - GCPLabelsTags - HighlyAvailableArbiter + - HighlyAvailableArbiter+DualReplica - NutanixMultiSubnets - VSphereMultiNetworks - - VSphereMultiVCenters FilenameOperatorName: machine-config FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_80" @@ -111,6 +109,82 @@ machineconfigs.machineconfiguration.openshift.io: TopLevelFeatureGates: [] Version: v1 +machineconfignodes.machineconfiguration.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2255 + CRDName: machineconfignodes.machineconfiguration.openshift.io + Capability: "" + Category: "" + FeatureGates: + - MachineConfigNodes + FilenameOperatorName: machine-config + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_80" + GroupName: machineconfiguration.openshift.io + HasStatus: true + KindName: MachineConfigNode + Labels: + openshift.io/operator-managed: "" + PluralName: machineconfignodes + PrinterColumns: + - jsonPath: .spec.pool.name + name: PoolName + type: string + - jsonPath: .spec.configVersion.desired + name: DesiredConfig + type: string + - jsonPath: .status.configVersion.current + name: CurrentConfig + type: string + - jsonPath: .status.conditions[?(@.type=="Updated")].status + name: Updated + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePrepared")].status + name: UpdatePrepared + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateExecuted")].status + name: UpdateExecuted + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePostActionComplete")].status + name: UpdatePostActionComplete + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateComplete")].status + name: UpdateComplete + priority: 1 + type: string + - jsonPath: 
.status.conditions[?(@.type=="Resumed")].status + name: Resumed + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="AppliedFilesAndOS")].status + name: UpdatedFilesAndOS + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Cordoned")].status + name: CordonedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Drained")].status + name: DrainedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="RebootedNode")].status + name: RebootedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Uncordoned")].status + name: UncordonedNode + priority: 1 + type: string + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - MachineConfigNodes + Version: v1 + machineconfigpools.machineconfiguration.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/1453 diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go index a5d9ff6f23..92f536b9a8 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go @@ -368,6 +368,91 @@ func (PoolSynchronizerStatus) SwaggerDoc() map[string]string { return map_PoolSynchronizerStatus } +var map_MCOObjectReference = map[string]string{ + "": "MCOObjectReference holds information about an object the MCO either owns or modifies in some way", + "name": "name is the name of the object being referenced. For example, this can represent a machine config pool or node name. Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end with an alphanumeric character, and be at most 253 characters in length.", +} + +func (MCOObjectReference) SwaggerDoc() map[string]string { + return map_MCOObjectReference +} + +var map_MachineConfigNode = map[string]string{ + "": "MachineConfigNode describes the health of the Machines on the system Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object metadata.", + "spec": "spec describes the configuration of the machine config node.", + "status": "status describes the last observed state of this machine config node.", +} + +func (MachineConfigNode) SwaggerDoc() map[string]string { + return map_MachineConfigNode +} + +var map_MachineConfigNodeList = map[string]string{ + "": "MachineConfigNodeList describes all of the MachinesStates on the system\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list metadata.", + "items": "items contains a collection of MachineConfigNode resources.", +} + +func (MachineConfigNodeList) SwaggerDoc() map[string]string { + return map_MachineConfigNodeList +} + +var map_MachineConfigNodeSpec = map[string]string{ + "": "MachineConfigNodeSpec describes the MachineConfigNode we are managing.", + "node": "node contains a reference to the node for this machine config node.", + "pool": "pool contains a reference to the machine config pool that this machine config node's referenced node belongs to.", + "configVersion": "configVersion 
holds the desired config version for the node targeted by this machine config node resource. The desired version represents the machine config the node will attempt to update to and gets set before the machine config operator validates the new machine config against the current machine config.", +} + +func (MachineConfigNodeSpec) SwaggerDoc() map[string]string { + return map_MachineConfigNodeSpec +} + +var map_MachineConfigNodeSpecMachineConfigVersion = map[string]string{ + "": "MachineConfigNodeSpecMachineConfigVersion holds the desired config version for the current observed machine config node. When Current is not equal to Desired, the MachineConfigOperator is in an upgrade phase and the machine config node will take account of upgrade related events. Otherwise, they will be ignored given that certain operations happen both during the MCO's upgrade mode and the daily operations mode.", + "desired": "desired is the name of the machine config that the node should be upgraded to. This value is set when the machine config pool generates a new version of its rendered configuration. When this value is changed, the machine config daemon starts the node upgrade process. This value gets set in the machine config node spec once the machine config has been targeted for upgrade and before it is validated. Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end with an alphanumeric character, and be at most 253 characters in length.", +} + +func (MachineConfigNodeSpecMachineConfigVersion) SwaggerDoc() map[string]string { + return map_MachineConfigNodeSpecMachineConfigVersion +} + +var map_MachineConfigNodeStatus = map[string]string{ + "": "MachineConfigNodeStatus holds the reported information on a particular machine config node.", + "conditions": "conditions represent the observations of a machine config node's current state. Valid types are: UpdatePrepared, UpdateExecuted, UpdatePostActionComplete, UpdateComplete, Updated, Resumed, Drained, AppliedFilesAndOS, Cordoned, Uncordoned, RebootedNode, NodeDegraded, PinnedImageSetsProgressing, and PinnedImageSetsDegraded.", + "observedGeneration": "observedGeneration represents the generation of the MachineConfigNode object observed by the Machine Config Operator's controller. This field is updated when the controller observes a change to the desiredConfig in the configVersion of the machine config node spec.", + "configVersion": "configVersion describes the current and desired machine config version for this node.", + "pinnedImageSets": "pinnedImageSets describes the current and desired pinned image sets for this node.", +} + +func (MachineConfigNodeStatus) SwaggerDoc() map[string]string { + return map_MachineConfigNodeStatus +} + +var map_MachineConfigNodeStatusMachineConfigVersion = map[string]string{ + "": "MachineConfigNodeStatusMachineConfigVersion holds the current and desired config versions as last updated in the MCN status. When the current and desired versions do not match, the machine config pool is processing an upgrade and the machine config node will monitor the upgrade process. When the current and desired versions do match, the machine config node will ignore these events given that certain operations happen both during the MCO's upgrade mode and the daily operations mode.", + "current": "current is the name of the machine config currently in use on the node. 
This value is updated once the machine config daemon has completed the update of the configuration for the node. This value should match the desired version unless an upgrade is in progress. Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end with an alphanumeric character, and be at most 253 characters in length.", + "desired": "desired is the MachineConfig the node wants to upgrade to. This value gets set in the machine config node status once the machine config has been validated against the current machine config. Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end with an alphanumeric character, and be at most 253 characters in length.", +} + +func (MachineConfigNodeStatusMachineConfigVersion) SwaggerDoc() map[string]string { + return map_MachineConfigNodeStatusMachineConfigVersion +} + +var map_MachineConfigNodeStatusPinnedImageSet = map[string]string{ + "": "MachineConfigNodeStatusPinnedImageSet holds information about the current, desired, and failed pinned image sets for the observed machine config node.", + "name": "name is the name of the pinned image set. Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end with an alphanumeric character, and be at most 253 characters in length.", + "currentGeneration": "currentGeneration is the generation of the pinned image set that has most recently been successfully pulled and pinned on this node.", + "desiredGeneration": "desiredGeneration is the generation of the pinned image set that is targeted to be pulled and pinned on this node.", + "lastFailedGeneration": "lastFailedGeneration is the generation of the most recent pinned image set that failed to be pulled and pinned on this node.", + "lastFailedGenerationError": "lastFailedGenerationError is the error explaining why the desired images failed to be pulled and pinned. The error is an empty string if the image pull and pin is successful.", +} + +func (MachineConfigNodeStatusPinnedImageSet) SwaggerDoc() map[string]string { + return map_MachineConfigNodeStatusPinnedImageSet +} + var map_MachineConfigReference = map[string]string{ "": "Refers to the name of a rendered MachineConfig (e.g., \"rendered-worker-ec40d2965ff81bce7cd7a7e82a680739\", etc.): the build targets this MachineConfig, this is often used to tell us whether we need an update.", "name": "name is the name of the rendered MachineConfig object. This value should be between 10 and 253 characters, and must contain only lowercase alphanumeric characters, hyphens and periods, and should start and end with an alphanumeric character.", @@ -445,7 +530,7 @@ func (MachineOSConfigReference) SwaggerDoc() map[string]string { var map_ObjectReference = map[string]string{ "": "ObjectReference contains enough information to let you inspect or modify the referred object.", "group": "group of the referent. The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. Example: \"\", \"apps\", \"build.openshift.io\", etc.", - "resource": "resource of the referent. 
This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, and should start and end with an alphanumeric character. Example: \"deployments\", \"deploymentconfigs\", \"pods\", etc.", + "resource": "resource of the referent. This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, and should start with an alphabetic character and end with an alphanumeric character. Example: \"deployments\", \"deploymentconfigs\", \"pods\", etc.", "namespace": "namespace of the referent. This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, and should start and end with an alphanumeric character.", "name": "name of the referent. The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character.", } diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go index 7e60fd7cbf..6eb7c4a6e0 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go @@ -90,6 +90,7 @@ type MachineOSBuildStatus struct { // +optional BuilderReference *MachineOSBuilderReference `json:"builderReference"` // relatedObjects is a list of objects that are related to the build process. + // +optional RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"` // buildStart describes when the build started. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="buildStart is immutable once set" @@ -98,6 +99,7 @@ type MachineOSBuildStatus struct { // buildEnd describes when the build ended. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="buildEnd is immutable once set" //+optional + // +optional BuildEnd *metav1.Time `json:"buildEnd,omitempty"` // finalImagePushSpec describes the fully qualified pushspec produced by this build that the final image can be. Must be in sha format. 
// +kubebuilder:validation:XValidation:rule=`((self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')))`,message="the OCI Image reference must end with a valid '@sha256:' suffix, where '' is 64 characters long" diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosconfig.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosconfig.go index b992b69d02..c2bd805c47 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosconfig.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosconfig.go @@ -174,7 +174,7 @@ type MachineOSContainerfile struct { // +kubebuilder:validation:Enum:=arm64;amd64;ppc64le;s390x;aarch64;x86_64;noarch // +kubebuilder:default:=noarch // +optional - ContainerfileArch ContainerfileArch `json:"containerfileArch"` + ContainerfileArch ContainerfileArch `json:"containerfileArch,omitempty"` // content is the custom content to be built // +required Content string `json:"content"` diff --git a/vendor/github.com/openshift/api/networkoperator/v1/generated.proto b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto index 1999f71e8d..c83a2fb174 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/generated.proto +++ b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto @@ -51,7 +51,6 @@ message EgressRouter { } // EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface -// +kubebuilder:validation:Required message EgressRouterAddress { // ip is the address to configure on the router's interface. Can be IPv4 or IPv6. // +required @@ -84,7 +83,6 @@ message EgressRouterList { // Mode, networkInterface and addresses fields must be specified along with exactly one "Config" that matches the mode. // Each config consists of parameters specific to that mode. // +k8s:openapi-gen=true -// +kubebuilder:validation:Required message EgressRouterSpec { // mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. // +required diff --git a/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go index 541c3b5597..d5327ffaf6 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go +++ b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go @@ -49,7 +49,6 @@ type EgressRouter struct { // Mode, networkInterface and addresses fields must be specified along with exactly one "Config" that matches the mode. // Each config consists of parameters specific to that mode. // +k8s:openapi-gen=true -// +kubebuilder:validation:Required type EgressRouterSpec struct { // mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. // +required @@ -176,7 +175,6 @@ type MacvlanConfig struct { } // EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface -// +kubebuilder:validation:Required type EgressRouterAddress struct { // ip is the address to configure on the router's interface. Can be IPv4 or IPv6. 
// +required @@ -249,7 +247,7 @@ type EgressRouterStatus struct { // +required // +listType=map // +listMapKey=type - Conditions []EgressRouterStatusCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + Conditions []EgressRouterStatusCondition `json:"conditions" protobuf:"bytes,1,rep,name=conditions"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go index 498f78df6d..0d71e3f8b0 100644 --- a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go @@ -258,7 +258,10 @@ const ( type OpenShiftControllerManagerConfig struct { metav1.TypeMeta `json:",inline"` - KubeClientConfig configv1.KubeClientConfig `json:"kubeClientConfig"` + // KubeClientConfig is no longer being used. + // The field is being ignored by OCM. + // + // KubeClientConfig configv1.KubeClientConfig `json:"kubeClientConfig"` // servingInfo describes how to start serving ServingInfo *configv1.HTTPServingInfo `json:"servingInfo"` diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go index fbb2e5e2b9..4d7774caa4 100644 --- a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go @@ -459,7 +459,6 @@ func (in *OpenShiftAPIServerConfig) DeepCopyObject() runtime.Object { func (in *OpenShiftControllerManagerConfig) DeepCopyInto(out *OpenShiftControllerManagerConfig) { *out = *in out.TypeMeta = in.TypeMeta - out.KubeClientConfig = in.KubeClientConfig if in.ServingInfo != nil { in, out := &in.ServingInfo, &out.ServingInfo *out = new(configv1.HTTPServingInfo) diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go index 4b0c48a103..3a2141abb9 100644 --- a/vendor/github.com/openshift/api/operator/v1/types.go +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -124,6 +124,7 @@ type OperatorStatus struct { Version string `json:"version,omitempty"` // readyReplicas indicates how many replicas are ready and at the desired state + // +optional ReadyReplicas int32 `json:"readyReplicas"` // latestAvailableRevision is the deploymentID of the most recent deployment @@ -207,7 +208,7 @@ type OperatorCondition struct { // +required // +kubebuilder:validation:Type=string // +kubebuilder:validation:Format=date-time - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime metav1.Time `json:"lastTransitionTime"` Reason string `json:"reason,omitempty"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go index bf103f19bb..7cc22d1e4e 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/operator/v1/types_authentication.go @@ -26,7 +26,7 @@ type Authentication struct { metav1.ObjectMeta `json:"metadata,omitempty"` // +required - Spec AuthenticationSpec `json:"spec,omitempty"` + Spec AuthenticationSpec `json:"spec"` // +optional Status AuthenticationStatus `json:"status,omitempty"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go 
b/vendor/github.com/openshift/api/operator/v1/types_console.go index c2f25e4e64..e030a65c86 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_console.go +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -27,7 +27,7 @@ type Console struct { metav1.ObjectMeta `json:"metadata,omitempty"` // +required - Spec ConsoleSpec `json:"spec,omitempty"` + Spec ConsoleSpec `json:"spec"` // +optional Status ConsoleStatus `json:"status,omitempty"` } @@ -203,6 +203,7 @@ type ConfigMapFileReference struct { // +required Key string `json:"key"` } + // FileReferenceSource is used by the console to locate the specified file containing a custom logo. // +kubebuilder:validation:XValidation:rule="has(self.from) && self.from == 'ConfigMap' ? has(self.configMap) : !has(self.configMap)",message="configMap is required when from is 'ConfigMap', and forbidden otherwise." type FileReferenceSource struct { @@ -479,7 +480,6 @@ type PerspectiveVisibility struct { // Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown // +kubebuilder:validation:XValidation:rule="has(self.id) && self.id != 'dev'? !has(self.pinnedResources) : true",message="pinnedResources is allowed only for dev and forbidden for other perspectives" -// +optional type Perspective struct { // id defines the id of the perspective. // Example: "dev", "admin". diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go index b25133a42f..2799904482 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -169,7 +169,6 @@ type AWSCSIDriverConfigSpec struct { KMSKeyARN string `json:"kmsKeyARN,omitempty"` // efsVolumeMetrics sets the configuration for collecting metrics from EFS volumes used by the EFS CSI Driver. - // +openshift:enable:FeatureGate=AWSEFSDriverVolumeMetrics // +optional EFSVolumeMetrics *AWSEFSVolumeMetrics `json:"efsVolumeMetrics,omitempty"` } @@ -348,7 +347,6 @@ type VSphereCSIDriverConfigSpec struct { // Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=32 - // +openshift:enable:FeatureGate=VSphereDriverConfiguration // +optional GlobalMaxSnapshotsPerBlockVolume *uint32 `json:"globalMaxSnapshotsPerBlockVolume,omitempty"` @@ -357,7 +355,6 @@ type VSphereCSIDriverConfigSpec struct { // Snapshots for VSAN can not be disabled using this parameter. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=32 - // +openshift:enable:FeatureGate=VSphereDriverConfiguration // +optional GranularMaxSnapshotsPerBlockVolumeInVSAN *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVSAN,omitempty"` @@ -366,7 +363,6 @@ type VSphereCSIDriverConfigSpec struct { // Snapshots for VVOL can not be disabled using this parameter. 
// +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=32 - // +openshift:enable:FeatureGate=VSphereDriverConfiguration // +optional GranularMaxSnapshotsPerBlockVolumeInVVOL *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVVOL,omitempty"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go index 375ec5fb7f..252f3b3990 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_etcd.go +++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go @@ -38,7 +38,6 @@ type EtcdSpec struct { // Valid values are "", "Standard" and "Slower". // "" means no opinion and the platform is left to choose a reasonable default // which is subject to change without notice. - // +openshift:enable:FeatureGate=HardwareSpeed // +optional HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"` @@ -57,7 +56,8 @@ type EtcdSpec struct { type EtcdStatus struct { StaticPodOperatorStatus `json:",inline"` - HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"` + // +optional + HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"` } const ( diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 240ab12c77..35b50a8fbd 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -1401,7 +1401,7 @@ type IngressControllerCaptureHTTPCookieUnion struct { // // +unionDiscriminator // +required - MatchType CookieMatchType `json:"matchType,omitempty"` + MatchType CookieMatchType `json:"matchType"` // name specifies a cookie name. Its value must be a valid HTTP cookie // name as defined in RFC 6265 section 4.1. @@ -1554,7 +1554,6 @@ type IngressControllerHTTPUniqueIdHeaderPolicy struct { // (for example, "X-Forwarded-For") in the desired capitalization. The value // must be a valid HTTP header name as defined in RFC 2616 section 4.2. // -// +optional // +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" // +kubebuilder:validation:MinLength=0 // +kubebuilder:validation:MaxLength=1024 @@ -2021,17 +2020,21 @@ var ( type IngressControllerStatus struct { // availableReplicas is number of observed available replicas according to the // ingress controller deployment. + // +optional AvailableReplicas int32 `json:"availableReplicas"` // selector is a label selector, in string format, for ingress controller pods // corresponding to the IngressController. The number of matching pods should // equal the value of availableReplicas. + // +optional Selector string `json:"selector"` // domain is the actual domain in use. + // +optional Domain string `json:"domain"` // endpointPublishingStrategy is the actual strategy in use. + // +optional EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"` // conditions is a list of conditions and their status. @@ -2068,6 +2071,7 @@ type IngressControllerStatus struct { // - False if any of those conditions are unsatisfied. // +listType=map // +listMapKey=type + // +optional Conditions []OperatorCondition `json:"conditions,omitempty"` // tlsProfile is the TLS connection configuration that is in effect. 
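Editor's note, not part of the vendored diff: the kubebuilder CEL markers that recur in these hunks (`format.dns1123Subdomain()`, `format.dns1035Label()`, and the `isCIDR(self) && cidr(self).ip().family() == 4` rules on the OVN subnets) are enforced by the API server at admission time, but the same constraints can be pre-checked in operator code. The sketch below is illustrative only; it assumes nothing beyond the standard library and k8s.io/apimachinery (validation.IsDNS1123Subdomain, validation.IsDNS1035Label, netip.ParsePrefix), and the example values are made up.

package main

import (
	"fmt"
	"net/netip"

	"k8s.io/apimachinery/pkg/util/validation"
)

// checkV4Subnet mirrors the spirit of the CEL rule
// `isCIDR(self) && cidr(self).ip().family() == 4` used for the v4 OVN subnets.
func checkV4Subnet(s string) error {
	p, err := netip.ParsePrefix(s)
	if err != nil {
		return fmt.Errorf("%q is not a valid CIDR: %w", s, err)
	}
	if !p.Addr().Is4() {
		return fmt.Errorf("%q is not an IPv4 CIDR", s)
	}
	return nil
}

func main() {
	// RFC 1123 subdomain, as required for MachineConfigNode config version names.
	for _, msg := range validation.IsDNS1123Subdomain("rendered-worker-abc123") {
		fmt.Println("dns1123:", msg)
	}
	// RFC 1035 label, as required for ObjectReference.Resource after this bump.
	for _, msg := range validation.IsDNS1035Label("deployments") {
		fmt.Println("dns1035:", msg)
	}
	// IPv4 CIDR check, as required for internalJoinSubnet / internalTransitSwitchSubnet.
	if err := checkV4Subnet("100.64.0.0/16"); err != nil {
		fmt.Println(err)
	}
}

Failing any of these checks client-side reports the same class of error the API server would return when the CEL validation rules above and below reject the object.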
diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go index ce00b4b62c..7d468755a1 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go @@ -17,7 +17,6 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 -// +openshift:compatibility-gen:level=1 type KubeAPIServer struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go index 4c53734d86..2d88bcd770 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go +++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go @@ -53,7 +53,6 @@ type MachineConfigurationSpec struct { // MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow // for less downtime when doing small configuration updates to the cluster. This configuration // has no effect on cluster upgrades which will still incur node disruption where required. - // +openshift:enable:FeatureGate=NodeDisruptionPolicy // +optional NodeDisruptionPolicy NodeDisruptionPolicyConfig `json:"nodeDisruptionPolicy"` } @@ -94,7 +93,6 @@ type MachineConfigurationStatus struct { // nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, // and will be used by the Machine Config Daemon during future node updates. - // +openshift:enable:FeatureGate=NodeDisruptionPolicy // +optional NodeDisruptionPolicyStatus NodeDisruptionPolicyStatus `json:"nodeDisruptionPolicyStatus"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 713939ddbb..111240eecf 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -431,16 +431,14 @@ type OVNKubernetesConfig struct { // v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else. It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. + // subnet must be larger than the number of nodes. // Default is 100.64.0.0/16 // +optional V4InternalSubnet string `json:"v4InternalSubnet,omitempty"` // v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else. It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. + // subnet must be larger than the number of nodes. // Default is fd98::/64 // +optional V6InternalSubnet string `json:"v6InternalSubnet,omitempty"` @@ -478,11 +476,10 @@ type IPv4OVNKubernetesConfig struct { // architecture that connects the cluster routers on each node together to enable // east west traffic. The subnet chosen should not overlap with other networks // specified for OVN-Kubernetes as well as other networks used on the host. 
- // The value cannot be changed after installation. // When ommitted, this means no opinion and the platform is left to choose a reasonable // default which is subject to change over time. // The current default subnet is 100.88.0.0/16 - // The subnet must be large enough to accomadate one IP per node in your cluster + // The subnet must be large enough to accommodate one IP per node in your cluster // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" @@ -493,10 +490,9 @@ type IPv4OVNKubernetesConfig struct { // internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else. It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. + // subnet must be larger than the number of nodes. // The current default value is 100.64.0.0/16 - // The subnet must be large enough to accomadate one IP per node in your cluster + // The subnet must be large enough to accommodate one IP per node in your cluster // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" @@ -512,10 +508,9 @@ type IPv6OVNKubernetesConfig struct { // architecture that connects the cluster routers on each node together to enable // east west traffic. The subnet chosen should not overlap with other networks // specified for OVN-Kubernetes as well as other networks used on the host. - // The value cannot be changed after installation. // When ommitted, this means no opinion and the platform is left to choose a reasonable // default which is subject to change over time. - // The subnet must be large enough to accomadate one IP per node in your cluster + // The subnet must be large enough to accommodate one IP per node in your cluster // The current default subnet is fd97::/64 // The value must be in proper IPV6 CIDR format // Note that IPV6 dual addresses are not permitted @@ -527,9 +522,8 @@ type IPv6OVNKubernetesConfig struct { // internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else. It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. - // The subnet must be large enough to accomadate one IP per node in your cluster + // subnet must be larger than the number of nodes. + // The subnet must be large enough to accommodate one IP per node in your cluster // The current default value is fd98::/64 // The value must be in proper IPV6 CIDR format // Note that IPV6 dual addresses are not permitted @@ -646,7 +640,7 @@ type IPv4GatewayConfig struct { // OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must // be large enough to accommodate 6 IPs (maximum prefix length /29). // When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. 
- // The current default subnet is 169.254.169.0/29 + // The current default subnet is 169.254.0.0/17 // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" @@ -665,7 +659,7 @@ type IPv6GatewayConfig struct { // OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must // be large enough to accommodate 6 IPs (maximum prefix length /125). // When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. - // The current default subnet is fd69::/125 + // The current default subnet is fd69::/112 // Note that IPV6 dual addresses are not permitted // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format" // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive" diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index 81f2a87a99..5b2ca202f9 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -69,9 +69,7 @@ clustercsidrivers.operator.openshift.io: Capability: "" Category: "" FeatureGates: - - AWSEFSDriverVolumeMetrics - VSphereConfigurableMaxAllowedBlockVolumesPerNode - - VSphereDriverConfiguration FilenameOperatorName: csi-driver FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_50" @@ -157,7 +155,6 @@ etcds.operator.openshift.io: Category: coreoperators FeatureGates: - EtcdBackendQuota - - HardwareSpeed FilenameOperatorName: etcd FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_12" @@ -309,7 +306,6 @@ machineconfigurations.operator.openshift.io: Category: "" FeatureGates: - ManagedBootImages - - NodeDisruptionPolicy FilenameOperatorName: machine-config FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_80" diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index a0fa4fe475..582f9686ff 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -1712,7 +1712,7 @@ func (IPsecFullModeConfig) SwaggerDoc() map[string]string { var map_IPv4GatewayConfig = map[string]string{ "": "IPV4GatewayConfig holds the configuration paramaters for IPV4 connections in the GatewayConfig for OVN-Kubernetes", - "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). 
When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.169.0/29 The value must be in proper IPV4 CIDR format", + "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.0.0/17 The value must be in proper IPV4 CIDR format", } func (IPv4GatewayConfig) SwaggerDoc() map[string]string { @@ -1720,8 +1720,8 @@ func (IPv4GatewayConfig) SwaggerDoc() map[string]string { } var map_IPv4OVNKubernetesConfig = map[string]string{ - "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. The value cannot be changed after installation. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 100.88.0.0/16 The subnet must be large enough to accomadate one IP per node in your cluster The value must be in proper IPV4 CIDR format", - "internalJoinSubnet": "internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. The current default value is 100.64.0.0/16 The subnet must be large enough to accomadate one IP per node in your cluster The value must be in proper IPV4 CIDR format", + "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 100.88.0.0/16 The subnet must be large enough to accommodate one IP per node in your cluster The value must be in proper IPV4 CIDR format", + "internalJoinSubnet": "internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. 
The current default value is 100.64.0.0/16 The subnet must be large enough to accommodate one IP per node in your cluster The value must be in proper IPV4 CIDR format", } func (IPv4OVNKubernetesConfig) SwaggerDoc() map[string]string { @@ -1730,7 +1730,7 @@ func (IPv4OVNKubernetesConfig) SwaggerDoc() map[string]string { var map_IPv6GatewayConfig = map[string]string{ "": "IPV6GatewayConfig holds the configuration paramaters for IPV6 connections in the GatewayConfig for OVN-Kubernetes", - "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/125 Note that IPV6 dual addresses are not permitted", + "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/112 Note that IPV6 dual addresses are not permitted", } func (IPv6GatewayConfig) SwaggerDoc() map[string]string { @@ -1738,8 +1738,8 @@ func (IPv6GatewayConfig) SwaggerDoc() map[string]string { } var map_IPv6OVNKubernetesConfig = map[string]string{ - "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. The value cannot be changed after installation. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The subnet must be large enough to accomadate one IP per node in your cluster The current default subnet is fd97::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", - "internalJoinSubnet": "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. 
The subnet must be large enough to accomadate one IP per node in your cluster The current default value is fd98::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", + "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The subnet must be large enough to accommodate one IP per node in your cluster The current default subnet is fd97::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", + "internalJoinSubnet": "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The subnet must be large enough to accommodate one IP per node in your cluster The current default value is fd98::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", } func (IPv6OVNKubernetesConfig) SwaggerDoc() map[string]string { @@ -1840,8 +1840,8 @@ var map_OVNKubernetesConfig = map[string]string{ "ipsecConfig": "ipsecConfig enables and configures IPsec for pods on the pod network within the cluster.", "policyAuditConfig": "policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used.", "gatewayConfig": "gatewayConfig holds the configuration for node gateway options.", - "v4InternalSubnet": "v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is 100.64.0.0/16", - "v6InternalSubnet": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/64", + "v4InternalSubnet": "v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. Default is 100.64.0.0/16", + "v6InternalSubnet": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. Default is fd98::/64", "egressIPConfig": "egressIPConfig holds the configuration for EgressIP options.", "ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. 
When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.", "ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.", diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go index fe56b0eab2..dc5d81cea1 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go @@ -41,7 +41,6 @@ type EtcdBackupSpec struct { PVCName string `json:"pvcName"` } -// +kubebuilder:validation:Optional type EtcdBackupStatus struct { // conditions provide details on the status of the etcd backup job. // +listType=map diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto index adfce0d19f..e055eb0d26 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.proto +++ b/vendor/github.com/openshift/api/route/v1/generated.proto @@ -366,6 +366,7 @@ message RouteStatus { // ingress points may contain duplicate Host or RouterName values. Routes // are considered live once they are `Ready` // +listType=atomic + // +optional repeated RouteIngress ingress = 1; } diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go index 2feb425a2c..5a61f477e7 100644 --- a/vendor/github.com/openshift/api/route/v1/types.go +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -354,6 +354,7 @@ type RouteStatus struct { // ingress points may contain duplicate Host or RouterName values. Routes // are considered live once they are `Ready` // +listType=atomic + // +optional Ingress []RouteIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } diff --git a/vendor/github.com/openshift/api/samples/v1/generated.proto b/vendor/github.com/openshift/api/samples/v1/generated.proto index eeda6835ae..95220f74b5 100644 --- a/vendor/github.com/openshift/api/samples/v1/generated.proto +++ b/vendor/github.com/openshift/api/samples/v1/generated.proto @@ -133,12 +133,14 @@ message ConfigStatus { // the operator back on (i.e. "Managed") when it was previously "Unmanaged". // +patchMergeKey=type // +patchStrategy=merge + // +optional optional string managementState = 1; // conditions represents the available maintenance status of the sample // imagestreams and templates. // +patchMergeKey=type // +patchStrategy=merge + // +optional repeated ConfigCondition conditions = 2; // samplesRegistry allows for the specification of which registry is accessed @@ -147,12 +149,14 @@ message ConfigStatus { // defaults to registry.redhat.io. // +patchMergeKey=type // +patchStrategy=merge + // +optional optional string samplesRegistry = 3; // architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the // supported choices. // +patchMergeKey=type // +patchStrategy=merge + // +optional repeated string architectures = 5; // skippedImagestreams specifies names of image streams that should NOT be @@ -162,6 +166,7 @@ message ConfigStatus { // listed here. 
// +patchMergeKey=type // +patchStrategy=merge + // +optional repeated string skippedImagestreams = 6; // skippedTemplates specifies names of templates that should NOT be @@ -171,11 +176,13 @@ message ConfigStatus { // listed here. // +patchMergeKey=type // +patchStrategy=merge + // +optional repeated string skippedTemplates = 7; // version is the value of the operator's payload based version indicator when it was last successfully processed // +patchMergeKey=type // +patchStrategy=merge + // +optional optional string version = 8; } diff --git a/vendor/github.com/openshift/api/samples/v1/types_config.go b/vendor/github.com/openshift/api/samples/v1/types_config.go index 320500b0ce..8d2cf7df50 100644 --- a/vendor/github.com/openshift/api/samples/v1/types_config.go +++ b/vendor/github.com/openshift/api/samples/v1/types_config.go @@ -104,11 +104,13 @@ type ConfigStatus struct { // the operator back on (i.e. "Managed") when it was previously "Unmanaged". // +patchMergeKey=type // +patchStrategy=merge + // +optional ManagementState operatorv1.ManagementState `json:"managementState,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=managementState"` // conditions represents the available maintenance status of the sample // imagestreams and templates. // +patchMergeKey=type // +patchStrategy=merge + // +optional Conditions []ConfigCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` // samplesRegistry allows for the specification of which registry is accessed @@ -117,12 +119,14 @@ type ConfigStatus struct { // defaults to registry.redhat.io. // +patchMergeKey=type // +patchStrategy=merge + // +optional SamplesRegistry string `json:"samplesRegistry,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,3,rep,name=samplesRegistry"` // architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the // supported choices. // +patchMergeKey=type // +patchStrategy=merge + // +optional Architectures []string `json:"architectures,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=architectures"` // skippedImagestreams specifies names of image streams that should NOT be @@ -132,6 +136,7 @@ type ConfigStatus struct { // listed here. // +patchMergeKey=type // +patchStrategy=merge + // +optional SkippedImagestreams []string `json:"skippedImagestreams,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=skippedImagestreams"` // skippedTemplates specifies names of templates that should NOT be @@ -141,11 +146,13 @@ type ConfigStatus struct { // listed here. 
// +patchMergeKey=type // +patchStrategy=merge + // +optional SkippedTemplates []string `json:"skippedTemplates,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,7,rep,name=skippedTemplates"` // version is the value of the operator's payload based version indicator when it was last successfully processed // +patchMergeKey=type // +patchStrategy=merge + // +optional Version string `json:"version,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=version"` } diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto index 0e6bb094fb..933de5450e 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.proto +++ b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -73,6 +73,7 @@ message PodSecurityPolicyReviewSpec { // PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview. message PodSecurityPolicyReviewStatus { // allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec. + // +optional repeated ServiceAccountPodSecurityPolicyReviewStatus allowedServiceAccounts = 1; } @@ -134,14 +135,17 @@ message PodSecurityPolicySubjectReviewStatus { // allowedBy is a reference to the rule that allows the PodTemplateSpec. // A rule can be a SecurityContextConstraint or a PodSecurityPolicy // A `nil`, indicates that it was denied. + // +optional optional .k8s.io.api.core.v1.ObjectReference allowedBy = 1; // A machine-readable description of why this operation is in the // "Failure" status. If this value is empty there // is no information available. + // +optional optional string reason = 2; // template is the PodTemplateSpec after the defaulting is applied. + // +optional optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go index 18585e97c0..fb491480d7 100644 --- a/vendor/github.com/openshift/api/security/v1/types.go +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -386,14 +386,17 @@ type PodSecurityPolicySubjectReviewStatus struct { // allowedBy is a reference to the rule that allows the PodTemplateSpec. // A rule can be a SecurityContextConstraint or a PodSecurityPolicy // A `nil`, indicates that it was denied. + // +optional AllowedBy *corev1.ObjectReference `json:"allowedBy,omitempty" protobuf:"bytes,1,opt,name=allowedBy"` // A machine-readable description of why this operation is in the // "Failure" status. If this value is empty there // is no information available. + // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` // template is the PodTemplateSpec after the defaulting is applied. + // +optional Template corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` } @@ -465,6 +468,7 @@ type PodSecurityPolicyReviewSpec struct { // PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview. type PodSecurityPolicyReviewStatus struct { // allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec. 
+ // +optional AllowedServiceAccounts []ServiceAccountPodSecurityPolicyReviewStatus `json:"allowedServiceAccounts" protobuf:"bytes,1,rep,name=allowedServiceAccounts"` } diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go index 2a4a0d1b6c..51466b0d4e 100644 --- a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go @@ -47,7 +47,7 @@ type SharedConfigMap struct { // spec is the specification of the desired shared configmap // +required - Spec SharedConfigMapSpec `json:"spec,omitempty"` + Spec SharedConfigMapSpec `json:"spec"` // status is the observed status of the shared configmap Status SharedConfigMapStatus `json:"status,omitempty"` diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go index be06f97749..9551fb4a77 100644 --- a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go @@ -46,7 +46,7 @@ type SharedSecret struct { // spec is the specification of the desired shared secret // +required - Spec SharedSecretSpec `json:"spec,omitempty"` + Spec SharedSecretSpec `json:"spec"` // status is the observed status of the shared secret Status SharedSecretStatus `json:"status,omitempty"` diff --git a/vendor/github.com/openshift/api/template/v1/generated.proto b/vendor/github.com/openshift/api/template/v1/generated.proto index 8f27eb48a0..e1d7a21d46 100644 --- a/vendor/github.com/openshift/api/template/v1/generated.proto +++ b/vendor/github.com/openshift/api/template/v1/generated.proto @@ -241,9 +241,11 @@ message TemplateInstanceSpec { message TemplateInstanceStatus { // conditions represent the latest available observations of a // TemplateInstance's current state. + // +optional repeated TemplateInstanceCondition conditions = 1; // objects references the objects created by the TemplateInstance. + // +optional repeated TemplateInstanceObject objects = 2; } diff --git a/vendor/github.com/openshift/api/template/v1/types.go b/vendor/github.com/openshift/api/template/v1/types.go index 5510b0f90b..e3dee62fae 100644 --- a/vendor/github.com/openshift/api/template/v1/types.go +++ b/vendor/github.com/openshift/api/template/v1/types.go @@ -179,9 +179,11 @@ func (t ExtraValue) String() string { type TemplateInstanceStatus struct { // conditions represent the latest available observations of a // TemplateInstance's current state. + // +optional Conditions []TemplateInstanceCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` // objects references the objects created by the TemplateInstance. 
+ // +optional Objects []TemplateInstanceObject `json:"objects,omitempty" protobuf:"bytes,2,rep,name=objects"` } diff --git a/vendor/github.com/openshift/build-machinery-go/.ci-operator.yaml b/vendor/github.com/openshift/build-machinery-go/.ci-operator.yaml index 1e59c02c25..7c15f83e3e 100644 --- a/vendor/github.com/openshift/build-machinery-go/.ci-operator.yaml +++ b/vendor/github.com/openshift/build-machinery-go/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: release namespace: openshift - tag: rhel-9-release-golang-1.22-openshift-4.17 + tag: rhel-9-release-golang-1.23-openshift-4.19 diff --git a/vendor/github.com/openshift/build-machinery-go/Dockerfile.commitchecker b/vendor/github.com/openshift/build-machinery-go/Dockerfile.commitchecker index 8d7058a155..0651b7e247 100644 --- a/vendor/github.com/openshift/build-machinery-go/Dockerfile.commitchecker +++ b/vendor/github.com/openshift/build-machinery-go/Dockerfile.commitchecker @@ -1,12 +1,12 @@ # This Dockerfile must be on the top-level of this repo, because it needs to copy # both commitchecker/ and make/ into the build container. -FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.22-openshift-4.17 AS builder +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder WORKDIR /go/src/github.com/openshift/build-machinery-go COPY . . RUN make -C commitchecker -FROM registry.ci.openshift.org/ocp/4.17:base-rhel9 +FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 COPY --from=builder /go/src/github.com/openshift/build-machinery-go/commitchecker/commitchecker /usr/bin/ RUN dnf install --setopt=tsflags=nodocs -y git && \ dnf clean all && rm -rf /var/cache/yum/* diff --git a/vendor/github.com/openshift/build-machinery-go/make/targets/golang/verify-update.mk b/vendor/github.com/openshift/build-machinery-go/make/targets/golang/verify-update.mk index 3b71e29ddb..0c72a276c8 100644 --- a/vendor/github.com/openshift/build-machinery-go/make/targets/golang/verify-update.mk +++ b/vendor/github.com/openshift/build-machinery-go/make/targets/golang/verify-update.mk @@ -3,11 +3,12 @@ include $(addprefix $(dir $(lastword $(MAKEFILE_LIST))), \ ) go_files_count :=$(words $(GO_FILES)) +chunk_size :=1000 verify-gofmt: $(info Running `$(GOFMT) $(GOFMT_FLAGS)` on $(go_files_count) file(s).) @TMP=$$( mktemp ); \ - $(GOFMT) $(GOFMT_FLAGS) $(GO_FILES) | tee $${TMP}; \ + find . -name '*.go' -not -path '*/vendor/*' -not -path '*/_output/*' -print | xargs -n $(chunk_size) $(GOFMT) $(GOFMT_FLAGS) | tee $${TMP}; \ if [ -s $${TMP} ]; then \ echo "$@ failed - please run \`make update-gofmt\`"; \ exit 1; \ @@ -16,7 +17,7 @@ verify-gofmt: update-gofmt: $(info Running `$(GOFMT) $(GOFMT_FLAGS) -w` on $(go_files_count) file(s).) - @$(GOFMT) $(GOFMT_FLAGS) -w $(GO_FILES) + @find . 
-name '*.go' -not -path '*/vendor/*' -not -path '*/_output/*' -print | xargs -n $(chunk_size) $(GOFMT) $(GOFMT_FLAGS) -w .PHONY: update-gofmt diff --git a/vendor/github.com/openshift/build-machinery-go/make/targets/openshift/yq.mk b/vendor/github.com/openshift/build-machinery-go/make/targets/openshift/yq.mk index 07726d87cd..0854374fe5 100644 --- a/vendor/github.com/openshift/build-machinery-go/make/targets/openshift/yq.mk +++ b/vendor/github.com/openshift/build-machinery-go/make/targets/openshift/yq.mk @@ -8,8 +8,7 @@ include $(addprefix $(dir $(lastword $(MAKEFILE_LIST))), \ YQ_VERSION ?=2.4.0 YQ ?=$(PERMANENT_TMP_GOPATH)/bin/yq-$(YQ_VERSION) -yq_dir :=$(dir $(YQ)) - +yq_dir =$(dir $(YQ)) ensure-yq: ifeq "" "$(wildcard $(YQ))" diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicy.go new file mode 100644 index 0000000000..1ee4a91fb3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicy.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterImagePolicyApplyConfiguration represents a declarative configuration of the ClusterImagePolicy type for use +// with apply. +type ClusterImagePolicyApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterImagePolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ClusterImagePolicyStatusApplyConfiguration `json:"status,omitempty"` +} + +// ClusterImagePolicy constructs a declarative configuration of the ClusterImagePolicy type for use with +// apply. +func ClusterImagePolicy(name string) *ClusterImagePolicyApplyConfiguration { + b := &ClusterImagePolicyApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterImagePolicy") + b.WithAPIVersion("config.openshift.io/v1") + return b +} + +// ExtractClusterImagePolicy extracts the applied configuration owned by fieldManager from +// clusterImagePolicy. If no managedFields are found in clusterImagePolicy for fieldManager, a +// ClusterImagePolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// clusterImagePolicy must be a unmodified ClusterImagePolicy API object that was retrieved from the Kubernetes API. +// ExtractClusterImagePolicy provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
+func ExtractClusterImagePolicy(clusterImagePolicy *configv1.ClusterImagePolicy, fieldManager string) (*ClusterImagePolicyApplyConfiguration, error) { + return extractClusterImagePolicy(clusterImagePolicy, fieldManager, "") +} + +// ExtractClusterImagePolicyStatus is the same as ExtractClusterImagePolicy except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractClusterImagePolicyStatus(clusterImagePolicy *configv1.ClusterImagePolicy, fieldManager string) (*ClusterImagePolicyApplyConfiguration, error) { + return extractClusterImagePolicy(clusterImagePolicy, fieldManager, "status") +} + +func extractClusterImagePolicy(clusterImagePolicy *configv1.ClusterImagePolicy, fieldManager string, subresource string) (*ClusterImagePolicyApplyConfiguration, error) { + b := &ClusterImagePolicyApplyConfiguration{} + err := managedfields.ExtractInto(clusterImagePolicy, internal.Parser().Type("com.github.openshift.api.config.v1.ClusterImagePolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterImagePolicy.Name) + + b.WithKind("ClusterImagePolicy") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithKind(value string) *ClusterImagePolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithAPIVersion(value string) *ClusterImagePolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithName(value string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithGenerateName(value string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ClusterImagePolicyApplyConfiguration) WithNamespace(value string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithUID(value types.UID) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithResourceVersion(value string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithGeneration(value int64) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *ClusterImagePolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ClusterImagePolicyApplyConfiguration) WithLabels(entries map[string]string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ClusterImagePolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterImagePolicyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *ClusterImagePolicyApplyConfiguration) WithFinalizers(values ...string) *ClusterImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ClusterImagePolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithSpec(value *ClusterImagePolicySpecApplyConfiguration) *ClusterImagePolicyApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ClusterImagePolicyApplyConfiguration) WithStatus(value *ClusterImagePolicyStatusApplyConfiguration) *ClusterImagePolicyApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterImagePolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go new file mode 100644 index 0000000000..6c86d66d47 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go @@ -0,0 +1,38 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// ClusterImagePolicySpecApplyConfiguration represents a declarative configuration of the ClusterImagePolicySpec type for use +// with apply. +type ClusterImagePolicySpecApplyConfiguration struct { + Scopes []configv1.ImageScope `json:"scopes,omitempty"` + Policy *PolicyApplyConfiguration `json:"policy,omitempty"` +} + +// ClusterImagePolicySpecApplyConfiguration constructs a declarative configuration of the ClusterImagePolicySpec type for use with +// apply. +func ClusterImagePolicySpec() *ClusterImagePolicySpecApplyConfiguration { + return &ClusterImagePolicySpecApplyConfiguration{} +} + +// WithScopes adds the given value to the Scopes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Scopes field. +func (b *ClusterImagePolicySpecApplyConfiguration) WithScopes(values ...configv1.ImageScope) *ClusterImagePolicySpecApplyConfiguration { + for i := range values { + b.Scopes = append(b.Scopes, values[i]) + } + return b +} + +// WithPolicy sets the Policy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
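[Editorial aside, not part of the patch] The generated builders vendored above are intended for building declarative intent for server-side apply. A minimal sketch of how they might be used, assuming the vendored module path is importable from the consuming project; the resource name and registry scope below are invented for illustration and do not come from this patch:

package main

import (
	"encoding/json"
	"fmt"

	applycfgv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// Build the apply configuration with the generated "With" builders shown above.
	// "registry.example.com/mirror" is a placeholder scope, not taken from the patch.
	cip := applycfgv1.ClusterImagePolicy("example").
		WithSpec(applycfgv1.ClusterImagePolicySpec().
			WithScopes("registry.example.com/mirror"))

	// Render the declarative intent; in practice an object like this is typically
	// handed to a typed client's Apply call with a field manager set.
	out, err := json.MarshalIndent(cip, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}

The constructor pre-populates kind and apiVersion, so the marshaled output is a valid apply payload containing only the fields explicitly set, which is what lets the field manager own just those fields.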
+// If called multiple times, the Policy field is set to the value of the last call. +func (b *ClusterImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ClusterImagePolicySpecApplyConfiguration { + b.Policy = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicystatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicystatus.go new file mode 100644 index 0000000000..f508f70912 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicystatus.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterImagePolicyStatusApplyConfiguration represents a declarative configuration of the ClusterImagePolicyStatus type for use +// with apply. +type ClusterImagePolicyStatusApplyConfiguration struct { + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ClusterImagePolicyStatusApplyConfiguration constructs a declarative configuration of the ClusterImagePolicyStatus type for use with +// apply. +func ClusterImagePolicyStatus() *ClusterImagePolicyStatusApplyConfiguration { + return &ClusterImagePolicyStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ClusterImagePolicyStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *ClusterImagePolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/extramapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/extramapping.go new file mode 100644 index 0000000000..4100ed7ed3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/extramapping.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ExtraMappingApplyConfiguration represents a declarative configuration of the ExtraMapping type for use +// with apply. +type ExtraMappingApplyConfiguration struct { + Key *string `json:"key,omitempty"` + ValueExpression *string `json:"valueExpression,omitempty"` +} + +// ExtraMappingApplyConfiguration constructs a declarative configuration of the ExtraMapping type for use with +// apply. +func ExtraMapping() *ExtraMappingApplyConfiguration { + return &ExtraMappingApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. 
+func (b *ExtraMappingApplyConfiguration) WithKey(value string) *ExtraMappingApplyConfiguration { + b.Key = &value + return b +} + +// WithValueExpression sets the ValueExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ValueExpression field is set to the value of the last call. +func (b *ExtraMappingApplyConfiguration) WithValueExpression(value string) *ExtraMappingApplyConfiguration { + b.ValueExpression = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/fulciocawithrekor.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/fulciocawithrekor.go new file mode 100644 index 0000000000..48b553580d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/fulciocawithrekor.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// FulcioCAWithRekorApplyConfiguration represents a declarative configuration of the FulcioCAWithRekor type for use +// with apply. +type FulcioCAWithRekorApplyConfiguration struct { + FulcioCAData []byte `json:"fulcioCAData,omitempty"` + RekorKeyData []byte `json:"rekorKeyData,omitempty"` + FulcioSubject *PolicyFulcioSubjectApplyConfiguration `json:"fulcioSubject,omitempty"` +} + +// FulcioCAWithRekorApplyConfiguration constructs a declarative configuration of the FulcioCAWithRekor type for use with +// apply. +func FulcioCAWithRekor() *FulcioCAWithRekorApplyConfiguration { + return &FulcioCAWithRekorApplyConfiguration{} +} + +// WithFulcioCAData adds the given value to the FulcioCAData field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the FulcioCAData field. +func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioCAData(values ...byte) *FulcioCAWithRekorApplyConfiguration { + for i := range values { + b.FulcioCAData = append(b.FulcioCAData, values[i]) + } + return b +} + +// WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RekorKeyData field. +func (b *FulcioCAWithRekorApplyConfiguration) WithRekorKeyData(values ...byte) *FulcioCAWithRekorApplyConfiguration { + for i := range values { + b.RekorKeyData = append(b.RekorKeyData, values[i]) + } + return b +} + +// WithFulcioSubject sets the FulcioSubject field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FulcioSubject field is set to the value of the last call. 
+func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioSubject(value *PolicyFulcioSubjectApplyConfiguration) *FulcioCAWithRekorApplyConfiguration { + b.FulcioSubject = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicy.go new file mode 100644 index 0000000000..6ccc3746ae --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicy.go @@ -0,0 +1,248 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImagePolicyApplyConfiguration represents a declarative configuration of the ImagePolicy type for use +// with apply. +type ImagePolicyApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ImagePolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ImagePolicyStatusApplyConfiguration `json:"status,omitempty"` +} + +// ImagePolicy constructs a declarative configuration of the ImagePolicy type for use with +// apply. +func ImagePolicy(name, namespace string) *ImagePolicyApplyConfiguration { + b := &ImagePolicyApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ImagePolicy") + b.WithAPIVersion("config.openshift.io/v1") + return b +} + +// ExtractImagePolicy extracts the applied configuration owned by fieldManager from +// imagePolicy. If no managedFields are found in imagePolicy for fieldManager, a +// ImagePolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// imagePolicy must be a unmodified ImagePolicy API object that was retrieved from the Kubernetes API. +// ExtractImagePolicy provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractImagePolicy(imagePolicy *configv1.ImagePolicy, fieldManager string) (*ImagePolicyApplyConfiguration, error) { + return extractImagePolicy(imagePolicy, fieldManager, "") +} + +// ExtractImagePolicyStatus is the same as ExtractImagePolicy except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractImagePolicyStatus(imagePolicy *configv1.ImagePolicy, fieldManager string) (*ImagePolicyApplyConfiguration, error) { + return extractImagePolicy(imagePolicy, fieldManager, "status") +} + +func extractImagePolicy(imagePolicy *configv1.ImagePolicy, fieldManager string, subresource string) (*ImagePolicyApplyConfiguration, error) { + b := &ImagePolicyApplyConfiguration{} + err := managedfields.ExtractInto(imagePolicy, internal.Parser().Type("com.github.openshift.api.config.v1.ImagePolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(imagePolicy.Name) + b.WithNamespace(imagePolicy.Namespace) + + b.WithKind("ImagePolicy") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithKind(value string) *ImagePolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithAPIVersion(value string) *ImagePolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithName(value string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithGenerateName(value string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithNamespace(value string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ImagePolicyApplyConfiguration) WithUID(value types.UID) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithResourceVersion(value string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithGeneration(value int64) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ImagePolicyApplyConfiguration) WithLabels(entries map[string]string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ImagePolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ImagePolicyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ImagePolicyApplyConfiguration) WithFinalizers(values ...string) *ImagePolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ImagePolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ImagePolicyApplyConfiguration) WithSpec(value *ImagePolicySpecApplyConfiguration) *ImagePolicyApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ImagePolicyApplyConfiguration) WithStatus(value *ImagePolicyStatusApplyConfiguration) *ImagePolicyApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImagePolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go new file mode 100644 index 0000000000..b75165c8d0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go @@ -0,0 +1,38 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// ImagePolicySpecApplyConfiguration represents a declarative configuration of the ImagePolicySpec type for use +// with apply. +type ImagePolicySpecApplyConfiguration struct { + Scopes []configv1.ImageScope `json:"scopes,omitempty"` + Policy *PolicyApplyConfiguration `json:"policy,omitempty"` +} + +// ImagePolicySpecApplyConfiguration constructs a declarative configuration of the ImagePolicySpec type for use with +// apply. +func ImagePolicySpec() *ImagePolicySpecApplyConfiguration { + return &ImagePolicySpecApplyConfiguration{} +} + +// WithScopes adds the given value to the Scopes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Scopes field. +func (b *ImagePolicySpecApplyConfiguration) WithScopes(values ...configv1.ImageScope) *ImagePolicySpecApplyConfiguration { + for i := range values { + b.Scopes = append(b.Scopes, values[i]) + } + return b +} + +// WithPolicy sets the Policy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Policy field is set to the value of the last call. +func (b *ImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ImagePolicySpecApplyConfiguration { + b.Policy = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicystatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicystatus.go new file mode 100644 index 0000000000..aebb2698c9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicystatus.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImagePolicyStatusApplyConfiguration represents a declarative configuration of the ImagePolicyStatus type for use +// with apply. +type ImagePolicyStatusApplyConfiguration struct { + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ImagePolicyStatusApplyConfiguration constructs a declarative configuration of the ImagePolicyStatus type for use with +// apply. 
+func ImagePolicyStatus() *ImagePolicyStatusApplyConfiguration { + return &ImagePolicyStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ImagePolicyStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *ImagePolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pki.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pki.go new file mode 100644 index 0000000000..65f27edf8e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pki.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PKIApplyConfiguration represents a declarative configuration of the PKI type for use +// with apply. +type PKIApplyConfiguration struct { + CertificateAuthorityRootsData []byte `json:"caRootsData,omitempty"` + CertificateAuthorityIntermediatesData []byte `json:"caIntermediatesData,omitempty"` + PKICertificateSubject *PKICertificateSubjectApplyConfiguration `json:"pkiCertificateSubject,omitempty"` +} + +// PKIApplyConfiguration constructs a declarative configuration of the PKI type for use with +// apply. +func PKI() *PKIApplyConfiguration { + return &PKIApplyConfiguration{} +} + +// WithCertificateAuthorityRootsData adds the given value to the CertificateAuthorityRootsData field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CertificateAuthorityRootsData field. +func (b *PKIApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte) *PKIApplyConfiguration { + for i := range values { + b.CertificateAuthorityRootsData = append(b.CertificateAuthorityRootsData, values[i]) + } + return b +} + +// WithCertificateAuthorityIntermediatesData adds the given value to the CertificateAuthorityIntermediatesData field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CertificateAuthorityIntermediatesData field. +func (b *PKIApplyConfiguration) WithCertificateAuthorityIntermediatesData(values ...byte) *PKIApplyConfiguration { + for i := range values { + b.CertificateAuthorityIntermediatesData = append(b.CertificateAuthorityIntermediatesData, values[i]) + } + return b +} + +// WithPKICertificateSubject sets the PKICertificateSubject field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PKICertificateSubject field is set to the value of the last call. 
+func (b *PKIApplyConfiguration) WithPKICertificateSubject(value *PKICertificateSubjectApplyConfiguration) *PKIApplyConfiguration { + b.PKICertificateSubject = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pkicertificatesubject.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pkicertificatesubject.go new file mode 100644 index 0000000000..70181700b3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pkicertificatesubject.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PKICertificateSubjectApplyConfiguration represents a declarative configuration of the PKICertificateSubject type for use +// with apply. +type PKICertificateSubjectApplyConfiguration struct { + Email *string `json:"email,omitempty"` + Hostname *string `json:"hostname,omitempty"` +} + +// PKICertificateSubjectApplyConfiguration constructs a declarative configuration of the PKICertificateSubject type for use with +// apply. +func PKICertificateSubject() *PKICertificateSubjectApplyConfiguration { + return &PKICertificateSubjectApplyConfiguration{} +} + +// WithEmail sets the Email field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Email field is set to the value of the last call. +func (b *PKICertificateSubjectApplyConfiguration) WithEmail(value string) *PKICertificateSubjectApplyConfiguration { + b.Email = &value + return b +} + +// WithHostname sets the Hostname field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Hostname field is set to the value of the last call. +func (b *PKICertificateSubjectApplyConfiguration) WithHostname(value string) *PKICertificateSubjectApplyConfiguration { + b.Hostname = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policy.go new file mode 100644 index 0000000000..3e29510bf1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policy.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PolicyApplyConfiguration represents a declarative configuration of the Policy type for use +// with apply. +type PolicyApplyConfiguration struct { + RootOfTrust *PolicyRootOfTrustApplyConfiguration `json:"rootOfTrust,omitempty"` + SignedIdentity *PolicyIdentityApplyConfiguration `json:"signedIdentity,omitempty"` +} + +// PolicyApplyConfiguration constructs a declarative configuration of the Policy type for use with +// apply. +func Policy() *PolicyApplyConfiguration { + return &PolicyApplyConfiguration{} +} + +// WithRootOfTrust sets the RootOfTrust field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RootOfTrust field is set to the value of the last call. 
+func (b *PolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApplyConfiguration) *PolicyApplyConfiguration { + b.RootOfTrust = value + return b +} + +// WithSignedIdentity sets the SignedIdentity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignedIdentity field is set to the value of the last call. +func (b *PolicyApplyConfiguration) WithSignedIdentity(value *PolicyIdentityApplyConfiguration) *PolicyApplyConfiguration { + b.SignedIdentity = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyfulciosubject.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyfulciosubject.go new file mode 100644 index 0000000000..7f61d420c0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyfulciosubject.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PolicyFulcioSubjectApplyConfiguration represents a declarative configuration of the PolicyFulcioSubject type for use +// with apply. +type PolicyFulcioSubjectApplyConfiguration struct { + OIDCIssuer *string `json:"oidcIssuer,omitempty"` + SignedEmail *string `json:"signedEmail,omitempty"` +} + +// PolicyFulcioSubjectApplyConfiguration constructs a declarative configuration of the PolicyFulcioSubject type for use with +// apply. +func PolicyFulcioSubject() *PolicyFulcioSubjectApplyConfiguration { + return &PolicyFulcioSubjectApplyConfiguration{} +} + +// WithOIDCIssuer sets the OIDCIssuer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OIDCIssuer field is set to the value of the last call. +func (b *PolicyFulcioSubjectApplyConfiguration) WithOIDCIssuer(value string) *PolicyFulcioSubjectApplyConfiguration { + b.OIDCIssuer = &value + return b +} + +// WithSignedEmail sets the SignedEmail field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignedEmail field is set to the value of the last call. +func (b *PolicyFulcioSubjectApplyConfiguration) WithSignedEmail(value string) *PolicyFulcioSubjectApplyConfiguration { + b.SignedEmail = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyidentity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyidentity.go new file mode 100644 index 0000000000..0e4e46be69 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyidentity.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// PolicyIdentityApplyConfiguration represents a declarative configuration of the PolicyIdentity type for use +// with apply. 
+type PolicyIdentityApplyConfiguration struct { + MatchPolicy *configv1.IdentityMatchPolicy `json:"matchPolicy,omitempty"` + PolicyMatchExactRepository *PolicyMatchExactRepositoryApplyConfiguration `json:"exactRepository,omitempty"` + PolicyMatchRemapIdentity *PolicyMatchRemapIdentityApplyConfiguration `json:"remapIdentity,omitempty"` +} + +// PolicyIdentityApplyConfiguration constructs a declarative configuration of the PolicyIdentity type for use with +// apply. +func PolicyIdentity() *PolicyIdentityApplyConfiguration { + return &PolicyIdentityApplyConfiguration{} +} + +// WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchPolicy field is set to the value of the last call. +func (b *PolicyIdentityApplyConfiguration) WithMatchPolicy(value configv1.IdentityMatchPolicy) *PolicyIdentityApplyConfiguration { + b.MatchPolicy = &value + return b +} + +// WithPolicyMatchExactRepository sets the PolicyMatchExactRepository field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyMatchExactRepository field is set to the value of the last call. +func (b *PolicyIdentityApplyConfiguration) WithPolicyMatchExactRepository(value *PolicyMatchExactRepositoryApplyConfiguration) *PolicyIdentityApplyConfiguration { + b.PolicyMatchExactRepository = value + return b +} + +// WithPolicyMatchRemapIdentity sets the PolicyMatchRemapIdentity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyMatchRemapIdentity field is set to the value of the last call. +func (b *PolicyIdentityApplyConfiguration) WithPolicyMatchRemapIdentity(value *PolicyMatchRemapIdentityApplyConfiguration) *PolicyIdentityApplyConfiguration { + b.PolicyMatchRemapIdentity = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policymatchexactrepository.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policymatchexactrepository.go new file mode 100644 index 0000000000..3b4fcbd9ce --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policymatchexactrepository.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// PolicyMatchExactRepositoryApplyConfiguration represents a declarative configuration of the PolicyMatchExactRepository type for use +// with apply. +type PolicyMatchExactRepositoryApplyConfiguration struct { + Repository *configv1.IdentityRepositoryPrefix `json:"repository,omitempty"` +} + +// PolicyMatchExactRepositoryApplyConfiguration constructs a declarative configuration of the PolicyMatchExactRepository type for use with +// apply. +func PolicyMatchExactRepository() *PolicyMatchExactRepositoryApplyConfiguration { + return &PolicyMatchExactRepositoryApplyConfiguration{} +} + +// WithRepository sets the Repository field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Repository field is set to the value of the last call. +func (b *PolicyMatchExactRepositoryApplyConfiguration) WithRepository(value configv1.IdentityRepositoryPrefix) *PolicyMatchExactRepositoryApplyConfiguration { + b.Repository = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policymatchremapidentity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policymatchremapidentity.go new file mode 100644 index 0000000000..3cf5ccf68c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policymatchremapidentity.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// PolicyMatchRemapIdentityApplyConfiguration represents a declarative configuration of the PolicyMatchRemapIdentity type for use +// with apply. +type PolicyMatchRemapIdentityApplyConfiguration struct { + Prefix *configv1.IdentityRepositoryPrefix `json:"prefix,omitempty"` + SignedPrefix *configv1.IdentityRepositoryPrefix `json:"signedPrefix,omitempty"` +} + +// PolicyMatchRemapIdentityApplyConfiguration constructs a declarative configuration of the PolicyMatchRemapIdentity type for use with +// apply. +func PolicyMatchRemapIdentity() *PolicyMatchRemapIdentityApplyConfiguration { + return &PolicyMatchRemapIdentityApplyConfiguration{} +} + +// WithPrefix sets the Prefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Prefix field is set to the value of the last call. +func (b *PolicyMatchRemapIdentityApplyConfiguration) WithPrefix(value configv1.IdentityRepositoryPrefix) *PolicyMatchRemapIdentityApplyConfiguration { + b.Prefix = &value + return b +} + +// WithSignedPrefix sets the SignedPrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignedPrefix field is set to the value of the last call. +func (b *PolicyMatchRemapIdentityApplyConfiguration) WithSignedPrefix(value configv1.IdentityRepositoryPrefix) *PolicyMatchRemapIdentityApplyConfiguration { + b.SignedPrefix = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go new file mode 100644 index 0000000000..f1ff91ffbd --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// PolicyRootOfTrustApplyConfiguration represents a declarative configuration of the PolicyRootOfTrust type for use +// with apply. 
+type PolicyRootOfTrustApplyConfiguration struct { + PolicyType *configv1.PolicyType `json:"policyType,omitempty"` + PublicKey *PublicKeyApplyConfiguration `json:"publicKey,omitempty"` + FulcioCAWithRekor *FulcioCAWithRekorApplyConfiguration `json:"fulcioCAWithRekor,omitempty"` + PKI *PKIApplyConfiguration `json:"pki,omitempty"` +} + +// PolicyRootOfTrustApplyConfiguration constructs a declarative configuration of the PolicyRootOfTrust type for use with +// apply. +func PolicyRootOfTrust() *PolicyRootOfTrustApplyConfiguration { + return &PolicyRootOfTrustApplyConfiguration{} +} + +// WithPolicyType sets the PolicyType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyType field is set to the value of the last call. +func (b *PolicyRootOfTrustApplyConfiguration) WithPolicyType(value configv1.PolicyType) *PolicyRootOfTrustApplyConfiguration { + b.PolicyType = &value + return b +} + +// WithPublicKey sets the PublicKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PublicKey field is set to the value of the last call. +func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *PublicKeyApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { + b.PublicKey = value + return b +} + +// WithFulcioCAWithRekor sets the FulcioCAWithRekor field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FulcioCAWithRekor field is set to the value of the last call. +func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *FulcioCAWithRekorApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { + b.FulcioCAWithRekor = value + return b +} + +// WithPKI sets the PKI field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PKI field is set to the value of the last call. +func (b *PolicyRootOfTrustApplyConfiguration) WithPKI(value *PKIApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { + b.PKI = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/publickey.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/publickey.go new file mode 100644 index 0000000000..c1073e882f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/publickey.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PublicKeyApplyConfiguration represents a declarative configuration of the PublicKey type for use +// with apply. +type PublicKeyApplyConfiguration struct { + KeyData []byte `json:"keyData,omitempty"` + RekorKeyData []byte `json:"rekorKeyData,omitempty"` +} + +// PublicKeyApplyConfiguration constructs a declarative configuration of the PublicKey type for use with +// apply. +func PublicKey() *PublicKeyApplyConfiguration { + return &PublicKeyApplyConfiguration{} +} + +// WithKeyData adds the given value to the KeyData field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the KeyData field. +func (b *PublicKeyApplyConfiguration) WithKeyData(values ...byte) *PublicKeyApplyConfiguration { + for i := range values { + b.KeyData = append(b.KeyData, values[i]) + } + return b +} + +// WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RekorKeyData field. +func (b *PublicKeyApplyConfiguration) WithRekorKeyData(values ...byte) *PublicKeyApplyConfiguration { + for i := range values { + b.RekorKeyData = append(b.RekorKeyData, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go index 9b3b0bb561..c748c31115 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go @@ -5,8 +5,10 @@ package v1 // TokenClaimMappingsApplyConfiguration represents a declarative configuration of the TokenClaimMappings type for use // with apply. type TokenClaimMappingsApplyConfiguration struct { - Username *UsernameClaimMappingApplyConfiguration `json:"username,omitempty"` - Groups *PrefixedClaimMappingApplyConfiguration `json:"groups,omitempty"` + Username *UsernameClaimMappingApplyConfiguration `json:"username,omitempty"` + Groups *PrefixedClaimMappingApplyConfiguration `json:"groups,omitempty"` + UID *TokenClaimOrExpressionMappingApplyConfiguration `json:"uid,omitempty"` + Extra []ExtraMappingApplyConfiguration `json:"extra,omitempty"` } // TokenClaimMappingsApplyConfiguration constructs a declarative configuration of the TokenClaimMappings type for use with @@ -30,3 +32,24 @@ func (b *TokenClaimMappingsApplyConfiguration) WithGroups(value *PrefixedClaimMa b.Groups = value return b } + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *TokenClaimMappingsApplyConfiguration) WithUID(value *TokenClaimOrExpressionMappingApplyConfiguration) *TokenClaimMappingsApplyConfiguration { + b.UID = value + return b +} + +// WithExtra adds the given value to the Extra field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Extra field. 
+func (b *TokenClaimMappingsApplyConfiguration) WithExtra(values ...*ExtraMappingApplyConfiguration) *TokenClaimMappingsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExtra") + } + b.Extra = append(b.Extra, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimorexpressionmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimorexpressionmapping.go new file mode 100644 index 0000000000..6aab9e0b5d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimorexpressionmapping.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// TokenClaimOrExpressionMappingApplyConfiguration represents a declarative configuration of the TokenClaimOrExpressionMapping type for use +// with apply. +type TokenClaimOrExpressionMappingApplyConfiguration struct { + Claim *string `json:"claim,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// TokenClaimOrExpressionMappingApplyConfiguration constructs a declarative configuration of the TokenClaimOrExpressionMapping type for use with +// apply. +func TokenClaimOrExpressionMapping() *TokenClaimOrExpressionMappingApplyConfiguration { + return &TokenClaimOrExpressionMappingApplyConfiguration{} +} + +// WithClaim sets the Claim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claim field is set to the value of the last call. +func (b *TokenClaimOrExpressionMappingApplyConfiguration) WithClaim(value string) *TokenClaimOrExpressionMappingApplyConfiguration { + b.Claim = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *TokenClaimOrExpressionMappingApplyConfiguration) WithExpression(value string) *TokenClaimOrExpressionMappingApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go index e90a90117f..2045ee5039 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go @@ -9,9 +9,9 @@ import ( // UsernameClaimMappingApplyConfiguration represents a declarative configuration of the UsernameClaimMapping type for use // with apply. 
type UsernameClaimMappingApplyConfiguration struct { - TokenClaimMappingApplyConfiguration `json:",inline"` - PrefixPolicy *configv1.UsernamePrefixPolicy `json:"prefixPolicy,omitempty"` - Prefix *UsernamePrefixApplyConfiguration `json:"prefix,omitempty"` + Claim *string `json:"claim,omitempty"` + PrefixPolicy *configv1.UsernamePrefixPolicy `json:"prefixPolicy,omitempty"` + Prefix *UsernamePrefixApplyConfiguration `json:"prefix,omitempty"` } // UsernameClaimMappingApplyConfiguration constructs a declarative configuration of the UsernameClaimMapping type for use with @@ -24,7 +24,7 @@ func UsernameClaimMapping() *UsernameClaimMappingApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Claim field is set to the value of the last call. func (b *UsernameClaimMappingApplyConfiguration) WithClaim(value string) *UsernameClaimMappingApplyConfiguration { - b.TokenClaimMappingApplyConfiguration.Claim = &value + b.Claim = &value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/custom.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/custom.go new file mode 100644 index 0000000000..3903cf882e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/custom.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// CustomApplyConfiguration represents a declarative configuration of the Custom type for use +// with apply. +type CustomApplyConfiguration struct { + Configs []GathererConfigApplyConfiguration `json:"configs,omitempty"` +} + +// CustomApplyConfiguration constructs a declarative configuration of the Custom type for use with +// apply. +func Custom() *CustomApplyConfiguration { + return &CustomApplyConfiguration{} +} + +// WithConfigs adds the given value to the Configs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Configs field. +func (b *CustomApplyConfiguration) WithConfigs(values ...*GathererConfigApplyConfiguration) *CustomApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfigs") + } + b.Configs = append(b.Configs, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gatherconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gatherconfig.go new file mode 100644 index 0000000000..6a11bada8a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gatherconfig.go @@ -0,0 +1,47 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + configv1alpha2 "github.com/openshift/api/config/v1alpha2" +) + +// GatherConfigApplyConfiguration represents a declarative configuration of the GatherConfig type for use +// with apply. +type GatherConfigApplyConfiguration struct { + DataPolicy []configv1alpha2.DataPolicyOption `json:"dataPolicy,omitempty"` + Gatherers *GatherersApplyConfiguration `json:"gatherers,omitempty"` + Storage *StorageApplyConfiguration `json:"storage,omitempty"` +} + +// GatherConfigApplyConfiguration constructs a declarative configuration of the GatherConfig type for use with +// apply. 
+func GatherConfig() *GatherConfigApplyConfiguration { + return &GatherConfigApplyConfiguration{} +} + +// WithDataPolicy adds the given value to the DataPolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DataPolicy field. +func (b *GatherConfigApplyConfiguration) WithDataPolicy(values ...configv1alpha2.DataPolicyOption) *GatherConfigApplyConfiguration { + for i := range values { + b.DataPolicy = append(b.DataPolicy, values[i]) + } + return b +} + +// WithGatherers sets the Gatherers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Gatherers field is set to the value of the last call. +func (b *GatherConfigApplyConfiguration) WithGatherers(value *GatherersApplyConfiguration) *GatherConfigApplyConfiguration { + b.Gatherers = value + return b +} + +// WithStorage sets the Storage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Storage field is set to the value of the last call. +func (b *GatherConfigApplyConfiguration) WithStorage(value *StorageApplyConfiguration) *GatherConfigApplyConfiguration { + b.Storage = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gathererconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gathererconfig.go new file mode 100644 index 0000000000..bbcd7464ec --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gathererconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + configv1alpha2 "github.com/openshift/api/config/v1alpha2" +) + +// GathererConfigApplyConfiguration represents a declarative configuration of the GathererConfig type for use +// with apply. +type GathererConfigApplyConfiguration struct { + Name *string `json:"name,omitempty"` + State *configv1alpha2.GathererState `json:"state,omitempty"` +} + +// GathererConfigApplyConfiguration constructs a declarative configuration of the GathererConfig type for use with +// apply. +func GathererConfig() *GathererConfigApplyConfiguration { + return &GathererConfigApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *GathererConfigApplyConfiguration) WithName(value string) *GathererConfigApplyConfiguration { + b.Name = &value + return b +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. 
+func (b *GathererConfigApplyConfiguration) WithState(value configv1alpha2.GathererState) *GathererConfigApplyConfiguration { + b.State = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gatherers.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gatherers.go new file mode 100644 index 0000000000..328f1efda5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/gatherers.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + configv1alpha2 "github.com/openshift/api/config/v1alpha2" +) + +// GatherersApplyConfiguration represents a declarative configuration of the Gatherers type for use +// with apply. +type GatherersApplyConfiguration struct { + Mode *configv1alpha2.GatheringMode `json:"mode,omitempty"` + Custom *CustomApplyConfiguration `json:"custom,omitempty"` +} + +// GatherersApplyConfiguration constructs a declarative configuration of the Gatherers type for use with +// apply. +func Gatherers() *GatherersApplyConfiguration { + return &GatherersApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *GatherersApplyConfiguration) WithMode(value configv1alpha2.GatheringMode) *GatherersApplyConfiguration { + b.Mode = &value + return b +} + +// WithCustom sets the Custom field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Custom field is set to the value of the last call. +func (b *GatherersApplyConfiguration) WithCustom(value *CustomApplyConfiguration) *GatherersApplyConfiguration { + b.Custom = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagather.go new file mode 100644 index 0000000000..f0c9797c5f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagather.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + configv1alpha2 "github.com/openshift/api/config/v1alpha2" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// InsightsDataGatherApplyConfiguration represents a declarative configuration of the InsightsDataGather type for use +// with apply. +type InsightsDataGatherApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *InsightsDataGatherSpecApplyConfiguration `json:"spec,omitempty"` + Status *configv1alpha2.InsightsDataGatherStatus `json:"status,omitempty"` +} + +// InsightsDataGather constructs a declarative configuration of the InsightsDataGather type for use with +// apply. 
+func InsightsDataGather(name string) *InsightsDataGatherApplyConfiguration { + b := &InsightsDataGatherApplyConfiguration{} + b.WithName(name) + b.WithKind("InsightsDataGather") + b.WithAPIVersion("config.openshift.io/v1alpha2") + return b +} + +// ExtractInsightsDataGather extracts the applied configuration owned by fieldManager from +// insightsDataGather. If no managedFields are found in insightsDataGather for fieldManager, a +// InsightsDataGatherApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// insightsDataGather must be a unmodified InsightsDataGather API object that was retrieved from the Kubernetes API. +// ExtractInsightsDataGather provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractInsightsDataGather(insightsDataGather *configv1alpha2.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) { + return extractInsightsDataGather(insightsDataGather, fieldManager, "") +} + +// ExtractInsightsDataGatherStatus is the same as ExtractInsightsDataGather except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractInsightsDataGatherStatus(insightsDataGather *configv1alpha2.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) { + return extractInsightsDataGather(insightsDataGather, fieldManager, "status") +} + +func extractInsightsDataGather(insightsDataGather *configv1alpha2.InsightsDataGather, fieldManager string, subresource string) (*InsightsDataGatherApplyConfiguration, error) { + b := &InsightsDataGatherApplyConfiguration{} + err := managedfields.ExtractInto(insightsDataGather, internal.Parser().Type("com.github.openshift.api.config.v1alpha2.InsightsDataGather"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(insightsDataGather.Name) + + b.WithKind("InsightsDataGather") + b.WithAPIVersion("config.openshift.io/v1alpha2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithKind(value string) *InsightsDataGatherApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *InsightsDataGatherApplyConfiguration) WithAPIVersion(value string) *InsightsDataGatherApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithName(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithGenerateName(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithNamespace(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithUID(value types.UID) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithResourceVersion(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithGeneration(value int64) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithCreationTimestamp(value metav1.Time) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *InsightsDataGatherApplyConfiguration) WithLabels(entries map[string]string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *InsightsDataGatherApplyConfiguration) WithAnnotations(entries map[string]string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *InsightsDataGatherApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *InsightsDataGatherApplyConfiguration) WithFinalizers(values ...string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *InsightsDataGatherApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithSpec(value *InsightsDataGatherSpecApplyConfiguration) *InsightsDataGatherApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithStatus(value configv1alpha2.InsightsDataGatherStatus) *InsightsDataGatherApplyConfiguration { + b.Status = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagatherspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagatherspec.go new file mode 100644 index 0000000000..277b1de86b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagatherspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// InsightsDataGatherSpecApplyConfiguration represents a declarative configuration of the InsightsDataGatherSpec type for use +// with apply. +type InsightsDataGatherSpecApplyConfiguration struct { + GatherConfig *GatherConfigApplyConfiguration `json:"gatherConfig,omitempty"` +} + +// InsightsDataGatherSpecApplyConfiguration constructs a declarative configuration of the InsightsDataGatherSpec type for use with +// apply. 
+func InsightsDataGatherSpec() *InsightsDataGatherSpecApplyConfiguration { + return &InsightsDataGatherSpecApplyConfiguration{} +} + +// WithGatherConfig sets the GatherConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GatherConfig field is set to the value of the last call. +func (b *InsightsDataGatherSpecApplyConfiguration) WithGatherConfig(value *GatherConfigApplyConfiguration) *InsightsDataGatherSpecApplyConfiguration { + b.GatherConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/persistentvolumeclaimreference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/persistentvolumeclaimreference.go new file mode 100644 index 0000000000..9d194b02f8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/persistentvolumeclaimreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// PersistentVolumeClaimReferenceApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimReference type for use +// with apply. +type PersistentVolumeClaimReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// PersistentVolumeClaimReferenceApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimReference type for use with +// apply. +func PersistentVolumeClaimReference() *PersistentVolumeClaimReferenceApplyConfiguration { + return &PersistentVolumeClaimReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PersistentVolumeClaimReferenceApplyConfiguration) WithName(value string) *PersistentVolumeClaimReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/persistentvolumeconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/persistentvolumeconfig.go new file mode 100644 index 0000000000..d3341d1b16 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/persistentvolumeconfig.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// PersistentVolumeConfigApplyConfiguration represents a declarative configuration of the PersistentVolumeConfig type for use +// with apply. +type PersistentVolumeConfigApplyConfiguration struct { + Claim *PersistentVolumeClaimReferenceApplyConfiguration `json:"claim,omitempty"` + MountPath *string `json:"mountPath,omitempty"` +} + +// PersistentVolumeConfigApplyConfiguration constructs a declarative configuration of the PersistentVolumeConfig type for use with +// apply. +func PersistentVolumeConfig() *PersistentVolumeConfigApplyConfiguration { + return &PersistentVolumeConfigApplyConfiguration{} +} + +// WithClaim sets the Claim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claim field is set to the value of the last call. 
+func (b *PersistentVolumeConfigApplyConfiguration) WithClaim(value *PersistentVolumeClaimReferenceApplyConfiguration) *PersistentVolumeConfigApplyConfiguration { + b.Claim = value + return b +} + +// WithMountPath sets the MountPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MountPath field is set to the value of the last call. +func (b *PersistentVolumeConfigApplyConfiguration) WithMountPath(value string) *PersistentVolumeConfigApplyConfiguration { + b.MountPath = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/storage.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/storage.go new file mode 100644 index 0000000000..596258c48b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/storage.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + configv1alpha2 "github.com/openshift/api/config/v1alpha2" +) + +// StorageApplyConfiguration represents a declarative configuration of the Storage type for use +// with apply. +type StorageApplyConfiguration struct { + Type *configv1alpha2.StorageType `json:"type,omitempty"` + PersistentVolume *PersistentVolumeConfigApplyConfiguration `json:"persistentVolume,omitempty"` +} + +// StorageApplyConfiguration constructs a declarative configuration of the Storage type for use with +// apply. +func Storage() *StorageApplyConfiguration { + return &StorageApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *StorageApplyConfiguration) WithType(value configv1alpha2.StorageType) *StorageApplyConfiguration { + b.Type = &value + return b +} + +// WithPersistentVolume sets the PersistentVolume field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolume field is set to the value of the last call. 
+func (b *StorageApplyConfiguration) WithPersistentVolume(value *PersistentVolumeConfigApplyConfiguration) *StorageApplyConfiguration { + b.PersistentVolume = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go index 636d305052..439a5df058 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go @@ -133,6 +133,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string + default: "" unions: - discriminator: type - name: com.github.openshift.api.config.v1.AWSKMSConfig @@ -266,6 +267,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: profile type: scalar: string + default: "" - name: com.github.openshift.api.config.v1.Authentication map: fields: @@ -597,6 +599,51 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.ClusterImagePolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ClusterImagePolicySpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.ClusterImagePolicyStatus + default: {} +- name: com.github.openshift.api.config.v1.ClusterImagePolicySpec + map: + fields: + - name: policy + type: + namedType: com.github.openshift.api.config.v1.Policy + default: {} + - name: scopes + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: com.github.openshift.api.config.v1.ClusterImagePolicyStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type - name: com.github.openshift.api.config.v1.ClusterNetworkEntry map: fields: @@ -1175,6 +1222,17 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1.CloudControllerManagerStatus default: {} +- name: com.github.openshift.api.config.v1.ExtraMapping + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: valueExpression + type: + scalar: string + default: "" - name: com.github.openshift.api.config.v1.FeatureGate map: fields: @@ -1255,6 +1313,19 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - version +- name: com.github.openshift.api.config.v1.FulcioCAWithRekor + map: + fields: + - name: fulcioCAData + type: + scalar: string + - name: fulcioSubject + type: + namedType: com.github.openshift.api.config.v1.PolicyFulcioSubject + default: {} + - name: rekorKeyData + type: + scalar: string - name: com.github.openshift.api.config.v1.GCPPlatformSpec map: elementType: @@ -1642,6 +1713,51 @@ var schemaYAML = typed.YAMLObject(`types: - name: value type: scalar: string +- name: com.github.openshift.api.config.v1.ImagePolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ImagePolicySpec + default: {} + - name: status + type: + namedType: 
com.github.openshift.api.config.v1.ImagePolicyStatus + default: {} +- name: com.github.openshift.api.config.v1.ImagePolicySpec + map: + fields: + - name: policy + type: + namedType: com.github.openshift.api.config.v1.Policy + default: {} + - name: scopes + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: com.github.openshift.api.config.v1.ImagePolicyStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type - name: com.github.openshift.api.config.v1.ImageSpec map: fields: @@ -1804,7 +1920,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: infrastructureTopology type: scalar: string - default: "" - name: platform type: scalar: string @@ -2790,6 +2905,28 @@ var schemaYAML = typed.YAMLObject(`types: - name: nodeDNSIP type: scalar: string +- name: com.github.openshift.api.config.v1.PKI + map: + fields: + - name: caIntermediatesData + type: + scalar: string + - name: caRootsData + type: + scalar: string + - name: pkiCertificateSubject + type: + namedType: com.github.openshift.api.config.v1.PKICertificateSubject + default: {} +- name: com.github.openshift.api.config.v1.PKICertificateSubject + map: + fields: + - name: email + type: + scalar: string + - name: hostname + type: + scalar: string - name: com.github.openshift.api.config.v1.PlatformSpec map: fields: @@ -2888,6 +3025,90 @@ var schemaYAML = typed.YAMLObject(`types: - name: vsphere type: namedType: com.github.openshift.api.config.v1.VSpherePlatformStatus +- name: com.github.openshift.api.config.v1.Policy + map: + fields: + - name: rootOfTrust + type: + namedType: com.github.openshift.api.config.v1.PolicyRootOfTrust + default: {} + - name: signedIdentity + type: + namedType: com.github.openshift.api.config.v1.PolicyIdentity +- name: com.github.openshift.api.config.v1.PolicyFulcioSubject + map: + fields: + - name: oidcIssuer + type: + scalar: string + default: "" + - name: signedEmail + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.PolicyIdentity + map: + fields: + - name: exactRepository + type: + namedType: com.github.openshift.api.config.v1.PolicyMatchExactRepository + - name: matchPolicy + type: + scalar: string + default: "" + - name: remapIdentity + type: + namedType: com.github.openshift.api.config.v1.PolicyMatchRemapIdentity + unions: + - discriminator: matchPolicy + fields: + - fieldName: exactRepository + discriminatorValue: PolicyMatchExactRepository + - fieldName: remapIdentity + discriminatorValue: PolicyMatchRemapIdentity +- name: com.github.openshift.api.config.v1.PolicyMatchExactRepository + map: + fields: + - name: repository + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.PolicyMatchRemapIdentity + map: + fields: + - name: prefix + type: + scalar: string + default: "" + - name: signedPrefix + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.PolicyRootOfTrust + map: + fields: + - name: fulcioCAWithRekor + type: + namedType: com.github.openshift.api.config.v1.FulcioCAWithRekor + - name: pki + type: + namedType: com.github.openshift.api.config.v1.PKI + - name: policyType + type: + scalar: string + default: "" + - name: publicKey + type: + namedType: com.github.openshift.api.config.v1.PublicKey + unions: + - discriminator: policyType + fields: + - fieldName: fulcioCAWithRekor + discriminatorValue: FulcioCAWithRekor + - fieldName: pki + discriminatorValue: 
PKI + - fieldName: publicKey + discriminatorValue: PublicKey - name: com.github.openshift.api.config.v1.PowerVSPlatformSpec map: fields: @@ -3063,6 +3284,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: noProxy type: scalar: string +- name: com.github.openshift.api.config.v1.PublicKey + map: + fields: + - name: keyData + type: + scalar: string + - name: rekorKeyData + type: + scalar: string - name: com.github.openshift.api.config.v1.RegistryLocation map: fields: @@ -3318,14 +3548,34 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.config.v1.TokenClaimMappings map: fields: + - name: extra + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.ExtraMapping + elementRelationship: associative + keys: + - key - name: groups type: namedType: com.github.openshift.api.config.v1.PrefixedClaimMapping default: {} + - name: uid + type: + namedType: com.github.openshift.api.config.v1.TokenClaimOrExpressionMapping - name: username type: namedType: com.github.openshift.api.config.v1.UsernameClaimMapping default: {} +- name: com.github.openshift.api.config.v1.TokenClaimOrExpressionMapping + map: + fields: + - name: claim + type: + scalar: string + - name: expression + type: + scalar: string - name: com.github.openshift.api.config.v1.TokenClaimValidationRule map: fields: @@ -3437,6 +3687,13 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + unions: + - discriminator: prefixPolicy + fields: + - fieldName: claim + discriminatorValue: Claim + - fieldName: prefix + discriminatorValue: Prefix - name: com.github.openshift.api.config.v1.UsernamePrefix map: fields: @@ -4073,6 +4330,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: maxNumberOfBackups type: scalar: numeric + default: 0 - name: com.github.openshift.api.config.v1alpha1.RetentionPolicy map: fields: @@ -4099,6 +4357,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: maxSizeOfBackupsGb type: scalar: numeric + default: 0 - name: com.github.openshift.api.config.v1alpha1.Storage map: fields: @@ -4116,6 +4375,121 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1alpha2.Custom + map: + fields: + - name: configs + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1alpha2.GathererConfig + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.config.v1alpha2.GatherConfig + map: + fields: + - name: dataPolicy + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: gatherers + type: + namedType: com.github.openshift.api.config.v1alpha2.Gatherers + default: {} + - name: storage + type: + namedType: com.github.openshift.api.config.v1alpha2.Storage +- name: com.github.openshift.api.config.v1alpha2.GathererConfig + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: state + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1alpha2.Gatherers + map: + fields: + - name: custom + type: + namedType: com.github.openshift.api.config.v1alpha2.Custom + - name: mode + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1alpha2.InsightsDataGather + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1alpha2.InsightsDataGatherSpec 
+ default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1alpha2.InsightsDataGatherStatus + default: {} +- name: com.github.openshift.api.config.v1alpha2.InsightsDataGatherSpec + map: + fields: + - name: gatherConfig + type: + namedType: com.github.openshift.api.config.v1alpha2.GatherConfig + default: {} +- name: com.github.openshift.api.config.v1alpha2.InsightsDataGatherStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1alpha2.PersistentVolumeClaimReference + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1alpha2.PersistentVolumeConfig + map: + fields: + - name: claim + type: + namedType: com.github.openshift.api.config.v1alpha2.PersistentVolumeClaimReference + default: {} + - name: mountPath + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha2.Storage + map: + fields: + - name: persistentVolume + type: + namedType: com.github.openshift.api.config.v1alpha2.PersistentVolumeConfig + - name: type + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.ConfigMapKeySelector map: fields: diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go index f9ed357b64..fdb9450b84 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go @@ -8,6 +8,7 @@ import ( configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" configv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + configv1alpha2 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -17,6 +18,7 @@ type Interface interface { Discovery() discovery.DiscoveryInterface ConfigV1() configv1.ConfigV1Interface ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface + ConfigV1alpha2() configv1alpha2.ConfigV1alpha2Interface } // Clientset contains the clients for groups. 
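[Editor's note] For orientation, the net effect of the Interface change above: consumers of the versioned clientset gain a third group-version getter next to ConfigV1() and ConfigV1alpha1(). A minimal consumer sketch, assuming the standard kubeconfig loading path; the resource name "cluster" is illustrative and not part of this patch:

package main

import (
	"context"
	"fmt"

	versioned "github.com/openshift/client-go/config/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from the default kubeconfig location.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The new getter wires through to the v1alpha2 typed client added later in this patch.
	idg, err := cs.ConfigV1alpha2().InsightsDataGathers().Get(context.Background(), "cluster", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(idg.Name)
}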
@@ -24,6 +26,7 @@ type Clientset struct { *discovery.DiscoveryClient configV1 *configv1.ConfigV1Client configV1alpha1 *configv1alpha1.ConfigV1alpha1Client + configV1alpha2 *configv1alpha2.ConfigV1alpha2Client } // ConfigV1 retrieves the ConfigV1Client @@ -36,6 +39,11 @@ func (c *Clientset) ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface { return c.configV1alpha1 } +// ConfigV1alpha2 retrieves the ConfigV1alpha2Client +func (c *Clientset) ConfigV1alpha2() configv1alpha2.ConfigV1alpha2Interface { + return c.configV1alpha2 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -88,6 +96,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.configV1alpha2, err = configv1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { @@ -111,6 +123,7 @@ func New(c rest.Interface) *Clientset { var cs Clientset cs.configV1 = configv1.New(c) cs.configV1alpha1 = configv1alpha1.New(c) + cs.configV1alpha2 = configv1alpha2.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go index 6340555dd1..eb67739213 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go @@ -5,6 +5,7 @@ package scheme import ( configv1 "github.com/openshift/api/config/v1" configv1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha2 "github.com/openshift/api/config/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -18,6 +19,7 @@ var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ configv1.AddToScheme, configv1alpha1.AddToScheme, + configv1alpha2.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterimagepolicy.go new file mode 100644 index 0000000000..b0452f6116 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterimagepolicy.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterImagePoliciesGetter has a method to return a ClusterImagePolicyInterface. +// A group's client should implement this interface. 
+type ClusterImagePoliciesGetter interface { + ClusterImagePolicies() ClusterImagePolicyInterface +} + +// ClusterImagePolicyInterface has methods to work with ClusterImagePolicy resources. +type ClusterImagePolicyInterface interface { + Create(ctx context.Context, clusterImagePolicy *configv1.ClusterImagePolicy, opts metav1.CreateOptions) (*configv1.ClusterImagePolicy, error) + Update(ctx context.Context, clusterImagePolicy *configv1.ClusterImagePolicy, opts metav1.UpdateOptions) (*configv1.ClusterImagePolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, clusterImagePolicy *configv1.ClusterImagePolicy, opts metav1.UpdateOptions) (*configv1.ClusterImagePolicy, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ClusterImagePolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ClusterImagePolicyList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ClusterImagePolicy, err error) + Apply(ctx context.Context, clusterImagePolicy *applyconfigurationsconfigv1.ClusterImagePolicyApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ClusterImagePolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, clusterImagePolicy *applyconfigurationsconfigv1.ClusterImagePolicyApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ClusterImagePolicy, err error) + ClusterImagePolicyExpansion +} + +// clusterImagePolicies implements ClusterImagePolicyInterface +type clusterImagePolicies struct { + *gentype.ClientWithListAndApply[*configv1.ClusterImagePolicy, *configv1.ClusterImagePolicyList, *applyconfigurationsconfigv1.ClusterImagePolicyApplyConfiguration] +} + +// newClusterImagePolicies returns a ClusterImagePolicies +func newClusterImagePolicies(c *ConfigV1Client) *clusterImagePolicies { + return &clusterImagePolicies{ + gentype.NewClientWithListAndApply[*configv1.ClusterImagePolicy, *configv1.ClusterImagePolicyList, *applyconfigurationsconfigv1.ClusterImagePolicyApplyConfiguration]( + "clusterimagepolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.ClusterImagePolicy { return &configv1.ClusterImagePolicy{} }, + func() *configv1.ClusterImagePolicyList { return &configv1.ClusterImagePolicyList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go index bbb0b312ee..70957eee8b 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go @@ -15,6 +15,7 @@ type ConfigV1Interface interface { APIServersGetter AuthenticationsGetter BuildsGetter + ClusterImagePoliciesGetter ClusterOperatorsGetter ClusterVersionsGetter ConsolesGetter @@ -23,6 +24,7 @@ type ConfigV1Interface interface { ImagesGetter ImageContentPoliciesGetter ImageDigestMirrorSetsGetter + ImagePoliciesGetter 
ImageTagMirrorSetsGetter InfrastructuresGetter IngressesGetter @@ -52,6 +54,10 @@ func (c *ConfigV1Client) Builds() BuildInterface { return newBuilds(c) } +func (c *ConfigV1Client) ClusterImagePolicies() ClusterImagePolicyInterface { + return newClusterImagePolicies(c) +} + func (c *ConfigV1Client) ClusterOperators() ClusterOperatorInterface { return newClusterOperators(c) } @@ -84,6 +90,10 @@ func (c *ConfigV1Client) ImageDigestMirrorSets() ImageDigestMirrorSetInterface { return newImageDigestMirrorSets(c) } +func (c *ConfigV1Client) ImagePolicies(namespace string) ImagePolicyInterface { + return newImagePolicies(c, namespace) +} + func (c *ConfigV1Client) ImageTagMirrorSets() ImageTagMirrorSetInterface { return newImageTagMirrorSets(c) } @@ -129,9 +139,7 @@ func (c *ConfigV1Client) Schedulers() SchedulerInterface { // where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*ConfigV1Client, error) { config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } + setConfigDefaults(&config) httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err @@ -143,9 +151,7 @@ func NewForConfig(c *rest.Config) (*ConfigV1Client, error) { // Note the http client provided takes precedence over the configured transport values. func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ConfigV1Client, error) { config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } + setConfigDefaults(&config) client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err @@ -168,7 +174,7 @@ func New(c rest.Interface) *ConfigV1Client { return &ConfigV1Client{c} } -func setConfigDefaults(config *rest.Config) error { +func setConfigDefaults(config *rest.Config) { gv := configv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" @@ -177,8 +183,6 @@ func setConfigDefaults(config *rest.Config) error { if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() } - - return nil } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go index a56721ba9d..44ad19dcb3 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go @@ -8,6 +8,8 @@ type AuthenticationExpansion interface{} type BuildExpansion interface{} +type ClusterImagePolicyExpansion interface{} + type ClusterOperatorExpansion interface{} type ClusterVersionExpansion interface{} @@ -24,6 +26,8 @@ type ImageContentPolicyExpansion interface{} type ImageDigestMirrorSetExpansion interface{} +type ImagePolicyExpansion interface{} + type ImageTagMirrorSetExpansion interface{} type InfrastructureExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagepolicy.go new file mode 100644 index 0000000000..4dae127574 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagepolicy.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. 
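[Editor's note] Before the generated file body below, note the scoping of the two getters added to ConfigV1Interface above: ClusterImagePolicies() is cluster-scoped, while ImagePolicies(namespace) is namespaced, matching the empty string versus namespace argument each passes to gentype.NewClientWithListAndApply. A hedged calling sketch, reusing the imports from the earlier clientset example; the object name and namespace are illustrative:

func readPolicies(ctx context.Context, cs versioned.Interface) error {
	// Cluster-scoped: addressed by name only.
	cip, err := cs.ConfigV1().ClusterImagePolicies().Get(ctx, "example", metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Println("cluster policy:", cip.Name)

	// Namespaced: the namespace argument scopes the listing.
	ips, err := cs.ConfigV1().ImagePolicies("openshift-config").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println("namespaced policies:", len(ips.Items))
	return nil
}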
+ +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ImagePoliciesGetter has a method to return a ImagePolicyInterface. +// A group's client should implement this interface. +type ImagePoliciesGetter interface { + ImagePolicies(namespace string) ImagePolicyInterface +} + +// ImagePolicyInterface has methods to work with ImagePolicy resources. +type ImagePolicyInterface interface { + Create(ctx context.Context, imagePolicy *configv1.ImagePolicy, opts metav1.CreateOptions) (*configv1.ImagePolicy, error) + Update(ctx context.Context, imagePolicy *configv1.ImagePolicy, opts metav1.UpdateOptions) (*configv1.ImagePolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, imagePolicy *configv1.ImagePolicy, opts metav1.UpdateOptions) (*configv1.ImagePolicy, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.ImagePolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.ImagePolicyList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.ImagePolicy, err error) + Apply(ctx context.Context, imagePolicy *applyconfigurationsconfigv1.ImagePolicyApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImagePolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, imagePolicy *applyconfigurationsconfigv1.ImagePolicyApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.ImagePolicy, err error) + ImagePolicyExpansion +} + +// imagePolicies implements ImagePolicyInterface +type imagePolicies struct { + *gentype.ClientWithListAndApply[*configv1.ImagePolicy, *configv1.ImagePolicyList, *applyconfigurationsconfigv1.ImagePolicyApplyConfiguration] +} + +// newImagePolicies returns a ImagePolicies +func newImagePolicies(c *ConfigV1Client, namespace string) *imagePolicies { + return &imagePolicies{ + gentype.NewClientWithListAndApply[*configv1.ImagePolicy, *configv1.ImagePolicyList, *applyconfigurationsconfigv1.ImagePolicyApplyConfiguration]( + "imagepolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *configv1.ImagePolicy { return &configv1.ImagePolicy{} }, + func() *configv1.ImagePolicyList { return &configv1.ImagePolicyList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go index 70ebfa3cdf..2530a4a645 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go @@ -49,9 +49,7 @@ func (c *ConfigV1alpha1Client) InsightsDataGathers() InsightsDataGatherInterface // where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*ConfigV1alpha1Client, error) { config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } + setConfigDefaults(&config) httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err @@ -63,9 +61,7 @@ func NewForConfig(c *rest.Config) (*ConfigV1alpha1Client, error) { // Note the http client provided takes precedence over the configured transport values. func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ConfigV1alpha1Client, error) { config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } + setConfigDefaults(&config) client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err @@ -88,7 +84,7 @@ func New(c rest.Interface) *ConfigV1alpha1Client { return &ConfigV1alpha1Client{c} } -func setConfigDefaults(config *rest.Config) error { +func setConfigDefaults(config *rest.Config) { gv := configv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" @@ -97,8 +93,6 @@ func setConfigDefaults(config *rest.Config) error { if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() } - - return nil } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/config_client.go new file mode 100644 index 0000000000..7c4407c610 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/config_client.go @@ -0,0 +1,85 @@ +// Code generated by client-gen. DO NOT EDIT. 
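[Editor's note] The Apply and ApplyStatus methods above drive server-side apply with the typed apply configurations whose managed-fields schema this patch adds to internal.go. A sketch under two assumptions: that the generated constructor applyconfigurationsconfigv1.ImagePolicy(name, namespace) follows the usual applyconfiguration-gen shape for namespaced types, and that the field manager string is illustrative:

func applyPolicy(ctx context.Context, cs versioned.Interface) error {
	// Assumed generated constructor for a namespaced apply configuration.
	ip := applyconfigurationsconfigv1.ImagePolicy("example", "openshift-config")
	// Server-side apply; Force takes ownership of conflicting fields.
	_, err := cs.ConfigV1().ImagePolicies("openshift-config").
		Apply(ctx, ip, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}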
+ +package v1alpha2 + +import ( + http "net/http" + + configv1alpha2 "github.com/openshift/api/config/v1alpha2" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type ConfigV1alpha2Interface interface { + RESTClient() rest.Interface + InsightsDataGathersGetter +} + +// ConfigV1alpha2Client is used to interact with features provided by the config.openshift.io group. +type ConfigV1alpha2Client struct { + restClient rest.Interface +} + +func (c *ConfigV1alpha2Client) InsightsDataGathers() InsightsDataGatherInterface { + return newInsightsDataGathers(c) +} + +// NewForConfig creates a new ConfigV1alpha2Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ConfigV1alpha2Client, error) { + config := *c + setConfigDefaults(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ConfigV1alpha2Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ConfigV1alpha2Client, error) { + config := *c + setConfigDefaults(&config) + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ConfigV1alpha2Client{client}, nil +} + +// NewForConfigOrDie creates a new ConfigV1alpha2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ConfigV1alpha2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ConfigV1alpha2Client for the given RESTClient. +func New(c rest.Interface) *ConfigV1alpha2Client { + return &ConfigV1alpha2Client{c} +} + +func setConfigDefaults(config *rest.Config) { + gv := configv1alpha2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ConfigV1alpha2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/doc.go new file mode 100644 index 0000000000..c11da26828 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha2 diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/generated_expansion.go new file mode 100644 index 0000000000..6f1f055c79 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/generated_expansion.go @@ -0,0 +1,5 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +type InsightsDataGatherExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/insightsdatagather.go new file mode 100644 index 0000000000..ad5be4b226 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha2/insightsdatagather.go @@ -0,0 +1,58 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + context "context" + + configv1alpha2 "github.com/openshift/api/config/v1alpha2" + applyconfigurationsconfigv1alpha2 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// InsightsDataGathersGetter has a method to return a InsightsDataGatherInterface. +// A group's client should implement this interface. +type InsightsDataGathersGetter interface { + InsightsDataGathers() InsightsDataGatherInterface +} + +// InsightsDataGatherInterface has methods to work with InsightsDataGather resources. +type InsightsDataGatherInterface interface { + Create(ctx context.Context, insightsDataGather *configv1alpha2.InsightsDataGather, opts v1.CreateOptions) (*configv1alpha2.InsightsDataGather, error) + Update(ctx context.Context, insightsDataGather *configv1alpha2.InsightsDataGather, opts v1.UpdateOptions) (*configv1alpha2.InsightsDataGather, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, insightsDataGather *configv1alpha2.InsightsDataGather, opts v1.UpdateOptions) (*configv1alpha2.InsightsDataGather, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*configv1alpha2.InsightsDataGather, error) + List(ctx context.Context, opts v1.ListOptions) (*configv1alpha2.InsightsDataGatherList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1alpha2.InsightsDataGather, err error) + Apply(ctx context.Context, insightsDataGather *applyconfigurationsconfigv1alpha2.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha2.InsightsDataGather, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, insightsDataGather *applyconfigurationsconfigv1alpha2.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *configv1alpha2.InsightsDataGather, err error) + InsightsDataGatherExpansion +} + +// insightsDataGathers implements InsightsDataGatherInterface +type insightsDataGathers struct { + *gentype.ClientWithListAndApply[*configv1alpha2.InsightsDataGather, *configv1alpha2.InsightsDataGatherList, *applyconfigurationsconfigv1alpha2.InsightsDataGatherApplyConfiguration] +} + +// newInsightsDataGathers returns a InsightsDataGathers +func newInsightsDataGathers(c *ConfigV1alpha2Client) *insightsDataGathers { + return &insightsDataGathers{ + gentype.NewClientWithListAndApply[*configv1alpha2.InsightsDataGather, *configv1alpha2.InsightsDataGatherList, *applyconfigurationsconfigv1alpha2.InsightsDataGatherApplyConfiguration]( + "insightsdatagathers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1alpha2.InsightsDataGather { return &configv1alpha2.InsightsDataGather{} }, + func() *configv1alpha2.InsightsDataGatherList { return &configv1alpha2.InsightsDataGatherList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go index 3e7e6e8d3b..7ffc394de4 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go @@ -5,6 +5,7 @@ package config import ( v1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" v1alpha1 "github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1" + v1alpha2 "github.com/openshift/client-go/config/informers/externalversions/config/v1alpha2" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" ) @@ -14,6 +15,8 @@ type Interface interface { V1() v1.Interface // V1alpha1 provides access to shared informers for resources in V1alpha1. V1alpha1() v1alpha1.Interface + // V1alpha2 provides access to shared informers for resources in V1alpha2. + V1alpha2() v1alpha2.Interface } type group struct { @@ -36,3 +39,8 @@ func (g *group) V1() v1.Interface { func (g *group) V1alpha1() v1alpha1.Interface { return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1alpha2 returns a new v1alpha2.Interface. +func (g *group) V1alpha2() v1alpha2.Interface { + return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go index 262aa7b0a2..f022c5fff5 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go @@ -45,13 +45,25 @@ func NewFilteredAPIServerInformer(client versioned.Interface, resyncPeriod time. 
if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().APIServers().List(context.TODO(), options) + return client.ConfigV1().APIServers().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().APIServers().Watch(context.TODO(), options) + return client.ConfigV1().APIServers().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().APIServers().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().APIServers().Watch(ctx, options) }, }, &apiconfigv1.APIServer{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go index efe2c253e9..a52a281783 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go @@ -45,13 +45,25 @@ func NewFilteredAuthenticationInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Authentications().List(context.TODO(), options) + return client.ConfigV1().Authentications().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Authentications().Watch(context.TODO(), options) + return client.ConfigV1().Authentications().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Authentications().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Authentications().Watch(ctx, options) }, }, &apiconfigv1.Authentication{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go index 451ba252da..cfa41e7bc4 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go @@ -45,13 +45,25 @@ func NewFilteredBuildInformer(client versioned.Interface, resyncPeriod time.Dura if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Builds().List(context.TODO(), options) + return client.ConfigV1().Builds().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Builds().Watch(context.TODO(), options) + return client.ConfigV1().Builds().Watch(context.Background(), 
options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Builds().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Builds().Watch(ctx, options) }, }, &apiconfigv1.Build{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterimagepolicy.go new file mode 100644 index 0000000000..9dfb2b3da3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterimagepolicy.go @@ -0,0 +1,85 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterImagePolicyInformer provides access to a shared informer and lister for +// ClusterImagePolicies. +type ClusterImagePolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.ClusterImagePolicyLister +} + +type clusterImagePolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterImagePolicyInformer constructs a new informer for ClusterImagePolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterImagePolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterImagePolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterImagePolicyInformer constructs a new informer for ClusterImagePolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterImagePolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterImagePolicies().List(context.Background(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterImagePolicies().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterImagePolicies().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterImagePolicies().Watch(ctx, options) + }, + }, + &apiconfigv1.ClusterImagePolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterImagePolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterImagePolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterImagePolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.ClusterImagePolicy{}, f.defaultInformer) +} + +func (f *clusterImagePolicyInformer) Lister() configv1.ClusterImagePolicyLister { + return configv1.NewClusterImagePolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go index 1eda53c8b9..79728d2b8d 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go @@ -45,13 +45,25 @@ func NewFilteredClusterOperatorInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().ClusterOperators().List(context.TODO(), options) + return client.ConfigV1().ClusterOperators().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().ClusterOperators().Watch(context.TODO(), options) + return client.ConfigV1().ClusterOperators().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterOperators().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterOperators().Watch(ctx, options) }, }, &apiconfigv1.ClusterOperator{}, diff --git 
a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go index c3915175ea..668c73ac9e 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go @@ -45,13 +45,25 @@ func NewFilteredClusterVersionInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().ClusterVersions().List(context.TODO(), options) + return client.ConfigV1().ClusterVersions().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().ClusterVersions().Watch(context.TODO(), options) + return client.ConfigV1().ClusterVersions().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterVersions().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterVersions().Watch(ctx, options) }, }, &apiconfigv1.ClusterVersion{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go index 05a36ec0a1..f39cbb7693 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go @@ -45,13 +45,25 @@ func NewFilteredConsoleInformer(client versioned.Interface, resyncPeriod time.Du if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Consoles().List(context.TODO(), options) + return client.ConfigV1().Consoles().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Consoles().Watch(context.TODO(), options) + return client.ConfigV1().Consoles().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Consoles().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Consoles().Watch(ctx, options) }, }, &apiconfigv1.Console{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go index af44dfce98..83860dc914 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go @@ -45,13 +45,25 @@ func NewFilteredDNSInformer(client 
versioned.Interface, resyncPeriod time.Durati if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().DNSes().List(context.TODO(), options) + return client.ConfigV1().DNSes().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().DNSes().Watch(context.TODO(), options) + return client.ConfigV1().DNSes().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().DNSes().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().DNSes().Watch(ctx, options) }, }, &apiconfigv1.DNS{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go index dc1e205070..d6cfb650da 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go @@ -45,13 +45,25 @@ func NewFilteredFeatureGateInformer(client versioned.Interface, resyncPeriod tim if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().FeatureGates().List(context.TODO(), options) + return client.ConfigV1().FeatureGates().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().FeatureGates().Watch(context.TODO(), options) + return client.ConfigV1().FeatureGates().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().FeatureGates().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().FeatureGates().Watch(ctx, options) }, }, &apiconfigv1.FeatureGate{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go index 5f68a35ec6..e67276031c 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go @@ -45,13 +45,25 @@ func NewFilteredImageInformer(client versioned.Interface, resyncPeriod time.Dura if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Images().List(context.TODO(), options) + return client.ConfigV1().Images().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Images().Watch(context.TODO(), options) + return client.ConfigV1().Images().Watch(context.Background(), options) + }, + 
ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Images().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Images().Watch(ctx, options) }, }, &apiconfigv1.Image{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go index e062099ea2..59a069f476 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go @@ -45,13 +45,25 @@ func NewFilteredImageContentPolicyInformer(client versioned.Interface, resyncPer if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().ImageContentPolicies().List(context.TODO(), options) + return client.ConfigV1().ImageContentPolicies().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().ImageContentPolicies().Watch(context.TODO(), options) + return client.ConfigV1().ImageContentPolicies().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageContentPolicies().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageContentPolicies().Watch(ctx, options) }, }, &apiconfigv1.ImageContentPolicy{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go index 0bdadff5b5..3eeb939c14 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go @@ -45,13 +45,25 @@ func NewFilteredImageDigestMirrorSetInformer(client versioned.Interface, resyncP if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().ImageDigestMirrorSets().List(context.TODO(), options) + return client.ConfigV1().ImageDigestMirrorSets().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().ImageDigestMirrorSets().Watch(context.TODO(), options) + return client.ConfigV1().ImageDigestMirrorSets().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageDigestMirrorSets().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) 
(watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageDigestMirrorSets().Watch(ctx, options) }, }, &apiconfigv1.ImageDigestMirrorSet{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagepolicy.go new file mode 100644 index 0000000000..191fc5db98 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagepolicy.go @@ -0,0 +1,86 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImagePolicyInformer provides access to a shared informer and lister for +// ImagePolicies. +type ImagePolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.ImagePolicyLister +} + +type imagePolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewImagePolicyInformer constructs a new informer for ImagePolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImagePolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImagePolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredImagePolicyInformer constructs a new informer for ImagePolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredImagePolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImagePolicies(namespace).List(context.Background(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImagePolicies(namespace).Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImagePolicies(namespace).List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImagePolicies(namespace).Watch(ctx, options) + }, + }, + &apiconfigv1.ImagePolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *imagePolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImagePolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imagePolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.ImagePolicy{}, f.defaultInformer) +} + +func (f *imagePolicyInformer) Lister() configv1.ImagePolicyLister { + return configv1.NewImagePolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go index 92bf24f201..51d1c07fdf 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go @@ -45,13 +45,25 @@ func NewFilteredImageTagMirrorSetInformer(client versioned.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().ImageTagMirrorSets().List(context.TODO(), options) + return client.ConfigV1().ImageTagMirrorSets().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().ImageTagMirrorSets().Watch(context.TODO(), options) + return client.ConfigV1().ImageTagMirrorSets().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageTagMirrorSets().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ImageTagMirrorSets().Watch(ctx, options) }, }, &apiconfigv1.ImageTagMirrorSet{}, diff --git 
a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go index 4891bd2497..0f128b569c 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go @@ -45,13 +45,25 @@ func NewFilteredInfrastructureInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Infrastructures().List(context.TODO(), options) + return client.ConfigV1().Infrastructures().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Infrastructures().Watch(context.TODO(), options) + return client.ConfigV1().Infrastructures().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Infrastructures().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Infrastructures().Watch(ctx, options) }, }, &apiconfigv1.Infrastructure{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go index 59ca11638b..0a1f492ded 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go @@ -45,13 +45,25 @@ func NewFilteredIngressInformer(client versioned.Interface, resyncPeriod time.Du if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Ingresses().List(context.TODO(), options) + return client.ConfigV1().Ingresses().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Ingresses().Watch(context.TODO(), options) + return client.ConfigV1().Ingresses().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Ingresses().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Ingresses().Watch(ctx, options) }, }, &apiconfigv1.Ingress{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go index f49b1d2287..ff4c521b04 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go @@ -14,6 +14,8 @@ type Interface 
interface { Authentications() AuthenticationInformer // Builds returns a BuildInformer. Builds() BuildInformer + // ClusterImagePolicies returns a ClusterImagePolicyInformer. + ClusterImagePolicies() ClusterImagePolicyInformer // ClusterOperators returns a ClusterOperatorInformer. ClusterOperators() ClusterOperatorInformer // ClusterVersions returns a ClusterVersionInformer. @@ -30,6 +32,8 @@ type Interface interface { ImageContentPolicies() ImageContentPolicyInformer // ImageDigestMirrorSets returns a ImageDigestMirrorSetInformer. ImageDigestMirrorSets() ImageDigestMirrorSetInformer + // ImagePolicies returns a ImagePolicyInformer. + ImagePolicies() ImagePolicyInformer // ImageTagMirrorSets returns a ImageTagMirrorSetInformer. ImageTagMirrorSets() ImageTagMirrorSetInformer // Infrastructures returns a InfrastructureInformer. @@ -78,6 +82,11 @@ func (v *version) Builds() BuildInformer { return &buildInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// ClusterImagePolicies returns a ClusterImagePolicyInformer. +func (v *version) ClusterImagePolicies() ClusterImagePolicyInformer { + return &clusterImagePolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // ClusterOperators returns a ClusterOperatorInformer. func (v *version) ClusterOperators() ClusterOperatorInformer { return &clusterOperatorInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} @@ -118,6 +127,11 @@ func (v *version) ImageDigestMirrorSets() ImageDigestMirrorSetInformer { return &imageDigestMirrorSetInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// ImagePolicies returns a ImagePolicyInformer. +func (v *version) ImagePolicies() ImagePolicyInformer { + return &imagePolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // ImageTagMirrorSets returns a ImageTagMirrorSetInformer. 
func (v *version) ImageTagMirrorSets() ImageTagMirrorSetInformer { return &imageTagMirrorSetInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go index 48e4896def..ea96c4e794 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go @@ -45,13 +45,25 @@ func NewFilteredNetworkInformer(client versioned.Interface, resyncPeriod time.Du if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Networks().List(context.TODO(), options) + return client.ConfigV1().Networks().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Networks().Watch(context.TODO(), options) + return client.ConfigV1().Networks().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Networks().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Networks().Watch(ctx, options) }, }, &apiconfigv1.Network{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go index 2cb791b00c..d098a79b51 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go @@ -45,13 +45,25 @@ func NewFilteredNodeInformer(client versioned.Interface, resyncPeriod time.Durat if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Nodes().List(context.TODO(), options) + return client.ConfigV1().Nodes().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Nodes().Watch(context.TODO(), options) + return client.ConfigV1().Nodes().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Nodes().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Nodes().Watch(ctx, options) }, }, &apiconfigv1.Node{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go index 75128769f3..3c66d94d1e 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go +++ 
b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go @@ -45,13 +45,25 @@ func NewFilteredOAuthInformer(client versioned.Interface, resyncPeriod time.Dura if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().OAuths().List(context.TODO(), options) + return client.ConfigV1().OAuths().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().OAuths().Watch(context.TODO(), options) + return client.ConfigV1().OAuths().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().OAuths().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().OAuths().Watch(ctx, options) }, }, &apiconfigv1.OAuth{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go index d2196b2255..972c711bb9 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go @@ -45,13 +45,25 @@ func NewFilteredOperatorHubInformer(client versioned.Interface, resyncPeriod tim if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().OperatorHubs().List(context.TODO(), options) + return client.ConfigV1().OperatorHubs().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().OperatorHubs().Watch(context.TODO(), options) + return client.ConfigV1().OperatorHubs().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().OperatorHubs().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().OperatorHubs().Watch(ctx, options) }, }, &apiconfigv1.OperatorHub{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go index 0c5604e1e4..c6aa8bfbc2 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go @@ -45,13 +45,25 @@ func NewFilteredProjectInformer(client versioned.Interface, resyncPeriod time.Du if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Projects().List(context.TODO(), options) + return client.ConfigV1().Projects().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { 
tweakListOptions(&options) } - return client.ConfigV1().Projects().Watch(context.TODO(), options) + return client.ConfigV1().Projects().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Projects().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Projects().Watch(ctx, options) }, }, &apiconfigv1.Project{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go index aa1c2c551b..607e5c2273 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go @@ -45,13 +45,25 @@ func NewFilteredProxyInformer(client versioned.Interface, resyncPeriod time.Dura if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Proxies().List(context.TODO(), options) + return client.ConfigV1().Proxies().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Proxies().Watch(context.TODO(), options) + return client.ConfigV1().Proxies().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Proxies().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Proxies().Watch(ctx, options) }, }, &apiconfigv1.Proxy{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go index 0117f2941d..7a7783f547 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go @@ -45,13 +45,25 @@ func NewFilteredSchedulerInformer(client versioned.Interface, resyncPeriod time. 
if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Schedulers().List(context.TODO(), options) + return client.ConfigV1().Schedulers().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Schedulers().Watch(context.TODO(), options) + return client.ConfigV1().Schedulers().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Schedulers().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Schedulers().Watch(ctx, options) }, }, &apiconfigv1.Scheduler{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go index bed1857ee8..2ccf1a3146 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go @@ -45,13 +45,25 @@ func NewFilteredBackupInformer(client versioned.Interface, resyncPeriod time.Dur if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1alpha1().Backups().List(context.TODO(), options) + return client.ConfigV1alpha1().Backups().List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1alpha1().Backups().Watch(context.TODO(), options) + return client.ConfigV1alpha1().Backups().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().Backups().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().Backups().Watch(ctx, options) }, }, &apiconfigv1alpha1.Backup{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go index b11866c35f..c525adfb9b 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go @@ -45,13 +45,25 @@ func NewFilteredClusterImagePolicyInformer(client versioned.Interface, resyncPer if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1alpha1().ClusterImagePolicies().List(context.TODO(), options) + return client.ConfigV1alpha1().ClusterImagePolicies().List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return 
client.ConfigV1alpha1().ClusterImagePolicies().Watch(context.TODO(), options) + return client.ConfigV1alpha1().ClusterImagePolicies().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ClusterImagePolicies().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ClusterImagePolicies().Watch(ctx, options) }, }, &apiconfigv1alpha1.ClusterImagePolicy{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clustermonitoring.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clustermonitoring.go index 94a2ec3e4e..5c47b4b2cb 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clustermonitoring.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clustermonitoring.go @@ -45,13 +45,25 @@ func NewFilteredClusterMonitoringInformer(client versioned.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1alpha1().ClusterMonitorings().List(context.TODO(), options) + return client.ConfigV1alpha1().ClusterMonitorings().List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1alpha1().ClusterMonitorings().Watch(context.TODO(), options) + return client.ConfigV1alpha1().ClusterMonitorings().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ClusterMonitorings().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ClusterMonitorings().Watch(ctx, options) }, }, &apiconfigv1alpha1.ClusterMonitoring{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go index d6ab02a000..875eb9d796 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go @@ -46,13 +46,25 @@ func NewFilteredImagePolicyInformer(client versioned.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1alpha1().ImagePolicies(namespace).List(context.TODO(), options) + return client.ConfigV1alpha1().ImagePolicies(namespace).List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1alpha1().ImagePolicies(namespace).Watch(context.TODO(), options) + return client.ConfigV1alpha1().ImagePolicies(namespace).Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx 
context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ImagePolicies(namespace).List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().ImagePolicies(namespace).Watch(ctx, options) }, }, &apiconfigv1alpha1.ImagePolicy{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go index 51f09bad2c..4df1b4b1cb 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go @@ -45,13 +45,25 @@ func NewFilteredInsightsDataGatherInformer(client versioned.Interface, resyncPer if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1alpha1().InsightsDataGathers().List(context.TODO(), options) + return client.ConfigV1alpha1().InsightsDataGathers().List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1alpha1().InsightsDataGathers().Watch(context.TODO(), options) + return client.ConfigV1alpha1().InsightsDataGathers().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().InsightsDataGathers().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().InsightsDataGathers().Watch(ctx, options) }, }, &apiconfigv1alpha1.InsightsDataGather{}, diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha2/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha2/insightsdatagather.go new file mode 100644 index 0000000000..28ceb2d75f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha2/insightsdatagather.go @@ -0,0 +1,85 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + context "context" + time "time" + + apiconfigv1alpha2 "github.com/openshift/api/config/v1alpha2" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1alpha2 "github.com/openshift/client-go/config/listers/config/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// InsightsDataGatherInformer provides access to a shared informer and lister for +// InsightsDataGathers. 
+type InsightsDataGatherInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1alpha2.InsightsDataGatherLister +} + +type insightsDataGatherInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewInsightsDataGatherInformer constructs a new informer for InsightsDataGather type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInsightsDataGatherInformer constructs a new informer for InsightsDataGather type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha2().InsightsDataGathers().List(context.Background(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha2().InsightsDataGathers().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha2().InsightsDataGathers().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha2().InsightsDataGathers().Watch(ctx, options) + }, + }, + &apiconfigv1alpha2.InsightsDataGather{}, + resyncPeriod, + indexers, + ) +} + +func (f *insightsDataGatherInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *insightsDataGatherInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1alpha2.InsightsDataGather{}, f.defaultInformer) +} + +func (f *insightsDataGatherInformer) Lister() configv1alpha2.InsightsDataGatherLister { + return configv1alpha2.NewInsightsDataGatherLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha2/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha2/interface.go new file mode 100644 index 0000000000..f7d8f276f4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha2/interface.go @@ -0,0 +1,29 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // InsightsDataGathers returns a InsightsDataGatherInformer. + InsightsDataGathers() InsightsDataGatherInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// InsightsDataGathers returns a InsightsDataGatherInformer. +func (v *version) InsightsDataGathers() InsightsDataGatherInformer { + return &insightsDataGatherInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go index 9135d1fcc1..59c98ea77c 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go @@ -7,6 +7,7 @@ import ( v1 "github.com/openshift/api/config/v1" v1alpha1 "github.com/openshift/api/config/v1alpha1" + v1alpha2 "github.com/openshift/api/config/v1alpha2" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) @@ -44,6 +45,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Authentications().Informer()}, nil case v1.SchemeGroupVersion.WithResource("builds"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Builds().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("clusterimagepolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ClusterImagePolicies().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusteroperators"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ClusterOperators().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterversions"): @@ -60,6 +63,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ImageContentPolicies().Informer()}, nil case v1.SchemeGroupVersion.WithResource("imagedigestmirrorsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ImageDigestMirrorSets().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("imagepolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ImagePolicies().Informer()}, nil case v1.SchemeGroupVersion.WithResource("imagetagmirrorsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ImageTagMirrorSets().Informer()}, nil case v1.SchemeGroupVersion.WithResource("infrastructures"): @@ -93,6 +98,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1alpha1.SchemeGroupVersion.WithResource("insightsdatagathers"): return &genericInformer{resource: 
resource.GroupResource(), informer: f.Config().V1alpha1().InsightsDataGathers().Informer()}, nil + // Group=config.openshift.io, Version=v1alpha2 + case v1alpha2.SchemeGroupVersion.WithResource("insightsdatagathers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha2().InsightsDataGathers().Informer()}, nil + } return nil, fmt.Errorf("no informer found for %v", resource) diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterimagepolicy.go new file mode 100644 index 0000000000..693b815939 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterimagepolicy.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterImagePolicyLister helps list ClusterImagePolicies. +// All objects returned here must be treated as read-only. +type ClusterImagePolicyLister interface { + // List lists all ClusterImagePolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.ClusterImagePolicy, err error) + // Get retrieves the ClusterImagePolicy from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.ClusterImagePolicy, error) + ClusterImagePolicyListerExpansion +} + +// clusterImagePolicyLister implements the ClusterImagePolicyLister interface. +type clusterImagePolicyLister struct { + listers.ResourceIndexer[*configv1.ClusterImagePolicy] +} + +// NewClusterImagePolicyLister returns a new ClusterImagePolicyLister. +func NewClusterImagePolicyLister(indexer cache.Indexer) ClusterImagePolicyLister { + return &clusterImagePolicyLister{listers.New[*configv1.ClusterImagePolicy](indexer, configv1.Resource("clusterimagepolicy"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go index b5d6fc088b..d4e79cd0ea 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go @@ -14,6 +14,10 @@ type AuthenticationListerExpansion interface{} // BuildLister. type BuildListerExpansion interface{} +// ClusterImagePolicyListerExpansion allows custom methods to be added to +// ClusterImagePolicyLister. +type ClusterImagePolicyListerExpansion interface{} + // ClusterOperatorListerExpansion allows custom methods to be added to // ClusterOperatorLister. type ClusterOperatorListerExpansion interface{} @@ -46,6 +50,14 @@ type ImageContentPolicyListerExpansion interface{} // ImageDigestMirrorSetLister. type ImageDigestMirrorSetListerExpansion interface{} +// ImagePolicyListerExpansion allows custom methods to be added to +// ImagePolicyLister. +type ImagePolicyListerExpansion interface{} + +// ImagePolicyNamespaceListerExpansion allows custom methods to be added to +// ImagePolicyNamespaceLister. +type ImagePolicyNamespaceListerExpansion interface{} + // ImageTagMirrorSetListerExpansion allows custom methods to be added to // ImageTagMirrorSetLister. 
type ImageTagMirrorSetListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagepolicy.go new file mode 100644 index 0000000000..38f4e20ef1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagepolicy.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ImagePolicyLister helps list ImagePolicies. +// All objects returned here must be treated as read-only. +type ImagePolicyLister interface { + // List lists all ImagePolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.ImagePolicy, err error) + // ImagePolicies returns an object that can list and get ImagePolicies. + ImagePolicies(namespace string) ImagePolicyNamespaceLister + ImagePolicyListerExpansion +} + +// imagePolicyLister implements the ImagePolicyLister interface. +type imagePolicyLister struct { + listers.ResourceIndexer[*configv1.ImagePolicy] +} + +// NewImagePolicyLister returns a new ImagePolicyLister. +func NewImagePolicyLister(indexer cache.Indexer) ImagePolicyLister { + return &imagePolicyLister{listers.New[*configv1.ImagePolicy](indexer, configv1.Resource("imagepolicy"))} +} + +// ImagePolicies returns an object that can list and get ImagePolicies. +func (s *imagePolicyLister) ImagePolicies(namespace string) ImagePolicyNamespaceLister { + return imagePolicyNamespaceLister{listers.NewNamespaced[*configv1.ImagePolicy](s.ResourceIndexer, namespace)} +} + +// ImagePolicyNamespaceLister helps list and get ImagePolicies. +// All objects returned here must be treated as read-only. +type ImagePolicyNamespaceLister interface { + // List lists all ImagePolicies in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1.ImagePolicy, err error) + // Get retrieves the ImagePolicy from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.ImagePolicy, error) + ImagePolicyNamespaceListerExpansion +} + +// imagePolicyNamespaceLister implements the ImagePolicyNamespaceLister +// interface. +type imagePolicyNamespaceLister struct { + listers.ResourceIndexer[*configv1.ImagePolicy] +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha2/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha2/expansion_generated.go new file mode 100644 index 0000000000..edd6ab8c51 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha2/expansion_generated.go @@ -0,0 +1,7 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +// InsightsDataGatherListerExpansion allows custom methods to be added to +// InsightsDataGatherLister. 
+type InsightsDataGatherListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha2/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha2/insightsdatagather.go new file mode 100644 index 0000000000..13f0c3e4d8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha2/insightsdatagather.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + configv1alpha2 "github.com/openshift/api/config/v1alpha2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// InsightsDataGatherLister helps list InsightsDataGathers. +// All objects returned here must be treated as read-only. +type InsightsDataGatherLister interface { + // List lists all InsightsDataGathers in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*configv1alpha2.InsightsDataGather, err error) + // Get retrieves the InsightsDataGather from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1alpha2.InsightsDataGather, error) + InsightsDataGatherListerExpansion +} + +// insightsDataGatherLister implements the InsightsDataGatherLister interface. +type insightsDataGatherLister struct { + listers.ResourceIndexer[*configv1alpha2.InsightsDataGather] +} + +// NewInsightsDataGatherLister returns a new InsightsDataGatherLister. +func NewInsightsDataGatherLister(indexer cache.Indexer) InsightsDataGatherLister { + return &insightsDataGatherLister{listers.New[*configv1alpha2.InsightsDataGather](indexer, configv1alpha2.Resource("insightsdatagather"))} +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go index a71b069083..9e5e51b487 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go @@ -63,6 +63,16 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: __untyped_deduced_ elementRelationship: separable +- name: com.github.openshift.api.machineconfiguration.v1.MachineConfigNode + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable - name: com.github.openshift.api.machineconfiguration.v1.MachineConfigPool scalar: untyped list: @@ -423,7 +433,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: containerfileArch type: scalar: string - default: "" - name: content type: scalar: string diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignode.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignode.go new file mode 100644 index 0000000000..1f55801861 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignode.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + machineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + internal "github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MachineConfigNodeApplyConfiguration represents a declarative configuration of the MachineConfigNode type for use +// with apply. +type MachineConfigNodeApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *MachineConfigNodeSpecApplyConfiguration `json:"spec,omitempty"` + Status *MachineConfigNodeStatusApplyConfiguration `json:"status,omitempty"` +} + +// MachineConfigNode constructs a declarative configuration of the MachineConfigNode type for use with +// apply. +func MachineConfigNode(name string) *MachineConfigNodeApplyConfiguration { + b := &MachineConfigNodeApplyConfiguration{} + b.WithName(name) + b.WithKind("MachineConfigNode") + b.WithAPIVersion("machineconfiguration.openshift.io/v1") + return b +} + +// ExtractMachineConfigNode extracts the applied configuration owned by fieldManager from +// machineConfigNode. If no managedFields are found in machineConfigNode for fieldManager, a +// MachineConfigNodeApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// machineConfigNode must be a unmodified MachineConfigNode API object that was retrieved from the Kubernetes API. +// ExtractMachineConfigNode provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractMachineConfigNode(machineConfigNode *machineconfigurationv1.MachineConfigNode, fieldManager string) (*MachineConfigNodeApplyConfiguration, error) { + return extractMachineConfigNode(machineConfigNode, fieldManager, "") +} + +// ExtractMachineConfigNodeStatus is the same as ExtractMachineConfigNode except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractMachineConfigNodeStatus(machineConfigNode *machineconfigurationv1.MachineConfigNode, fieldManager string) (*MachineConfigNodeApplyConfiguration, error) { + return extractMachineConfigNode(machineConfigNode, fieldManager, "status") +} + +func extractMachineConfigNode(machineConfigNode *machineconfigurationv1.MachineConfigNode, fieldManager string, subresource string) (*MachineConfigNodeApplyConfiguration, error) { + b := &MachineConfigNodeApplyConfiguration{} + err := managedfields.ExtractInto(machineConfigNode, internal.Parser().Type("com.github.openshift.api.machineconfiguration.v1.MachineConfigNode"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(machineConfigNode.Name) + + b.WithKind("MachineConfigNode") + b.WithAPIVersion("machineconfiguration.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithKind(value string) *MachineConfigNodeApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithAPIVersion(value string) *MachineConfigNodeApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithName(value string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithGenerateName(value string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithNamespace(value string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *MachineConfigNodeApplyConfiguration) WithUID(value types.UID) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithResourceVersion(value string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithGeneration(value int64) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *MachineConfigNodeApplyConfiguration) WithLabels(entries map[string]string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *MachineConfigNodeApplyConfiguration) WithAnnotations(entries map[string]string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *MachineConfigNodeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *MachineConfigNodeApplyConfiguration) WithFinalizers(values ...string) *MachineConfigNodeApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *MachineConfigNodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithSpec(value *MachineConfigNodeSpecApplyConfiguration) *MachineConfigNodeApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Status field is set to the value of the last call. +func (b *MachineConfigNodeApplyConfiguration) WithStatus(value *MachineConfigNodeStatusApplyConfiguration) *MachineConfigNodeApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MachineConfigNodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodespec.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodespec.go new file mode 100644 index 0000000000..d1e04cef84 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodespec.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MachineConfigNodeSpecApplyConfiguration represents a declarative configuration of the MachineConfigNodeSpec type for use +// with apply. +type MachineConfigNodeSpecApplyConfiguration struct { + Node *MCOObjectReferenceApplyConfiguration `json:"node,omitempty"` + Pool *MCOObjectReferenceApplyConfiguration `json:"pool,omitempty"` + ConfigVersion *MachineConfigNodeSpecMachineConfigVersionApplyConfiguration `json:"configVersion,omitempty"` +} + +// MachineConfigNodeSpecApplyConfiguration constructs a declarative configuration of the MachineConfigNodeSpec type for use with +// apply. +func MachineConfigNodeSpec() *MachineConfigNodeSpecApplyConfiguration { + return &MachineConfigNodeSpecApplyConfiguration{} +} + +// WithNode sets the Node field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Node field is set to the value of the last call. +func (b *MachineConfigNodeSpecApplyConfiguration) WithNode(value *MCOObjectReferenceApplyConfiguration) *MachineConfigNodeSpecApplyConfiguration { + b.Node = value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *MachineConfigNodeSpecApplyConfiguration) WithPool(value *MCOObjectReferenceApplyConfiguration) *MachineConfigNodeSpecApplyConfiguration { + b.Pool = value + return b +} + +// WithConfigVersion sets the ConfigVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConfigVersion field is set to the value of the last call. 
+func (b *MachineConfigNodeSpecApplyConfiguration) WithConfigVersion(value *MachineConfigNodeSpecMachineConfigVersionApplyConfiguration) *MachineConfigNodeSpecApplyConfiguration { + b.ConfigVersion = value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodespecmachineconfigversion.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodespecmachineconfigversion.go new file mode 100644 index 0000000000..3657f3352f --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodespecmachineconfigversion.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MachineConfigNodeSpecMachineConfigVersionApplyConfiguration represents a declarative configuration of the MachineConfigNodeSpecMachineConfigVersion type for use +// with apply. +type MachineConfigNodeSpecMachineConfigVersionApplyConfiguration struct { + Desired *string `json:"desired,omitempty"` +} + +// MachineConfigNodeSpecMachineConfigVersionApplyConfiguration constructs a declarative configuration of the MachineConfigNodeSpecMachineConfigVersion type for use with +// apply. +func MachineConfigNodeSpecMachineConfigVersion() *MachineConfigNodeSpecMachineConfigVersionApplyConfiguration { + return &MachineConfigNodeSpecMachineConfigVersionApplyConfiguration{} +} + +// WithDesired sets the Desired field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Desired field is set to the value of the last call. +func (b *MachineConfigNodeSpecMachineConfigVersionApplyConfiguration) WithDesired(value string) *MachineConfigNodeSpecMachineConfigVersionApplyConfiguration { + b.Desired = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatus.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatus.go new file mode 100644 index 0000000000..abfc04d09c --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatus.go @@ -0,0 +1,64 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MachineConfigNodeStatusApplyConfiguration represents a declarative configuration of the MachineConfigNodeStatus type for use +// with apply. +type MachineConfigNodeStatusApplyConfiguration struct { + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + ConfigVersion *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration `json:"configVersion,omitempty"` + PinnedImageSets []MachineConfigNodeStatusPinnedImageSetApplyConfiguration `json:"pinnedImageSets,omitempty"` +} + +// MachineConfigNodeStatusApplyConfiguration constructs a declarative configuration of the MachineConfigNodeStatus type for use with +// apply. 
+func MachineConfigNodeStatus() *MachineConfigNodeStatusApplyConfiguration {
+ return &MachineConfigNodeStatusApplyConfiguration{}
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *MachineConfigNodeStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *MachineConfigNodeStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
+
+// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ObservedGeneration field is set to the value of the last call.
+func (b *MachineConfigNodeStatusApplyConfiguration) WithObservedGeneration(value int64) *MachineConfigNodeStatusApplyConfiguration {
+ b.ObservedGeneration = &value
+ return b
+}
+
+// WithConfigVersion sets the ConfigVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ConfigVersion field is set to the value of the last call.
+func (b *MachineConfigNodeStatusApplyConfiguration) WithConfigVersion(value *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration) *MachineConfigNodeStatusApplyConfiguration {
+ b.ConfigVersion = value
+ return b
+}
+
+// WithPinnedImageSets adds the given value to the PinnedImageSets field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the PinnedImageSets field.
+func (b *MachineConfigNodeStatusApplyConfiguration) WithPinnedImageSets(values ...*MachineConfigNodeStatusPinnedImageSetApplyConfiguration) *MachineConfigNodeStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithPinnedImageSets")
+ }
+ b.PinnedImageSets = append(b.PinnedImageSets, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatusmachineconfigversion.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatusmachineconfigversion.go
new file mode 100644
index 0000000000..1b6276d9a2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatusmachineconfigversion.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// MachineConfigNodeStatusMachineConfigVersionApplyConfiguration represents a declarative configuration of the MachineConfigNodeStatusMachineConfigVersion type for use
+// with apply.
+type MachineConfigNodeStatusMachineConfigVersionApplyConfiguration struct { + Current *string `json:"current,omitempty"` + Desired *string `json:"desired,omitempty"` +} + +// MachineConfigNodeStatusMachineConfigVersionApplyConfiguration constructs a declarative configuration of the MachineConfigNodeStatusMachineConfigVersion type for use with +// apply. +func MachineConfigNodeStatusMachineConfigVersion() *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration { + return &MachineConfigNodeStatusMachineConfigVersionApplyConfiguration{} +} + +// WithCurrent sets the Current field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Current field is set to the value of the last call. +func (b *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration) WithCurrent(value string) *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration { + b.Current = &value + return b +} + +// WithDesired sets the Desired field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Desired field is set to the value of the last call. +func (b *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration) WithDesired(value string) *MachineConfigNodeStatusMachineConfigVersionApplyConfiguration { + b.Desired = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatuspinnedimageset.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatuspinnedimageset.go new file mode 100644 index 0000000000..35af400461 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatuspinnedimageset.go @@ -0,0 +1,59 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MachineConfigNodeStatusPinnedImageSetApplyConfiguration represents a declarative configuration of the MachineConfigNodeStatusPinnedImageSet type for use +// with apply. +type MachineConfigNodeStatusPinnedImageSetApplyConfiguration struct { + Name *string `json:"name,omitempty"` + CurrentGeneration *int32 `json:"currentGeneration,omitempty"` + DesiredGeneration *int32 `json:"desiredGeneration,omitempty"` + LastFailedGeneration *int32 `json:"lastFailedGeneration,omitempty"` + LastFailedGenerationError *string `json:"lastFailedGenerationError,omitempty"` +} + +// MachineConfigNodeStatusPinnedImageSetApplyConfiguration constructs a declarative configuration of the MachineConfigNodeStatusPinnedImageSet type for use with +// apply. +func MachineConfigNodeStatusPinnedImageSet() *MachineConfigNodeStatusPinnedImageSetApplyConfiguration { + return &MachineConfigNodeStatusPinnedImageSetApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *MachineConfigNodeStatusPinnedImageSetApplyConfiguration) WithName(value string) *MachineConfigNodeStatusPinnedImageSetApplyConfiguration { + b.Name = &value + return b +} + +// WithCurrentGeneration sets the CurrentGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CurrentGeneration field is set to the value of the last call. +func (b *MachineConfigNodeStatusPinnedImageSetApplyConfiguration) WithCurrentGeneration(value int32) *MachineConfigNodeStatusPinnedImageSetApplyConfiguration { + b.CurrentGeneration = &value + return b +} + +// WithDesiredGeneration sets the DesiredGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DesiredGeneration field is set to the value of the last call. +func (b *MachineConfigNodeStatusPinnedImageSetApplyConfiguration) WithDesiredGeneration(value int32) *MachineConfigNodeStatusPinnedImageSetApplyConfiguration { + b.DesiredGeneration = &value + return b +} + +// WithLastFailedGeneration sets the LastFailedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastFailedGeneration field is set to the value of the last call. +func (b *MachineConfigNodeStatusPinnedImageSetApplyConfiguration) WithLastFailedGeneration(value int32) *MachineConfigNodeStatusPinnedImageSetApplyConfiguration { + b.LastFailedGeneration = &value + return b +} + +// WithLastFailedGenerationError sets the LastFailedGenerationError field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastFailedGenerationError field is set to the value of the last call. +func (b *MachineConfigNodeStatusPinnedImageSetApplyConfiguration) WithLastFailedGenerationError(value string) *MachineConfigNodeStatusPinnedImageSetApplyConfiguration { + b.LastFailedGenerationError = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/mcoobjectreference.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/mcoobjectreference.go new file mode 100644 index 0000000000..a8101be272 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/mcoobjectreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MCOObjectReferenceApplyConfiguration represents a declarative configuration of the MCOObjectReference type for use +// with apply. +type MCOObjectReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// MCOObjectReferenceApplyConfiguration constructs a declarative configuration of the MCOObjectReference type for use with +// apply. +func MCOObjectReference() *MCOObjectReferenceApplyConfiguration { + return &MCOObjectReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Name field is set to the value of the last call. +func (b *MCOObjectReferenceApplyConfiguration) WithName(value string) *MCOObjectReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/generated_expansion.go index 185b67c3d2..c326255951 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/generated_expansion.go @@ -10,6 +10,8 @@ type KubeletConfigExpansion interface{} type MachineConfigExpansion interface{} +type MachineConfigNodeExpansion interface{} + type MachineConfigPoolExpansion interface{} type MachineOSBuildExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfignode.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfignode.go new file mode 100644 index 0000000000..72ea1044cb --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfignode.go @@ -0,0 +1,60 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + machineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + applyconfigurationsmachineconfigurationv1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1" + scheme "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// MachineConfigNodesGetter has a method to return a MachineConfigNodeInterface. +// A group's client should implement this interface. +type MachineConfigNodesGetter interface { + MachineConfigNodes() MachineConfigNodeInterface +} + +// MachineConfigNodeInterface has methods to work with MachineConfigNode resources. +type MachineConfigNodeInterface interface { + Create(ctx context.Context, machineConfigNode *machineconfigurationv1.MachineConfigNode, opts metav1.CreateOptions) (*machineconfigurationv1.MachineConfigNode, error) + Update(ctx context.Context, machineConfigNode *machineconfigurationv1.MachineConfigNode, opts metav1.UpdateOptions) (*machineconfigurationv1.MachineConfigNode, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, machineConfigNode *machineconfigurationv1.MachineConfigNode, opts metav1.UpdateOptions) (*machineconfigurationv1.MachineConfigNode, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*machineconfigurationv1.MachineConfigNode, error) + List(ctx context.Context, opts metav1.ListOptions) (*machineconfigurationv1.MachineConfigNodeList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *machineconfigurationv1.MachineConfigNode, err error) + Apply(ctx context.Context, machineConfigNode *applyconfigurationsmachineconfigurationv1.MachineConfigNodeApplyConfiguration, opts metav1.ApplyOptions) (result *machineconfigurationv1.MachineConfigNode, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, machineConfigNode *applyconfigurationsmachineconfigurationv1.MachineConfigNodeApplyConfiguration, opts metav1.ApplyOptions) (result *machineconfigurationv1.MachineConfigNode, err error) + MachineConfigNodeExpansion +} + +// machineConfigNodes implements MachineConfigNodeInterface +type machineConfigNodes struct { + *gentype.ClientWithListAndApply[*machineconfigurationv1.MachineConfigNode, *machineconfigurationv1.MachineConfigNodeList, *applyconfigurationsmachineconfigurationv1.MachineConfigNodeApplyConfiguration] +} + +// newMachineConfigNodes returns a MachineConfigNodes +func newMachineConfigNodes(c *MachineconfigurationV1Client) *machineConfigNodes { + return &machineConfigNodes{ + gentype.NewClientWithListAndApply[*machineconfigurationv1.MachineConfigNode, *machineconfigurationv1.MachineConfigNodeList, *applyconfigurationsmachineconfigurationv1.MachineConfigNodeApplyConfiguration]( + "machineconfignodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *machineconfigurationv1.MachineConfigNode { return &machineconfigurationv1.MachineConfigNode{} }, + func() *machineconfigurationv1.MachineConfigNodeList { + return &machineconfigurationv1.MachineConfigNodeList{} + }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfiguration_client.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfiguration_client.go index 10a0351753..a30fc68430 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfiguration_client.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfiguration_client.go @@ -16,6 +16,7 @@ type MachineconfigurationV1Interface interface { ControllerConfigsGetter KubeletConfigsGetter MachineConfigsGetter + MachineConfigNodesGetter MachineConfigPoolsGetter MachineOSBuildsGetter MachineOSConfigsGetter @@ -43,6 +44,10 @@ func (c *MachineconfigurationV1Client) MachineConfigs() MachineConfigInterface { return newMachineConfigs(c) } +func (c *MachineconfigurationV1Client) MachineConfigNodes() MachineConfigNodeInterface { + return newMachineConfigNodes(c) +} + func (c *MachineconfigurationV1Client) 
MachineConfigPools() MachineConfigPoolInterface { return newMachineConfigPools(c) } @@ -64,9 +69,7 @@ func (c *MachineconfigurationV1Client) PinnedImageSets() PinnedImageSetInterface // where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*MachineconfigurationV1Client, error) { config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } + setConfigDefaults(&config) httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err @@ -78,9 +81,7 @@ func NewForConfig(c *rest.Config) (*MachineconfigurationV1Client, error) { // Note the http client provided takes precedence over the configured transport values. func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MachineconfigurationV1Client, error) { config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } + setConfigDefaults(&config) client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err @@ -103,7 +104,7 @@ func New(c rest.Interface) *MachineconfigurationV1Client { return &MachineconfigurationV1Client{c} } -func setConfigDefaults(config *rest.Config) error { +func setConfigDefaults(config *rest.Config) { gv := machineconfigurationv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" @@ -112,8 +113,6 @@ func setConfigDefaults(config *rest.Config) error { if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() } - - return nil } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfiguration_client.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfiguration_client.go index 3d6be9ecdb..fceb825dd7 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfiguration_client.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfiguration_client.go @@ -44,9 +44,7 @@ func (c *MachineconfigurationV1alpha1Client) PinnedImageSets() PinnedImageSetInt // where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*MachineconfigurationV1alpha1Client, error) { config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } + setConfigDefaults(&config) httpClient, err := rest.HTTPClientFor(&config) if err != nil { return nil, err @@ -58,9 +56,7 @@ func NewForConfig(c *rest.Config) (*MachineconfigurationV1alpha1Client, error) { // Note the http client provided takes precedence over the configured transport values. 
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MachineconfigurationV1alpha1Client, error) { config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } + setConfigDefaults(&config) client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err @@ -83,7 +79,7 @@ func New(c rest.Interface) *MachineconfigurationV1alpha1Client { return &MachineconfigurationV1alpha1Client{c} } -func setConfigDefaults(config *rest.Config) error { +func setConfigDefaults(config *rest.Config) { gv := machineconfigurationv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" @@ -92,8 +88,6 @@ func setConfigDefaults(config *rest.Config) error { if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() } - - return nil } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go index 37deff0dc7..e42238de73 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go @@ -46,6 +46,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().KubeletConfigs().Informer()}, nil case v1.SchemeGroupVersion.WithResource("machineconfigs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().MachineConfigs().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("machineconfignodes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().MachineConfigNodes().Informer()}, nil case v1.SchemeGroupVersion.WithResource("machineconfigpools"): return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().MachineConfigPools().Informer()}, nil case v1.SchemeGroupVersion.WithResource("machineosbuilds"): diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/containerruntimeconfig.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/containerruntimeconfig.go index 93e76c66ab..e71bb10b96 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/containerruntimeconfig.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/containerruntimeconfig.go @@ -45,13 +45,25 @@ func NewFilteredContainerRuntimeConfigInformer(client versioned.Interface, resyn if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().ContainerRuntimeConfigs().List(context.TODO(), options) + return client.MachineconfigurationV1().ContainerRuntimeConfigs().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().ContainerRuntimeConfigs().Watch(context.TODO(), options) + return client.MachineconfigurationV1().ContainerRuntimeConfigs().Watch(context.Background(), options) + }, + 
ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().ContainerRuntimeConfigs().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().ContainerRuntimeConfigs().Watch(ctx, options) }, }, &apimachineconfigurationv1.ContainerRuntimeConfig{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/controllerconfig.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/controllerconfig.go index b1930d0f54..540dcc0b53 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/controllerconfig.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/controllerconfig.go @@ -45,13 +45,25 @@ func NewFilteredControllerConfigInformer(client versioned.Interface, resyncPerio if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().ControllerConfigs().List(context.TODO(), options) + return client.MachineconfigurationV1().ControllerConfigs().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().ControllerConfigs().Watch(context.TODO(), options) + return client.MachineconfigurationV1().ControllerConfigs().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().ControllerConfigs().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().ControllerConfigs().Watch(ctx, options) }, }, &apimachineconfigurationv1.ControllerConfig{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/interface.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/interface.go index e579c5d488..37304154e5 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/interface.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/interface.go @@ -16,6 +16,8 @@ type Interface interface { KubeletConfigs() KubeletConfigInformer // MachineConfigs returns a MachineConfigInformer. MachineConfigs() MachineConfigInformer + // MachineConfigNodes returns a MachineConfigNodeInformer. + MachineConfigNodes() MachineConfigNodeInformer // MachineConfigPools returns a MachineConfigPoolInformer. MachineConfigPools() MachineConfigPoolInformer // MachineOSBuilds returns a MachineOSBuildInformer. 
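Besides the new MachineConfigNodes() accessor, every regenerated ListWatch in this patch now carries context-aware ListWithContextFunc and WatchFuncWithContext variants alongside the legacy List/Watch funcs, so informers can propagate cancellation rather than relying on context.TODO(). Consumers are unaffected and keep going through the shared factory. A minimal sketch of such a consumer, assuming a versioned clientset value and illustrative names (the function and resync period are not part of this patch):

import (
	"time"

	mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
	mcfgclient "github.com/openshift/client-go/machineconfiguration/clientset/versioned"
	mcfginformers "github.com/openshift/client-go/machineconfiguration/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
)

// listMachineConfigNodes builds the shared factory, starts the informers
// (which drives the ListWatch shown in these hunks), waits for the cache to
// sync, and then lists from the lister-backed index instead of the API server.
func listMachineConfigNodes(cs mcfgclient.Interface, stopCh <-chan struct{}) ([]*mcfgv1.MachineConfigNode, error) {
	factory := mcfginformers.NewSharedInformerFactory(cs, 10*time.Minute)
	lister := factory.Machineconfiguration().V1().MachineConfigNodes().Lister()

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	return lister.List(labels.Everything())
}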
@@ -57,6 +59,11 @@ func (v *version) MachineConfigs() MachineConfigInformer { return &machineConfigInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// MachineConfigNodes returns a MachineConfigNodeInformer. +func (v *version) MachineConfigNodes() MachineConfigNodeInformer { + return &machineConfigNodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // MachineConfigPools returns a MachineConfigPoolInformer. func (v *version) MachineConfigPools() MachineConfigPoolInformer { return &machineConfigPoolInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/kubeletconfig.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/kubeletconfig.go index a9e9fad855..faa7d51817 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/kubeletconfig.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/kubeletconfig.go @@ -45,13 +45,25 @@ func NewFilteredKubeletConfigInformer(client versioned.Interface, resyncPeriod t if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().KubeletConfigs().List(context.TODO(), options) + return client.MachineconfigurationV1().KubeletConfigs().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().KubeletConfigs().Watch(context.TODO(), options) + return client.MachineconfigurationV1().KubeletConfigs().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().KubeletConfigs().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().KubeletConfigs().Watch(ctx, options) }, }, &apimachineconfigurationv1.KubeletConfig{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfig.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfig.go index f0b1ae9c4e..6748f1bb0d 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfig.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfig.go @@ -45,13 +45,25 @@ func NewFilteredMachineConfigInformer(client versioned.Interface, resyncPeriod t if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().MachineConfigs().List(context.TODO(), options) + return client.MachineconfigurationV1().MachineConfigs().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().MachineConfigs().Watch(context.TODO(), options) + return 
client.MachineconfigurationV1().MachineConfigs().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineConfigs().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineConfigs().Watch(ctx, options) }, }, &apimachineconfigurationv1.MachineConfig{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfignode.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfignode.go new file mode 100644 index 0000000000..f56d0eb22c --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfignode.go @@ -0,0 +1,85 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apimachineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + versioned "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/machineconfiguration/informers/externalversions/internalinterfaces" + machineconfigurationv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// MachineConfigNodeInformer provides access to a shared informer and lister for +// MachineConfigNodes. +type MachineConfigNodeInformer interface { + Informer() cache.SharedIndexInformer + Lister() machineconfigurationv1.MachineConfigNodeLister +} + +type machineConfigNodeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMachineConfigNodeInformer constructs a new informer for MachineConfigNode type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMachineConfigNodeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMachineConfigNodeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMachineConfigNodeInformer constructs a new informer for MachineConfigNode type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredMachineConfigNodeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineConfigNodes().List(context.Background(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineConfigNodes().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineConfigNodes().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineConfigNodes().Watch(ctx, options) + }, + }, + &apimachineconfigurationv1.MachineConfigNode{}, + resyncPeriod, + indexers, + ) +} + +func (f *machineConfigNodeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMachineConfigNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *machineConfigNodeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apimachineconfigurationv1.MachineConfigNode{}, f.defaultInformer) +} + +func (f *machineConfigNodeInformer) Lister() machineconfigurationv1.MachineConfigNodeLister { + return machineconfigurationv1.NewMachineConfigNodeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfigpool.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfigpool.go index cba2c0f689..c7bebbc028 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfigpool.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineconfigpool.go @@ -45,13 +45,25 @@ func NewFilteredMachineConfigPoolInformer(client versioned.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().MachineConfigPools().List(context.TODO(), options) + return client.MachineconfigurationV1().MachineConfigPools().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().MachineConfigPools().Watch(context.TODO(), options) + return client.MachineconfigurationV1().MachineConfigPools().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineConfigPools().List(ctx, options) + }, + WatchFuncWithContext: 
func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineConfigPools().Watch(ctx, options) }, }, &apimachineconfigurationv1.MachineConfigPool{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosbuild.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosbuild.go index a3e1017516..0037fa8edf 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosbuild.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosbuild.go @@ -45,13 +45,25 @@ func NewFilteredMachineOSBuildInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().MachineOSBuilds().List(context.TODO(), options) + return client.MachineconfigurationV1().MachineOSBuilds().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().MachineOSBuilds().Watch(context.TODO(), options) + return client.MachineconfigurationV1().MachineOSBuilds().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineOSBuilds().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineOSBuilds().Watch(ctx, options) }, }, &apimachineconfigurationv1.MachineOSBuild{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosconfig.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosconfig.go index c216b75572..bf458b0239 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosconfig.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosconfig.go @@ -45,13 +45,25 @@ func NewFilteredMachineOSConfigInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().MachineOSConfigs().List(context.TODO(), options) + return client.MachineconfigurationV1().MachineOSConfigs().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().MachineOSConfigs().Watch(context.TODO(), options) + return client.MachineconfigurationV1().MachineOSConfigs().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return 
client.MachineconfigurationV1().MachineOSConfigs().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineOSConfigs().Watch(ctx, options) }, }, &apimachineconfigurationv1.MachineOSConfig{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/pinnedimageset.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/pinnedimageset.go index 2d549af535..7fad19b32f 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/pinnedimageset.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/pinnedimageset.go @@ -45,13 +45,25 @@ func NewFilteredPinnedImageSetInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().PinnedImageSets().List(context.TODO(), options) + return client.MachineconfigurationV1().PinnedImageSets().List(context.Background(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1().PinnedImageSets().Watch(context.TODO(), options) + return client.MachineconfigurationV1().PinnedImageSets().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().PinnedImageSets().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().PinnedImageSets().Watch(ctx, options) }, }, &apimachineconfigurationv1.PinnedImageSet{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineconfignode.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineconfignode.go index 53cc3a1b39..7e53c0bb93 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineconfignode.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineconfignode.go @@ -45,13 +45,25 @@ func NewFilteredMachineConfigNodeInformer(client versioned.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1alpha1().MachineConfigNodes().List(context.TODO(), options) + return client.MachineconfigurationV1alpha1().MachineConfigNodes().List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1alpha1().MachineConfigNodes().Watch(context.TODO(), options) + return client.MachineconfigurationV1alpha1().MachineConfigNodes().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) 
(runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1alpha1().MachineConfigNodes().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1alpha1().MachineConfigNodes().Watch(ctx, options) }, }, &apimachineconfigurationv1alpha1.MachineConfigNode{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineosbuild.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineosbuild.go index 12154a263e..8be964e32c 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineosbuild.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineosbuild.go @@ -45,13 +45,25 @@ func NewFilteredMachineOSBuildInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1alpha1().MachineOSBuilds().List(context.TODO(), options) + return client.MachineconfigurationV1alpha1().MachineOSBuilds().List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1alpha1().MachineOSBuilds().Watch(context.TODO(), options) + return client.MachineconfigurationV1alpha1().MachineOSBuilds().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1alpha1().MachineOSBuilds().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1alpha1().MachineOSBuilds().Watch(ctx, options) }, }, &apimachineconfigurationv1alpha1.MachineOSBuild{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineosconfig.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineosconfig.go index d96475801d..3e3af42bc5 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineosconfig.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/machineosconfig.go @@ -45,13 +45,25 @@ func NewFilteredMachineOSConfigInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1alpha1().MachineOSConfigs().List(context.TODO(), options) + return client.MachineconfigurationV1alpha1().MachineOSConfigs().List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1alpha1().MachineOSConfigs().Watch(context.TODO(), options) + return 
client.MachineconfigurationV1alpha1().MachineOSConfigs().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1alpha1().MachineOSConfigs().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1alpha1().MachineOSConfigs().Watch(ctx, options) }, }, &apimachineconfigurationv1alpha1.MachineOSConfig{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/pinnedimageset.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/pinnedimageset.go index e5759c4e34..1eeb017347 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/pinnedimageset.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1/pinnedimageset.go @@ -45,13 +45,25 @@ func NewFilteredPinnedImageSetInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1alpha1().PinnedImageSets().List(context.TODO(), options) + return client.MachineconfigurationV1alpha1().PinnedImageSets().List(context.Background(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.MachineconfigurationV1alpha1().PinnedImageSets().Watch(context.TODO(), options) + return client.MachineconfigurationV1alpha1().PinnedImageSets().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1alpha1().PinnedImageSets().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1alpha1().PinnedImageSets().Watch(ctx, options) }, }, &apimachineconfigurationv1alpha1.PinnedImageSet{}, diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/expansion_generated.go index 030325498b..7198dc96be 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/expansion_generated.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/expansion_generated.go @@ -18,6 +18,10 @@ type KubeletConfigListerExpansion interface{} // MachineConfigLister. type MachineConfigListerExpansion interface{} +// MachineConfigNodeListerExpansion allows custom methods to be added to +// MachineConfigNodeLister. +type MachineConfigNodeListerExpansion interface{} + // MachineConfigPoolListerExpansion allows custom methods to be added to // MachineConfigPoolLister. 
type MachineConfigPoolListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/machineconfignode.go b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/machineconfignode.go new file mode 100644 index 0000000000..79de86fb71 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/machineconfignode.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + machineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// MachineConfigNodeLister helps list MachineConfigNodes. +// All objects returned here must be treated as read-only. +type MachineConfigNodeLister interface { + // List lists all MachineConfigNodes in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*machineconfigurationv1.MachineConfigNode, err error) + // Get retrieves the MachineConfigNode from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*machineconfigurationv1.MachineConfigNode, error) + MachineConfigNodeListerExpansion +} + +// machineConfigNodeLister implements the MachineConfigNodeLister interface. +type machineConfigNodeLister struct { + listers.ResourceIndexer[*machineconfigurationv1.MachineConfigNode] +} + +// NewMachineConfigNodeLister returns a new MachineConfigNodeLister. +func NewMachineConfigNodeLister(indexer cache.Indexer) MachineConfigNodeLister { + return &machineConfigNodeLister{listers.New[*machineconfigurationv1.MachineConfigNode](indexer, machineconfigurationv1.Resource("machineconfignode"))} +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000..ddec4b73ef --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go @@ -0,0 +1,4473 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package internal + +import ( + fmt "fmt" + sync "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.config.v1.ConfigMapFileReference + map: + fields: + - name: key + type: + scalar: string + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.ConfigMapNameReference + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.CustomTLSProfile + map: + fields: + - name: ciphers + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: minTLSVersion + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.IntermediateTLSProfile + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.ModernTLSProfile + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.OldTLSProfile + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.config.v1.SecretNameReference + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.TLSProfileSpec + map: + fields: + - name: ciphers + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: minTLSVersion + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.TLSSecurityProfile + map: + fields: + - name: custom + type: + namedType: com.github.openshift.api.config.v1.CustomTLSProfile + - name: intermediate + type: + namedType: com.github.openshift.api.config.v1.IntermediateTLSProfile + - name: modern + type: + namedType: com.github.openshift.api.config.v1.ModernTLSProfile + - name: old + type: + namedType: com.github.openshift.api.config.v1.OldTLSProfile + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: custom + discriminatorValue: Custom + - fieldName: intermediate + discriminatorValue: Intermediate + - fieldName: modern + discriminatorValue: Modern + - fieldName: old + discriminatorValue: Old +- name: com.github.openshift.api.operator.v1.AWSCSIDriverConfigSpec + map: + fields: + - name: efsVolumeMetrics + type: + namedType: com.github.openshift.api.operator.v1.AWSEFSVolumeMetrics + - name: kmsKeyARN + type: + scalar: string +- name: com.github.openshift.api.operator.v1.AWSClassicLoadBalancerParameters + map: + fields: + - name: connectionIdleTimeout + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: subnets + type: + namedType: com.github.openshift.api.operator.v1.AWSSubnets +- name: com.github.openshift.api.operator.v1.AWSEFSVolumeMetrics + map: + fields: + - name: recursiveWalk + 
type: + namedType: com.github.openshift.api.operator.v1.AWSEFSVolumeMetricsRecursiveWalkConfig + - name: state + type: + scalar: string + default: "" + unions: + - discriminator: state + fields: + - fieldName: recursiveWalk + discriminatorValue: RecursiveWalk +- name: com.github.openshift.api.operator.v1.AWSEFSVolumeMetricsRecursiveWalkConfig + map: + fields: + - name: fsRateLimit + type: + scalar: numeric + - name: refreshPeriodMinutes + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.AWSLoadBalancerParameters + map: + fields: + - name: classicLoadBalancer + type: + namedType: com.github.openshift.api.operator.v1.AWSClassicLoadBalancerParameters + - name: networkLoadBalancer + type: + namedType: com.github.openshift.api.operator.v1.AWSNetworkLoadBalancerParameters + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: classicLoadBalancer + discriminatorValue: ClassicLoadBalancerParameters + - fieldName: networkLoadBalancer + discriminatorValue: NetworkLoadBalancerParameters +- name: com.github.openshift.api.operator.v1.AWSNetworkLoadBalancerParameters + map: + fields: + - name: eipAllocations + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: subnets + type: + namedType: com.github.openshift.api.operator.v1.AWSSubnets +- name: com.github.openshift.api.operator.v1.AWSSubnets + map: + fields: + - name: ids + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: names + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.AccessLogging + map: + fields: + - name: destination + type: + namedType: com.github.openshift.api.operator.v1.LoggingDestination + default: {} + - name: httpCaptureCookies + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.IngressControllerCaptureHTTPCookie + elementRelationship: atomic + - name: httpCaptureHeaders + type: + namedType: com.github.openshift.api.operator.v1.IngressControllerCaptureHTTPHeaders + default: {} + - name: httpLogFormat + type: + scalar: string + - name: logEmptyRequests + type: + scalar: string +- name: com.github.openshift.api.operator.v1.AddPage + map: + fields: + - name: disabledActions + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.AdditionalNetworkDefinition + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + - name: rawCNIConfig + type: + scalar: string + - name: simpleMacvlanConfig + type: + namedType: com.github.openshift.api.operator.v1.SimpleMacvlanConfig + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.AdditionalRoutingCapabilities + map: + fields: + - name: providers + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.Authentication + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.AuthenticationSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.AuthenticationStatus + default: {} +- name: com.github.openshift.api.operator.v1.AuthenticationSpec + map: + fields: + - 
name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.AuthenticationStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: oauthAPIServer + type: + namedType: com.github.openshift.api.operator.v1.OAuthAPIServerStatus + default: {} + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.AzureCSIDriverConfigSpec + map: + fields: + - name: diskEncryptionSet + type: + namedType: com.github.openshift.api.operator.v1.AzureDiskEncryptionSet +- name: com.github.openshift.api.operator.v1.AzureDiskEncryptionSet + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: resourceGroup + type: + scalar: string + default: "" + - name: subscriptionID + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.CSIDriverConfigSpec + map: + fields: + - name: aws + type: + namedType: com.github.openshift.api.operator.v1.AWSCSIDriverConfigSpec + - name: azure + type: + namedType: com.github.openshift.api.operator.v1.AzureCSIDriverConfigSpec + - name: driverType + type: + scalar: string + default: "" + - name: gcp + type: + namedType: com.github.openshift.api.operator.v1.GCPCSIDriverConfigSpec + - name: ibmcloud + type: + namedType: com.github.openshift.api.operator.v1.IBMCloudCSIDriverConfigSpec + - name: vSphere + type: + namedType: com.github.openshift.api.operator.v1.VSphereCSIDriverConfigSpec + unions: + - discriminator: driverType + fields: + - fieldName: aws + discriminatorValue: AWS + - fieldName: azure + discriminatorValue: Azure + - fieldName: gcp + discriminatorValue: GCP + - fieldName: ibmcloud + discriminatorValue: IBMCloud + - fieldName: vSphere + discriminatorValue: VSphere +- name: com.github.openshift.api.operator.v1.CSISnapshotController + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.CSISnapshotControllerSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.CSISnapshotControllerStatus + default: {} +- name: com.github.openshift.api.operator.v1.CSISnapshotControllerSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.CSISnapshotControllerStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: 
com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.Capability + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: visibility + type: + namedType: com.github.openshift.api.operator.v1.CapabilityVisibility + default: {} +- name: com.github.openshift.api.operator.v1.CapabilityVisibility + map: + fields: + - name: state + type: + scalar: string + default: "" + unions: + - discriminator: state +- name: com.github.openshift.api.operator.v1.ClientTLS + map: + fields: + - name: allowedSubjectPatterns + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: clientCA + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: clientCertificatePolicy + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.CloudCredential + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.CloudCredentialSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.CloudCredentialStatus + default: {} +- name: com.github.openshift.api.operator.v1.CloudCredentialSpec + map: + fields: + - name: credentialsMode + type: + scalar: string + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.CloudCredentialStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.ClusterCSIDriver + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.ClusterCSIDriverSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.ClusterCSIDriverStatus + default: {} +- name: com.github.openshift.api.operator.v1.ClusterCSIDriverSpec + map: + fields: + - name: driverConfig + type: + namedType: 
com.github.openshift.api.operator.v1.CSIDriverConfigSpec + default: {} + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: storageClassState + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.ClusterCSIDriverStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.ClusterNetworkEntry + map: + fields: + - name: cidr + type: + scalar: string + default: "" + - name: hostPrefix + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.Config + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.ConfigSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.ConfigStatus + default: {} +- name: com.github.openshift.api.operator.v1.ConfigMapFileReference + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.ConfigSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.ConfigStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.Console + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.ConsoleSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.ConsoleStatus + default: {} +- name: com.github.openshift.api.operator.v1.ConsoleConfigRoute + map: + fields: + - name: 
hostname + type: + scalar: string + default: "" + - name: secret + type: + namedType: com.github.openshift.api.config.v1.SecretNameReference + default: {} +- name: com.github.openshift.api.operator.v1.ConsoleCustomization + map: + fields: + - name: addPage + type: + namedType: com.github.openshift.api.operator.v1.AddPage + default: {} + - name: brand + type: + scalar: string + - name: capabilities + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.Capability + elementRelationship: associative + keys: + - name + - name: customLogoFile + type: + namedType: com.github.openshift.api.config.v1.ConfigMapFileReference + default: {} + - name: customProductName + type: + scalar: string + - name: developerCatalog + type: + namedType: com.github.openshift.api.operator.v1.DeveloperConsoleCatalogCustomization + default: {} + - name: documentationBaseURL + type: + scalar: string + - name: logos + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.Logo + elementRelationship: associative + keys: + - type + - name: perspectives + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.Perspective + elementRelationship: associative + keys: + - id + - name: projectAccess + type: + namedType: com.github.openshift.api.operator.v1.ProjectAccess + default: {} + - name: quickStarts + type: + namedType: com.github.openshift.api.operator.v1.QuickStarts + default: {} +- name: com.github.openshift.api.operator.v1.ConsoleProviders + map: + fields: + - name: statuspage + type: + namedType: com.github.openshift.api.operator.v1.StatuspageProvider +- name: com.github.openshift.api.operator.v1.ConsoleSpec + map: + fields: + - name: customization + type: + namedType: com.github.openshift.api.operator.v1.ConsoleCustomization + default: {} + - name: ingress + type: + namedType: com.github.openshift.api.operator.v1.Ingress + default: {} + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: plugins + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: providers + type: + namedType: com.github.openshift.api.operator.v1.ConsoleProviders + default: {} + - name: route + type: + namedType: com.github.openshift.api.operator.v1.ConsoleConfigRoute + default: {} + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.ConsoleStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.ContainerLoggingDestinationParameters + map: + fields: + - name: maxLength + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.DNS + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.DNSSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.DNSStatus + default: {} +- name: com.github.openshift.api.operator.v1.DNSCache + map: + fields: + - name: negativeTTL + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: positiveTTL + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration +- name: com.github.openshift.api.operator.v1.DNSNodePlacement + map: + fields: + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: tolerations + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Toleration + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.DNSOverTLSConfig + map: + fields: + - name: caBundle + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: serverName + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.DNSSpec + map: + fields: + - name: cache + type: + namedType: com.github.openshift.api.operator.v1.DNSCache + default: {} + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + - name: nodePlacement + type: + namedType: com.github.openshift.api.operator.v1.DNSNodePlacement + default: {} + - name: operatorLogLevel + type: + scalar: string + - name: servers + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.Server + elementRelationship: atomic + - name: upstreamResolvers + type: + namedType: com.github.openshift.api.operator.v1.UpstreamResolvers + default: {} +- name: com.github.openshift.api.operator.v1.DNSStatus + map: + fields: + - name: clusterDomain + type: + scalar: string + default: "" + - name: clusterIP + type: + scalar: string + default: "" + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type +- name: com.github.openshift.api.operator.v1.DNSTransportConfig + map: + fields: + - name: tls + type: + namedType: com.github.openshift.api.operator.v1.DNSOverTLSConfig + - name: transport + type: + scalar: string + unions: + - discriminator: transport + fields: + - fieldName: tls + discriminatorValue: TLS +- name: com.github.openshift.api.operator.v1.DefaultNetworkDefinition + map: + fields: + - name: openshiftSDNConfig + type: + namedType: com.github.openshift.api.operator.v1.OpenShiftSDNConfig + - name: ovnKubernetesConfig + type: + namedType: com.github.openshift.api.operator.v1.OVNKubernetesConfig + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.DeveloperConsoleCatalogCategory + map: + fields: + - name: id + type: + scalar: string + default: "" + - name: label + type: + scalar: string + default: "" + - name: subcategories + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.DeveloperConsoleCatalogCategoryMeta + elementRelationship: atomic + - name: tags + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.DeveloperConsoleCatalogCategoryMeta + map: + fields: + - name: id + type: + scalar: string + default: "" + - name: label + type: + scalar: string + default: "" + - name: tags + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: 
com.github.openshift.api.operator.v1.DeveloperConsoleCatalogCustomization + map: + fields: + - name: categories + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.DeveloperConsoleCatalogCategory + elementRelationship: atomic + - name: types + type: + namedType: com.github.openshift.api.operator.v1.DeveloperConsoleCatalogTypes + default: {} +- name: com.github.openshift.api.operator.v1.DeveloperConsoleCatalogTypes + map: + fields: + - name: disabled + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: enabled + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: state + type: + scalar: string + default: Enabled + unions: + - discriminator: state + fields: + - fieldName: disabled + discriminatorValue: Disabled + - fieldName: enabled + discriminatorValue: Enabled +- name: com.github.openshift.api.operator.v1.EgressIPConfig + map: + fields: + - name: reachabilityTotalTimeoutSeconds + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.EndpointPublishingStrategy + map: + fields: + - name: hostNetwork + type: + namedType: com.github.openshift.api.operator.v1.HostNetworkStrategy + - name: loadBalancer + type: + namedType: com.github.openshift.api.operator.v1.LoadBalancerStrategy + - name: nodePort + type: + namedType: com.github.openshift.api.operator.v1.NodePortStrategy + - name: private + type: + namedType: com.github.openshift.api.operator.v1.PrivateStrategy + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: hostNetwork + discriminatorValue: HostNetwork + - fieldName: loadBalancer + discriminatorValue: LoadBalancer + - fieldName: nodePort + discriminatorValue: NodePort + - fieldName: private + discriminatorValue: Private +- name: com.github.openshift.api.operator.v1.Etcd + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.EtcdSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.EtcdStatus + default: {} +- name: com.github.openshift.api.operator.v1.EtcdSpec + map: + fields: + - name: backendQuotaGiB + type: + scalar: numeric + default: 8 + - name: controlPlaneHardwareSpeed + type: + scalar: string + default: "" + - name: failedRevisionLimit + type: + scalar: numeric + - name: forceRedeploymentReason + type: + scalar: string + default: "" + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: succeededRevisionLimit + type: + scalar: numeric + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.EtcdStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: controlPlaneHardwareSpeed + type: + scalar: string + default: "" + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + 
type: + scalar: numeric + - name: latestAvailableRevisionReason + type: + scalar: string + - name: nodeStatuses + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeStatus + elementRelationship: associative + keys: + - nodeName + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.ExportNetworkFlows + map: + fields: + - name: ipfix + type: + namedType: com.github.openshift.api.operator.v1.IPFIXConfig + - name: netFlow + type: + namedType: com.github.openshift.api.operator.v1.NetFlowConfig + - name: sFlow + type: + namedType: com.github.openshift.api.operator.v1.SFlowConfig +- name: com.github.openshift.api.operator.v1.FeaturesMigration + map: + fields: + - name: egressFirewall + type: + scalar: boolean + - name: egressIP + type: + scalar: boolean + - name: multicast + type: + scalar: boolean +- name: com.github.openshift.api.operator.v1.FileReferenceSource + map: + fields: + - name: configMap + type: + namedType: com.github.openshift.api.operator.v1.ConfigMapFileReference + - name: from + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.ForwardPlugin + map: + fields: + - name: policy + type: + scalar: string + - name: protocolStrategy + type: + scalar: string + default: "" + - name: transportConfig + type: + namedType: com.github.openshift.api.operator.v1.DNSTransportConfig + default: {} + - name: upstreams + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.GCPCSIDriverConfigSpec + map: + fields: + - name: kmsKey + type: + namedType: com.github.openshift.api.operator.v1.GCPKMSKeyReference +- name: com.github.openshift.api.operator.v1.GCPKMSKeyReference + map: + fields: + - name: keyRing + type: + scalar: string + default: "" + - name: location + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: projectID + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.GCPLoadBalancerParameters + map: + fields: + - name: clientAccess + type: + scalar: string +- name: com.github.openshift.api.operator.v1.GatewayConfig + map: + fields: + - name: ipForwarding + type: + scalar: string + - name: ipv4 + type: + namedType: com.github.openshift.api.operator.v1.IPv4GatewayConfig + default: {} + - name: ipv6 + type: + namedType: com.github.openshift.api.operator.v1.IPv6GatewayConfig + default: {} + - name: routingViaHost + type: + scalar: boolean +- name: com.github.openshift.api.operator.v1.GatherStatus + map: + fields: + - name: gatherers + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GathererStatus + elementRelationship: atomic + - name: lastGatherDuration + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: lastGatherTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: com.github.openshift.api.operator.v1.GathererStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: atomic + - name: lastGatherDuration + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.GenerationStatus + map: + fields: + - name: group + type: + scalar: string + default: "" + - name: hash + 
type: + scalar: string + default: "" + - name: lastGeneration + type: + scalar: numeric + default: 0 + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + default: "" + - name: resource + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.HTTPCompressionPolicy + map: + fields: + - name: mimeTypes + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: com.github.openshift.api.operator.v1.HealthCheck + map: + fields: + - name: advisorURI + type: + scalar: string + default: "" + - name: description + type: + scalar: string + default: "" + - name: state + type: + scalar: string + default: "" + - name: totalRisk + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.operator.v1.HostNetworkStrategy + map: + fields: + - name: httpPort + type: + scalar: numeric + - name: httpsPort + type: + scalar: numeric + - name: protocol + type: + scalar: string + - name: statsPort + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.HybridOverlayConfig + map: + fields: + - name: hybridClusterNetwork + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.ClusterNetworkEntry + elementRelationship: atomic + - name: hybridOverlayVXLANPort + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.IBMCloudCSIDriverConfigSpec + map: + fields: + - name: encryptionKeyCRN + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.IBMLoadBalancerParameters + map: + fields: + - name: protocol + type: + scalar: string +- name: com.github.openshift.api.operator.v1.IPAMConfig + map: + fields: + - name: staticIPAMConfig + type: + namedType: com.github.openshift.api.operator.v1.StaticIPAMConfig + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.IPFIXConfig + map: + fields: + - name: collectors + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.IPsecConfig + map: + fields: + - name: full + type: + namedType: com.github.openshift.api.operator.v1.IPsecFullModeConfig + - name: mode + type: + scalar: string + unions: + - discriminator: mode + fields: + - fieldName: full + discriminatorValue: Full +- name: com.github.openshift.api.operator.v1.IPsecFullModeConfig + map: + fields: + - name: encapsulation + type: + scalar: string +- name: com.github.openshift.api.operator.v1.IPv4GatewayConfig + map: + fields: + - name: internalMasqueradeSubnet + type: + scalar: string +- name: com.github.openshift.api.operator.v1.IPv4OVNKubernetesConfig + map: + fields: + - name: internalJoinSubnet + type: + scalar: string + - name: internalTransitSwitchSubnet + type: + scalar: string +- name: com.github.openshift.api.operator.v1.IPv6GatewayConfig + map: + fields: + - name: internalMasqueradeSubnet + type: + scalar: string +- name: com.github.openshift.api.operator.v1.IPv6OVNKubernetesConfig + map: + fields: + - name: internalJoinSubnet + type: + scalar: string + - name: internalTransitSwitchSubnet + type: + scalar: string +- name: com.github.openshift.api.operator.v1.Ingress + map: + fields: + - name: clientDownloadsURL + type: + scalar: string + default: "" + - name: consoleURL + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.IngressController + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + 
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.IngressControllerSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.IngressControllerStatus + default: {} +- name: com.github.openshift.api.operator.v1.IngressControllerCaptureHTTPCookie + map: + fields: + - name: matchType + type: + scalar: string + default: "" + - name: maxLength + type: + scalar: numeric + default: 0 + - name: name + type: + scalar: string + default: "" + - name: namePrefix + type: + scalar: string + default: "" + unions: + - discriminator: matchType + fields: + - fieldName: name + discriminatorValue: Name + - fieldName: namePrefix + discriminatorValue: NamePrefix +- name: com.github.openshift.api.operator.v1.IngressControllerCaptureHTTPHeader + map: + fields: + - name: maxLength + type: + scalar: numeric + default: 0 + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.IngressControllerCaptureHTTPHeaders + map: + fields: + - name: request + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.IngressControllerCaptureHTTPHeader + elementRelationship: atomic + - name: response + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.IngressControllerCaptureHTTPHeader + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.IngressControllerHTTPHeader + map: + fields: + - name: action + type: + namedType: com.github.openshift.api.operator.v1.IngressControllerHTTPHeaderActionUnion + default: {} + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.IngressControllerHTTPHeaderActionUnion + map: + fields: + - name: set + type: + namedType: com.github.openshift.api.operator.v1.IngressControllerSetHTTPHeader + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: set + discriminatorValue: Set +- name: com.github.openshift.api.operator.v1.IngressControllerHTTPHeaderActions + map: + fields: + - name: request + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.IngressControllerHTTPHeader + elementRelationship: associative + keys: + - name + - name: response + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.IngressControllerHTTPHeader + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.operator.v1.IngressControllerHTTPHeaders + map: + fields: + - name: actions + type: + namedType: com.github.openshift.api.operator.v1.IngressControllerHTTPHeaderActions + default: {} + - name: forwardedHeaderPolicy + type: + scalar: string + - name: headerNameCaseAdjustments + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: uniqueId + type: + namedType: com.github.openshift.api.operator.v1.IngressControllerHTTPUniqueIdHeaderPolicy + default: {} +- name: com.github.openshift.api.operator.v1.IngressControllerHTTPUniqueIdHeaderPolicy + map: + fields: + - name: format + type: + scalar: string + - name: name + type: + scalar: string +- name: com.github.openshift.api.operator.v1.IngressControllerLogging + map: + fields: + - name: access + type: + namedType: com.github.openshift.api.operator.v1.AccessLogging +- name: com.github.openshift.api.operator.v1.IngressControllerSetHTTPHeader + map: + fields: + - name: value + type: + scalar: string + default: "" +- name: 
com.github.openshift.api.operator.v1.IngressControllerSpec + map: + fields: + - name: clientTLS + type: + namedType: com.github.openshift.api.operator.v1.ClientTLS + default: {} + - name: defaultCertificate + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: domain + type: + scalar: string + - name: endpointPublishingStrategy + type: + namedType: com.github.openshift.api.operator.v1.EndpointPublishingStrategy + - name: httpCompression + type: + namedType: com.github.openshift.api.operator.v1.HTTPCompressionPolicy + default: {} + - name: httpEmptyRequestsPolicy + type: + scalar: string + - name: httpErrorCodePages + type: + namedType: com.github.openshift.api.config.v1.ConfigMapNameReference + default: {} + - name: httpHeaders + type: + namedType: com.github.openshift.api.operator.v1.IngressControllerHTTPHeaders + - name: idleConnectionTerminationPolicy + type: + scalar: string + default: Immediate + - name: logging + type: + namedType: com.github.openshift.api.operator.v1.IngressControllerLogging + - name: namespaceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: nodePlacement + type: + namedType: com.github.openshift.api.operator.v1.NodePlacement + - name: replicas + type: + scalar: numeric + - name: routeAdmission + type: + namedType: com.github.openshift.api.operator.v1.RouteAdmissionPolicy + - name: routeSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: tlsSecurityProfile + type: + namedType: com.github.openshift.api.config.v1.TLSSecurityProfile + - name: tuningOptions + type: + namedType: com.github.openshift.api.operator.v1.IngressControllerTuningOptions + default: {} + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.IngressControllerStatus + map: + fields: + - name: availableReplicas + type: + scalar: numeric + default: 0 + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: domain + type: + scalar: string + default: "" + - name: endpointPublishingStrategy + type: + namedType: com.github.openshift.api.operator.v1.EndpointPublishingStrategy + - name: namespaceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: observedGeneration + type: + scalar: numeric + - name: routeSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: selector + type: + scalar: string + default: "" + - name: tlsProfile + type: + namedType: com.github.openshift.api.config.v1.TLSProfileSpec +- name: com.github.openshift.api.operator.v1.IngressControllerTuningOptions + map: + fields: + - name: clientFinTimeout + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: clientTimeout + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: connectTimeout + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: headerBufferBytes + type: + scalar: numeric + - name: headerBufferMaxRewriteBytes + type: + scalar: numeric + - name: healthCheckInterval + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: maxConnections + type: + scalar: numeric + - name: reloadInterval + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: serverFinTimeout + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: serverTimeout + type: + namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: threadCount + type: + scalar: numeric + - name: tlsInspectDelay + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: tunnelTimeout + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration +- name: com.github.openshift.api.operator.v1.InsightsOperator + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.InsightsOperatorSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.InsightsOperatorStatus + default: {} +- name: com.github.openshift.api.operator.v1.InsightsOperatorSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.InsightsOperatorStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: gatherStatus + type: + namedType: com.github.openshift.api.operator.v1.GatherStatus + default: {} + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: insightsReport + type: + namedType: com.github.openshift.api.operator.v1.InsightsReport + default: {} + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.InsightsReport + map: + fields: + - name: downloadedAt + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: healthChecks + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.HealthCheck + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.KubeAPIServer + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.KubeAPIServerSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.KubeAPIServerStatus + default: {} +- name: com.github.openshift.api.operator.v1.KubeAPIServerSpec + map: + fields: + - name: failedRevisionLimit + type: + scalar: numeric + - name: forceRedeploymentReason + type: + scalar: string + default: "" + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: succeededRevisionLimit + type: + scalar: numeric + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.KubeAPIServerStatus + map: + fields: + - name: conditions + type: + list: + elementType: + 
namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: latestAvailableRevisionReason + type: + scalar: string + - name: nodeStatuses + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeStatus + elementRelationship: associative + keys: + - nodeName + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: serviceAccountIssuers + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.ServiceAccountIssuerStatus + elementRelationship: atomic + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.KubeControllerManager + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.KubeControllerManagerSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.KubeControllerManagerStatus + default: {} +- name: com.github.openshift.api.operator.v1.KubeControllerManagerSpec + map: + fields: + - name: failedRevisionLimit + type: + scalar: numeric + - name: forceRedeploymentReason + type: + scalar: string + default: "" + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: succeededRevisionLimit + type: + scalar: numeric + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ + - name: useMoreSecureServiceCA + type: + scalar: boolean + default: false +- name: com.github.openshift.api.operator.v1.KubeControllerManagerStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: latestAvailableRevisionReason + type: + scalar: string + - name: nodeStatuses + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeStatus + elementRelationship: associative + keys: + - nodeName + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.KubeScheduler + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.KubeSchedulerSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.KubeSchedulerStatus + default: {} +- name: 
com.github.openshift.api.operator.v1.KubeSchedulerSpec + map: + fields: + - name: failedRevisionLimit + type: + scalar: numeric + - name: forceRedeploymentReason + type: + scalar: string + default: "" + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: succeededRevisionLimit + type: + scalar: numeric + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.KubeSchedulerStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: latestAvailableRevisionReason + type: + scalar: string + - name: nodeStatuses + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeStatus + elementRelationship: associative + keys: + - nodeName + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.KubeStorageVersionMigrator + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.KubeStorageVersionMigratorSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.KubeStorageVersionMigratorStatus + default: {} +- name: com.github.openshift.api.operator.v1.KubeStorageVersionMigratorSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.KubeStorageVersionMigratorStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.LoadBalancerStrategy + map: + fields: + - name: allowedSourceRanges + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: dnsManagementPolicy + type: + scalar: string + default: Managed + - name: providerParameters + type: + namedType: com.github.openshift.api.operator.v1.ProviderLoadBalancerParameters + - name: scope + type: + scalar: string + default: "" +- name: 
com.github.openshift.api.operator.v1.LoggingDestination + map: + fields: + - name: container + type: + namedType: com.github.openshift.api.operator.v1.ContainerLoggingDestinationParameters + - name: syslog + type: + namedType: com.github.openshift.api.operator.v1.SyslogLoggingDestinationParameters + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: container + discriminatorValue: Container + - fieldName: syslog + discriminatorValue: Syslog +- name: com.github.openshift.api.operator.v1.Logo + map: + fields: + - name: themes + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.Theme + elementRelationship: associative + keys: + - mode + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.MTUMigration + map: + fields: + - name: machine + type: + namedType: com.github.openshift.api.operator.v1.MTUMigrationValues + - name: network + type: + namedType: com.github.openshift.api.operator.v1.MTUMigrationValues +- name: com.github.openshift.api.operator.v1.MTUMigrationValues + map: + fields: + - name: from + type: + scalar: numeric + - name: to + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.MachineConfiguration + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.MachineConfigurationSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.MachineConfigurationStatus + default: {} +- name: com.github.openshift.api.operator.v1.MachineConfigurationSpec + map: + fields: + - name: failedRevisionLimit + type: + scalar: numeric + - name: forceRedeploymentReason + type: + scalar: string + default: "" + - name: logLevel + type: + scalar: string + - name: managedBootImages + type: + namedType: com.github.openshift.api.operator.v1.ManagedBootImages + default: {} + - name: managementState + type: + scalar: string + default: "" + - name: nodeDisruptionPolicy + type: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicyConfig + default: {} + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: succeededRevisionLimit + type: + scalar: numeric + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.MachineConfigurationStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: managedBootImagesStatus + type: + namedType: com.github.openshift.api.operator.v1.ManagedBootImages + default: {} + - name: nodeDisruptionPolicyStatus + type: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatus + default: {} + - name: observedGeneration + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.MachineManager + map: + fields: + - name: apiGroup + type: + scalar: string + default: "" + - name: resource + type: + scalar: string + default: "" + - name: selection + type: + namedType: com.github.openshift.api.operator.v1.MachineManagerSelector + default: {} +- name: com.github.openshift.api.operator.v1.MachineManagerSelector + map: + fields: + - name: mode + type: + scalar: string + default: "" 
+ - name: partial + type: + namedType: com.github.openshift.api.operator.v1.PartialSelector + unions: + - discriminator: mode + fields: + - fieldName: partial + discriminatorValue: Partial +- name: com.github.openshift.api.operator.v1.ManagedBootImages + map: + fields: + - name: machineManagers + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.MachineManager + elementRelationship: associative + keys: + - resource + - apiGroup +- name: com.github.openshift.api.operator.v1.NetFlowConfig + map: + fields: + - name: collectors + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.Network + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.NetworkSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.NetworkStatus + default: {} +- name: com.github.openshift.api.operator.v1.NetworkMigration + map: + fields: + - name: features + type: + namedType: com.github.openshift.api.operator.v1.FeaturesMigration + - name: mode + type: + scalar: string + - name: mtu + type: + namedType: com.github.openshift.api.operator.v1.MTUMigration + - name: networkType + type: + scalar: string +- name: com.github.openshift.api.operator.v1.NetworkSpec + map: + fields: + - name: additionalNetworks + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.AdditionalNetworkDefinition + elementRelationship: associative + keys: + - name + - name: additionalRoutingCapabilities + type: + namedType: com.github.openshift.api.operator.v1.AdditionalRoutingCapabilities + - name: clusterNetwork + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.ClusterNetworkEntry + elementRelationship: atomic + - name: defaultNetwork + type: + namedType: com.github.openshift.api.operator.v1.DefaultNetworkDefinition + default: {} + - name: deployKubeProxy + type: + scalar: boolean + - name: disableMultiNetwork + type: + scalar: boolean + - name: disableNetworkDiagnostics + type: + scalar: boolean + default: false + - name: exportNetworkFlows + type: + namedType: com.github.openshift.api.operator.v1.ExportNetworkFlows + - name: kubeProxyConfig + type: + namedType: com.github.openshift.api.operator.v1.ProxyConfig + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: migration + type: + namedType: com.github.openshift.api.operator.v1.NetworkMigration + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: serviceNetwork + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ + - name: useMultiNetworkPolicy + type: + scalar: boolean +- name: com.github.openshift.api.operator.v1.NetworkStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision 
+ type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.NodeDisruptionPolicyClusterStatus + map: + fields: + - name: files + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatusFile + elementRelationship: associative + keys: + - path + - name: sshkey + type: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatusSSHKey + default: {} + - name: units + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatusUnit + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.operator.v1.NodeDisruptionPolicyConfig + map: + fields: + - name: files + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicySpecFile + elementRelationship: associative + keys: + - path + - name: sshkey + type: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicySpecSSHKey + default: {} + - name: units + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicySpecUnit + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.operator.v1.NodeDisruptionPolicySpecAction + map: + fields: + - name: reload + type: + namedType: com.github.openshift.api.operator.v1.ReloadService + - name: restart + type: + namedType: com.github.openshift.api.operator.v1.RestartService + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: reload + discriminatorValue: Reload + - fieldName: restart + discriminatorValue: Restart +- name: com.github.openshift.api.operator.v1.NodeDisruptionPolicySpecFile + map: + fields: + - name: actions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicySpecAction + elementRelationship: atomic + - name: path + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.NodeDisruptionPolicySpecSSHKey + map: + fields: + - name: actions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicySpecAction + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.NodeDisruptionPolicySpecUnit + map: + fields: + - name: actions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicySpecAction + elementRelationship: atomic + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatus + map: + fields: + - name: clusterPolicies + type: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicyClusterStatus + default: {} +- name: com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatusAction + map: + fields: + - name: reload + type: + namedType: com.github.openshift.api.operator.v1.ReloadService + - name: restart + type: + namedType: com.github.openshift.api.operator.v1.RestartService + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: reload + discriminatorValue: Reload + - fieldName: restart + discriminatorValue: Restart +- name: com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatusFile + map: + fields: + - name: actions + type: + list: + elementType: + namedType: 
com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatusAction + elementRelationship: atomic + - name: path + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatusSSHKey + map: + fields: + - name: actions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatusAction + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatusUnit + map: + fields: + - name: actions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.NodeDisruptionPolicyStatusAction + elementRelationship: atomic + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.NodePlacement + map: + fields: + - name: nodeSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: tolerations + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Toleration + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.NodePortStrategy + map: + fields: + - name: protocol + type: + scalar: string +- name: com.github.openshift.api.operator.v1.NodeStatus + map: + fields: + - name: currentRevision + type: + scalar: numeric + - name: lastFailedCount + type: + scalar: numeric + - name: lastFailedReason + type: + scalar: string + - name: lastFailedRevision + type: + scalar: numeric + - name: lastFailedRevisionErrors + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: lastFailedTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastFallbackCount + type: + scalar: numeric + - name: nodeName + type: + scalar: string + default: "" + - name: targetRevision + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.OAuthAPIServerStatus + map: + fields: + - name: latestAvailableRevision + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.OLM + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.OLMSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.OLMStatus + default: {} +- name: com.github.openshift.api.operator.v1.OLMSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.OLMStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.OVNKubernetesConfig + map: + fields: + - name: egressIPConfig + type: + 
namedType: com.github.openshift.api.operator.v1.EgressIPConfig + default: {} + - name: gatewayConfig + type: + namedType: com.github.openshift.api.operator.v1.GatewayConfig + - name: genevePort + type: + scalar: numeric + - name: hybridOverlayConfig + type: + namedType: com.github.openshift.api.operator.v1.HybridOverlayConfig + - name: ipsecConfig + type: + namedType: com.github.openshift.api.operator.v1.IPsecConfig + default: + mode: Disabled + - name: ipv4 + type: + namedType: com.github.openshift.api.operator.v1.IPv4OVNKubernetesConfig + - name: ipv6 + type: + namedType: com.github.openshift.api.operator.v1.IPv6OVNKubernetesConfig + - name: mtu + type: + scalar: numeric + - name: policyAuditConfig + type: + namedType: com.github.openshift.api.operator.v1.PolicyAuditConfig + - name: routeAdvertisements + type: + scalar: string + - name: v4InternalSubnet + type: + scalar: string + - name: v6InternalSubnet + type: + scalar: string +- name: com.github.openshift.api.operator.v1.OpenShiftAPIServer + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.OpenShiftAPIServerSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.OpenShiftAPIServerStatus + default: {} +- name: com.github.openshift.api.operator.v1.OpenShiftAPIServerSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.OpenShiftAPIServerStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.OpenShiftControllerManager + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.OpenShiftControllerManagerSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.OpenShiftControllerManagerStatus + default: {} +- name: com.github.openshift.api.operator.v1.OpenShiftControllerManagerSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.OpenShiftControllerManagerStatus + map: + fields: + - name: conditions + type: 
+ list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.OpenShiftSDNConfig + map: + fields: + - name: enableUnidling + type: + scalar: boolean + - name: mode + type: + scalar: string + default: "" + - name: mtu + type: + scalar: numeric + - name: useExternalOpenvswitch + type: + scalar: boolean + - name: vxlanPort + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.OpenStackLoadBalancerParameters + map: + fields: + - name: floatingIP + type: + scalar: string +- name: com.github.openshift.api.operator.v1.OperatorCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.PartialSelector + map: + fields: + - name: machineResourceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: com.github.openshift.api.operator.v1.Perspective + map: + fields: + - name: id + type: + scalar: string + default: "" + - name: pinnedResources + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.PinnedResourceReference + elementRelationship: atomic + - name: visibility + type: + namedType: com.github.openshift.api.operator.v1.PerspectiveVisibility + default: {} +- name: com.github.openshift.api.operator.v1.PerspectiveVisibility + map: + fields: + - name: accessReview + type: + namedType: com.github.openshift.api.operator.v1.ResourceAttributesAccessReview + - name: state + type: + scalar: string + default: "" + unions: + - discriminator: state + fields: + - fieldName: accessReview + discriminatorValue: AccessReview +- name: com.github.openshift.api.operator.v1.PinnedResourceReference + map: + fields: + - name: group + type: + scalar: string + default: "" + - name: resource + type: + scalar: string + default: "" + - name: version + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.PolicyAuditConfig + map: + fields: + - name: destination + type: + scalar: string + - name: maxFileSize + type: + scalar: numeric + - name: maxLogFiles + type: + scalar: numeric + - name: rateLimit + type: + scalar: numeric + - name: syslogFacility + type: + scalar: string +- name: com.github.openshift.api.operator.v1.PrivateStrategy + map: + fields: + - name: protocol + type: + scalar: string +- name: com.github.openshift.api.operator.v1.ProjectAccess + map: + fields: + - name: availableClusterRoles + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.ProviderLoadBalancerParameters + map: + fields: + - name: aws + type: + namedType: com.github.openshift.api.operator.v1.AWSLoadBalancerParameters + - name: gcp + type: + namedType: com.github.openshift.api.operator.v1.GCPLoadBalancerParameters + - name: ibm + 
type: + namedType: com.github.openshift.api.operator.v1.IBMLoadBalancerParameters + - name: openstack + type: + namedType: com.github.openshift.api.operator.v1.OpenStackLoadBalancerParameters + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: aws + discriminatorValue: AWS + - fieldName: gcp + discriminatorValue: GCP + - fieldName: ibm + discriminatorValue: IBM + - fieldName: openstack + discriminatorValue: OpenStack +- name: com.github.openshift.api.operator.v1.ProxyConfig + map: + fields: + - name: bindAddress + type: + scalar: string + - name: iptablesSyncPeriod + type: + scalar: string + - name: proxyArguments + type: + map: + elementType: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.QuickStarts + map: + fields: + - name: disabled + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.ReloadService + map: + fields: + - name: serviceName + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.ResourceAttributesAccessReview + map: + fields: + - name: missing + type: + list: + elementType: + namedType: io.k8s.api.authorization.v1.ResourceAttributes + elementRelationship: atomic + - name: required + type: + list: + elementType: + namedType: io.k8s.api.authorization.v1.ResourceAttributes + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.RestartService + map: + fields: + - name: serviceName + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.RouteAdmissionPolicy + map: + fields: + - name: namespaceOwnership + type: + scalar: string + - name: wildcardPolicy + type: + scalar: string +- name: com.github.openshift.api.operator.v1.SFlowConfig + map: + fields: + - name: collectors + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.Server + map: + fields: + - name: forwardPlugin + type: + namedType: com.github.openshift.api.operator.v1.ForwardPlugin + default: {} + - name: name + type: + scalar: string + default: "" + - name: zones + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.ServiceAccountIssuerStatus + map: + fields: + - name: expirationTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.ServiceCA + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.ServiceCASpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.ServiceCAStatus + default: {} +- name: com.github.openshift.api.operator.v1.ServiceCASpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.ServiceCAStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: 
com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.ServiceCatalogAPIServer + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.ServiceCatalogAPIServerSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.ServiceCatalogAPIServerStatus + default: {} +- name: com.github.openshift.api.operator.v1.ServiceCatalogAPIServerSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.ServiceCatalogAPIServerStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.ServiceCatalogControllerManager + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.ServiceCatalogControllerManagerSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.ServiceCatalogControllerManagerStatus + default: {} +- name: com.github.openshift.api.operator.v1.ServiceCatalogControllerManagerSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.ServiceCatalogControllerManagerStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - 
name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.SimpleMacvlanConfig + map: + fields: + - name: ipamConfig + type: + namedType: com.github.openshift.api.operator.v1.IPAMConfig + - name: master + type: + scalar: string + - name: mode + type: + scalar: string + - name: mtu + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1.StaticIPAMAddresses + map: + fields: + - name: address + type: + scalar: string + default: "" + - name: gateway + type: + scalar: string +- name: com.github.openshift.api.operator.v1.StaticIPAMConfig + map: + fields: + - name: addresses + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.StaticIPAMAddresses + elementRelationship: atomic + - name: dns + type: + namedType: com.github.openshift.api.operator.v1.StaticIPAMDNS + - name: routes + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.StaticIPAMRoutes + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.StaticIPAMDNS + map: + fields: + - name: domain + type: + scalar: string + - name: nameservers + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: search + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.StaticIPAMRoutes + map: + fields: + - name: destination + type: + scalar: string + default: "" + - name: gateway + type: + scalar: string +- name: com.github.openshift.api.operator.v1.StatuspageProvider + map: + fields: + - name: pageID + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.Storage + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.StorageSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.StorageStatus + default: {} +- name: com.github.openshift.api.operator.v1.StorageSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ + - name: vsphereStorageDriver + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.StorageStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1.SyslogLoggingDestinationParameters + map: + fields: + - name: address + type: + scalar: string + default: "" + - name: facility + 
type: + scalar: string + - name: maxLength + type: + scalar: numeric + - name: port + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.operator.v1.Theme + map: + fields: + - name: mode + type: + scalar: string + default: "" + - name: source + type: + namedType: com.github.openshift.api.operator.v1.FileReferenceSource + default: {} +- name: com.github.openshift.api.operator.v1.Upstream + map: + fields: + - name: address + type: + scalar: string + - name: port + type: + scalar: numeric + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.UpstreamResolvers + map: + fields: + - name: policy + type: + scalar: string + - name: protocolStrategy + type: + scalar: string + default: "" + - name: transportConfig + type: + namedType: com.github.openshift.api.operator.v1.DNSTransportConfig + default: {} + - name: upstreams + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.Upstream + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.VSphereCSIDriverConfigSpec + map: + fields: + - name: globalMaxSnapshotsPerBlockVolume + type: + scalar: numeric + - name: granularMaxSnapshotsPerBlockVolumeInVSAN + type: + scalar: numeric + - name: granularMaxSnapshotsPerBlockVolumeInVVOL + type: + scalar: numeric + - name: maxAllowedBlockVolumesPerNode + type: + scalar: numeric + - name: topologyCategories + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1alpha1.BackupJobReference + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperator + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorStatus + default: {} +- name: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorSpec + map: + fields: + - name: operatorLogLevel + type: + scalar: string +- name: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorStatus + map: + fields: + - name: observedGeneration + type: + scalar: numeric +- name: com.github.openshift.api.operator.v1alpha1.EtcdBackup + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1alpha1.EtcdBackupSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1alpha1.EtcdBackupStatus + default: {} +- name: com.github.openshift.api.operator.v1alpha1.EtcdBackupSpec + map: + fields: + - name: pvcName + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1alpha1.EtcdBackupStatus + map: + fields: + - name: backupJob + type: + namedType: com.github.openshift.api.operator.v1alpha1.BackupJobReference + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type +- name: 
com.github.openshift.api.operator.v1alpha1.ImageContentSourcePolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1alpha1.ImageContentSourcePolicySpec + default: {} +- name: com.github.openshift.api.operator.v1alpha1.ImageContentSourcePolicySpec + map: + fields: + - name: repositoryDigestMirrors + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1alpha1.RepositoryDigestMirrors + elementRelationship: atomic +- name: com.github.openshift.api.operator.v1alpha1.OLM + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1alpha1.OLMSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1alpha1.OLMStatus + default: {} +- name: com.github.openshift.api.operator.v1alpha1.OLMSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1alpha1.OLMStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string +- name: com.github.openshift.api.operator.v1alpha1.RepositoryDigestMirrors + map: + fields: + - name: mirrors + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: source + type: + scalar: string + default: "" +- name: io.k8s.api.authorization.v1.FieldSelectorAttributes + map: + fields: + - name: rawSelector + type: + scalar: string + - name: requirements + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement + elementRelationship: atomic +- name: io.k8s.api.authorization.v1.LabelSelectorAttributes + map: + fields: + - name: rawSelector + type: + scalar: string + - name: requirements + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement + elementRelationship: atomic +- name: io.k8s.api.authorization.v1.ResourceAttributes + map: + fields: + - name: fieldSelector + type: + namedType: io.k8s.api.authorization.v1.FieldSelectorAttributes + - name: group + type: + scalar: string + - name: labelSelector + type: + namedType: io.k8s.api.authorization.v1.LabelSelectorAttributes + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string + - name: subresource + type: + scalar: string + - name: verb + type: + scalar: string + - name: version + 
type: + scalar: string +- name: io.k8s.api.core.v1.LocalObjectReference + map: + fields: + - name: name + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.Toleration + map: + fields: + - name: effect + type: + scalar: string + - name: key + type: + scalar: string + - name: operator + type: + scalar: string + - name: tolerationSeconds + type: + scalar: numeric + - name: value + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + default: "" + - name: observedGeneration + type: + scalar: numeric + - name: reason + type: + scalar: string + default: "" + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: operator + type: + scalar: string + default: "" + - name: values + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + map: + fields: + - name: matchExpressions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement + elementRelationship: atomic + - name: matchLabels + type: + map: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: operator + type: + scalar: string + default: "" + - name: values + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: 
+ list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: io.k8s.apimachinery.pkg.runtime.RawExtension + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/accesslogging.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/accesslogging.go new file mode 100644 index 0000000000..7f74f828c1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/accesslogging.go @@ -0,0 +1,68 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// AccessLoggingApplyConfiguration represents a declarative configuration of the AccessLogging type for use +// with apply. +type AccessLoggingApplyConfiguration struct { + Destination *LoggingDestinationApplyConfiguration `json:"destination,omitempty"` + HttpLogFormat *string `json:"httpLogFormat,omitempty"` + HTTPCaptureHeaders *IngressControllerCaptureHTTPHeadersApplyConfiguration `json:"httpCaptureHeaders,omitempty"` + HTTPCaptureCookies []IngressControllerCaptureHTTPCookieApplyConfiguration `json:"httpCaptureCookies,omitempty"` + LogEmptyRequests *operatorv1.LoggingPolicy `json:"logEmptyRequests,omitempty"` +} + +// AccessLoggingApplyConfiguration constructs a declarative configuration of the AccessLogging type for use with +// apply. +func AccessLogging() *AccessLoggingApplyConfiguration { + return &AccessLoggingApplyConfiguration{} +} + +// WithDestination sets the Destination field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Destination field is set to the value of the last call. +func (b *AccessLoggingApplyConfiguration) WithDestination(value *LoggingDestinationApplyConfiguration) *AccessLoggingApplyConfiguration { + b.Destination = value + return b +} + +// WithHttpLogFormat sets the HttpLogFormat field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the HttpLogFormat field is set to the value of the last call. +func (b *AccessLoggingApplyConfiguration) WithHttpLogFormat(value string) *AccessLoggingApplyConfiguration { + b.HttpLogFormat = &value + return b +} + +// WithHTTPCaptureHeaders sets the HTTPCaptureHeaders field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPCaptureHeaders field is set to the value of the last call. +func (b *AccessLoggingApplyConfiguration) WithHTTPCaptureHeaders(value *IngressControllerCaptureHTTPHeadersApplyConfiguration) *AccessLoggingApplyConfiguration { + b.HTTPCaptureHeaders = value + return b +} + +// WithHTTPCaptureCookies adds the given value to the HTTPCaptureCookies field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the HTTPCaptureCookies field. +func (b *AccessLoggingApplyConfiguration) WithHTTPCaptureCookies(values ...*IngressControllerCaptureHTTPCookieApplyConfiguration) *AccessLoggingApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithHTTPCaptureCookies") + } + b.HTTPCaptureCookies = append(b.HTTPCaptureCookies, *values[i]) + } + return b +} + +// WithLogEmptyRequests sets the LogEmptyRequests field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogEmptyRequests field is set to the value of the last call. +func (b *AccessLoggingApplyConfiguration) WithLogEmptyRequests(value operatorv1.LoggingPolicy) *AccessLoggingApplyConfiguration { + b.LogEmptyRequests = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalnetworkdefinition.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalnetworkdefinition.go new file mode 100644 index 0000000000..a43b86656a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalnetworkdefinition.go @@ -0,0 +1,63 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// AdditionalNetworkDefinitionApplyConfiguration represents a declarative configuration of the AdditionalNetworkDefinition type for use +// with apply. +type AdditionalNetworkDefinitionApplyConfiguration struct { + Type *operatorv1.NetworkType `json:"type,omitempty"` + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + RawCNIConfig *string `json:"rawCNIConfig,omitempty"` + SimpleMacvlanConfig *SimpleMacvlanConfigApplyConfiguration `json:"simpleMacvlanConfig,omitempty"` +} + +// AdditionalNetworkDefinitionApplyConfiguration constructs a declarative configuration of the AdditionalNetworkDefinition type for use with +// apply. +func AdditionalNetworkDefinition() *AdditionalNetworkDefinitionApplyConfiguration { + return &AdditionalNetworkDefinitionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Type field is set to the value of the last call. +func (b *AdditionalNetworkDefinitionApplyConfiguration) WithType(value operatorv1.NetworkType) *AdditionalNetworkDefinitionApplyConfiguration { + b.Type = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *AdditionalNetworkDefinitionApplyConfiguration) WithName(value string) *AdditionalNetworkDefinitionApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *AdditionalNetworkDefinitionApplyConfiguration) WithNamespace(value string) *AdditionalNetworkDefinitionApplyConfiguration { + b.Namespace = &value + return b +} + +// WithRawCNIConfig sets the RawCNIConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RawCNIConfig field is set to the value of the last call. +func (b *AdditionalNetworkDefinitionApplyConfiguration) WithRawCNIConfig(value string) *AdditionalNetworkDefinitionApplyConfiguration { + b.RawCNIConfig = &value + return b +} + +// WithSimpleMacvlanConfig sets the SimpleMacvlanConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SimpleMacvlanConfig field is set to the value of the last call. +func (b *AdditionalNetworkDefinitionApplyConfiguration) WithSimpleMacvlanConfig(value *SimpleMacvlanConfigApplyConfiguration) *AdditionalNetworkDefinitionApplyConfiguration { + b.SimpleMacvlanConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalroutingcapabilities.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalroutingcapabilities.go new file mode 100644 index 0000000000..95a77d1165 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/additionalroutingcapabilities.go @@ -0,0 +1,29 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// AdditionalRoutingCapabilitiesApplyConfiguration represents a declarative configuration of the AdditionalRoutingCapabilities type for use +// with apply. +type AdditionalRoutingCapabilitiesApplyConfiguration struct { + Providers []operatorv1.RoutingCapabilitiesProvider `json:"providers,omitempty"` +} + +// AdditionalRoutingCapabilitiesApplyConfiguration constructs a declarative configuration of the AdditionalRoutingCapabilities type for use with +// apply. +func AdditionalRoutingCapabilities() *AdditionalRoutingCapabilitiesApplyConfiguration { + return &AdditionalRoutingCapabilitiesApplyConfiguration{} +} + +// WithProviders adds the given value to the Providers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Providers field. +func (b *AdditionalRoutingCapabilitiesApplyConfiguration) WithProviders(values ...operatorv1.RoutingCapabilitiesProvider) *AdditionalRoutingCapabilitiesApplyConfiguration { + for i := range values { + b.Providers = append(b.Providers, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/addpage.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/addpage.go new file mode 100644 index 0000000000..34fe570b50 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/addpage.go @@ -0,0 +1,25 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AddPageApplyConfiguration represents a declarative configuration of the AddPage type for use +// with apply. +type AddPageApplyConfiguration struct { + DisabledActions []string `json:"disabledActions,omitempty"` +} + +// AddPageApplyConfiguration constructs a declarative configuration of the AddPage type for use with +// apply. +func AddPage() *AddPageApplyConfiguration { + return &AddPageApplyConfiguration{} +} + +// WithDisabledActions adds the given value to the DisabledActions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DisabledActions field. +func (b *AddPageApplyConfiguration) WithDisabledActions(values ...string) *AddPageApplyConfiguration { + for i := range values { + b.DisabledActions = append(b.DisabledActions, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go new file mode 100644 index 0000000000..ec839a2ff7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// AuthenticationApplyConfiguration represents a declarative configuration of the Authentication type for use +// with apply. +type AuthenticationApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *AuthenticationSpecApplyConfiguration `json:"spec,omitempty"` + Status *AuthenticationStatusApplyConfiguration `json:"status,omitempty"` +} + +// Authentication constructs a declarative configuration of the Authentication type for use with +// apply. +func Authentication(name string) *AuthenticationApplyConfiguration { + b := &AuthenticationApplyConfiguration{} + b.WithName(name) + b.WithKind("Authentication") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractAuthentication extracts the applied configuration owned by fieldManager from +// authentication. 
If no managedFields are found in authentication for fieldManager, a +// AuthenticationApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// authentication must be a unmodified Authentication API object that was retrieved from the Kubernetes API. +// ExtractAuthentication provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractAuthentication(authentication *operatorv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) { + return extractAuthentication(authentication, fieldManager, "") +} + +// ExtractAuthenticationStatus is the same as ExtractAuthentication except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractAuthenticationStatus(authentication *operatorv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) { + return extractAuthentication(authentication, fieldManager, "status") +} + +func extractAuthentication(authentication *operatorv1.Authentication, fieldManager string, subresource string) (*AuthenticationApplyConfiguration, error) { + b := &AuthenticationApplyConfiguration{} + err := managedfields.ExtractInto(authentication, internal.Parser().Type("com.github.openshift.api.operator.v1.Authentication"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(authentication.Name) + + b.WithKind("Authentication") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithKind(value string) *AuthenticationApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithAPIVersion(value string) *AuthenticationApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithName(value string) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithGenerateName(value string) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithNamespace(value string) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithUID(value types.UID) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithResourceVersion(value string) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithGeneration(value int64) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *AuthenticationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *AuthenticationApplyConfiguration) WithLabels(entries map[string]string) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *AuthenticationApplyConfiguration) WithAnnotations(entries map[string]string) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *AuthenticationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *AuthenticationApplyConfiguration) WithFinalizers(values ...string) *AuthenticationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *AuthenticationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithSpec(value *AuthenticationSpecApplyConfiguration) *AuthenticationApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *AuthenticationApplyConfiguration) WithStatus(value *AuthenticationStatusApplyConfiguration) *AuthenticationApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *AuthenticationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationspec.go new file mode 100644 index 0000000000..ac90816bce --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// AuthenticationSpecApplyConfiguration represents a declarative configuration of the AuthenticationSpec type for use +// with apply. +type AuthenticationSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// AuthenticationSpecApplyConfiguration constructs a declarative configuration of the AuthenticationSpec type for use with +// apply. +func AuthenticationSpec() *AuthenticationSpecApplyConfiguration { + return &AuthenticationSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *AuthenticationSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *AuthenticationSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. 
+func (b *AuthenticationSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *AuthenticationSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *AuthenticationSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *AuthenticationSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *AuthenticationSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *AuthenticationSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *AuthenticationSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *AuthenticationSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationstatus.go new file mode 100644 index 0000000000..ee84050a40 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authenticationstatus.go @@ -0,0 +1,82 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AuthenticationStatusApplyConfiguration represents a declarative configuration of the AuthenticationStatus type for use +// with apply. +type AuthenticationStatusApplyConfiguration struct { + OAuthAPIServer *OAuthAPIServerStatusApplyConfiguration `json:"oauthAPIServer,omitempty"` + OperatorStatusApplyConfiguration `json:",inline"` +} + +// AuthenticationStatusApplyConfiguration constructs a declarative configuration of the AuthenticationStatus type for use with +// apply. +func AuthenticationStatus() *AuthenticationStatusApplyConfiguration { + return &AuthenticationStatusApplyConfiguration{} +} + +// WithOAuthAPIServer sets the OAuthAPIServer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OAuthAPIServer field is set to the value of the last call. 
+func (b *AuthenticationStatusApplyConfiguration) WithOAuthAPIServer(value *OAuthAPIServerStatusApplyConfiguration) *AuthenticationStatusApplyConfiguration { + b.OAuthAPIServer = value + return b +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *AuthenticationStatusApplyConfiguration) WithObservedGeneration(value int64) *AuthenticationStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *AuthenticationStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *AuthenticationStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *AuthenticationStatusApplyConfiguration) WithVersion(value string) *AuthenticationStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *AuthenticationStatusApplyConfiguration) WithReadyReplicas(value int32) *AuthenticationStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *AuthenticationStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *AuthenticationStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. 
+func (b *AuthenticationStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *AuthenticationStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsclassicloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsclassicloadbalancerparameters.go new file mode 100644 index 0000000000..b490ac0e5b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsclassicloadbalancerparameters.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AWSClassicLoadBalancerParametersApplyConfiguration represents a declarative configuration of the AWSClassicLoadBalancerParameters type for use +// with apply. +type AWSClassicLoadBalancerParametersApplyConfiguration struct { + ConnectionIdleTimeout *metav1.Duration `json:"connectionIdleTimeout,omitempty"` + Subnets *AWSSubnetsApplyConfiguration `json:"subnets,omitempty"` +} + +// AWSClassicLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the AWSClassicLoadBalancerParameters type for use with +// apply. +func AWSClassicLoadBalancerParameters() *AWSClassicLoadBalancerParametersApplyConfiguration { + return &AWSClassicLoadBalancerParametersApplyConfiguration{} +} + +// WithConnectionIdleTimeout sets the ConnectionIdleTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConnectionIdleTimeout field is set to the value of the last call. +func (b *AWSClassicLoadBalancerParametersApplyConfiguration) WithConnectionIdleTimeout(value metav1.Duration) *AWSClassicLoadBalancerParametersApplyConfiguration { + b.ConnectionIdleTimeout = &value + return b +} + +// WithSubnets sets the Subnets field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Subnets field is set to the value of the last call. +func (b *AWSClassicLoadBalancerParametersApplyConfiguration) WithSubnets(value *AWSSubnetsApplyConfiguration) *AWSClassicLoadBalancerParametersApplyConfiguration { + b.Subnets = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go new file mode 100644 index 0000000000..5579ef5ee5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AWSCSIDriverConfigSpecApplyConfiguration represents a declarative configuration of the AWSCSIDriverConfigSpec type for use +// with apply. 
+type AWSCSIDriverConfigSpecApplyConfiguration struct { + KMSKeyARN *string `json:"kmsKeyARN,omitempty"` + EFSVolumeMetrics *AWSEFSVolumeMetricsApplyConfiguration `json:"efsVolumeMetrics,omitempty"` +} + +// AWSCSIDriverConfigSpecApplyConfiguration constructs a declarative configuration of the AWSCSIDriverConfigSpec type for use with +// apply. +func AWSCSIDriverConfigSpec() *AWSCSIDriverConfigSpecApplyConfiguration { + return &AWSCSIDriverConfigSpecApplyConfiguration{} +} + +// WithKMSKeyARN sets the KMSKeyARN field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KMSKeyARN field is set to the value of the last call. +func (b *AWSCSIDriverConfigSpecApplyConfiguration) WithKMSKeyARN(value string) *AWSCSIDriverConfigSpecApplyConfiguration { + b.KMSKeyARN = &value + return b +} + +// WithEFSVolumeMetrics sets the EFSVolumeMetrics field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EFSVolumeMetrics field is set to the value of the last call. +func (b *AWSCSIDriverConfigSpecApplyConfiguration) WithEFSVolumeMetrics(value *AWSEFSVolumeMetricsApplyConfiguration) *AWSCSIDriverConfigSpecApplyConfiguration { + b.EFSVolumeMetrics = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetrics.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetrics.go new file mode 100644 index 0000000000..011bbeaffc --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetrics.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// AWSEFSVolumeMetricsApplyConfiguration represents a declarative configuration of the AWSEFSVolumeMetrics type for use +// with apply. +type AWSEFSVolumeMetricsApplyConfiguration struct { + State *operatorv1.AWSEFSVolumeMetricsState `json:"state,omitempty"` + RecursiveWalk *AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration `json:"recursiveWalk,omitempty"` +} + +// AWSEFSVolumeMetricsApplyConfiguration constructs a declarative configuration of the AWSEFSVolumeMetrics type for use with +// apply. +func AWSEFSVolumeMetrics() *AWSEFSVolumeMetricsApplyConfiguration { + return &AWSEFSVolumeMetricsApplyConfiguration{} +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. +func (b *AWSEFSVolumeMetricsApplyConfiguration) WithState(value operatorv1.AWSEFSVolumeMetricsState) *AWSEFSVolumeMetricsApplyConfiguration { + b.State = &value + return b +} + +// WithRecursiveWalk sets the RecursiveWalk field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RecursiveWalk field is set to the value of the last call. 
+func (b *AWSEFSVolumeMetricsApplyConfiguration) WithRecursiveWalk(value *AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration) *AWSEFSVolumeMetricsApplyConfiguration { + b.RecursiveWalk = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetricsrecursivewalkconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetricsrecursivewalkconfig.go new file mode 100644 index 0000000000..23d68c209f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsefsvolumemetricsrecursivewalkconfig.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration represents a declarative configuration of the AWSEFSVolumeMetricsRecursiveWalkConfig type for use +// with apply. +type AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration struct { + RefreshPeriodMinutes *int32 `json:"refreshPeriodMinutes,omitempty"` + FSRateLimit *int32 `json:"fsRateLimit,omitempty"` +} + +// AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration constructs a declarative configuration of the AWSEFSVolumeMetricsRecursiveWalkConfig type for use with +// apply. +func AWSEFSVolumeMetricsRecursiveWalkConfig() *AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration { + return &AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration{} +} + +// WithRefreshPeriodMinutes sets the RefreshPeriodMinutes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RefreshPeriodMinutes field is set to the value of the last call. +func (b *AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration) WithRefreshPeriodMinutes(value int32) *AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration { + b.RefreshPeriodMinutes = &value + return b +} + +// WithFSRateLimit sets the FSRateLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FSRateLimit field is set to the value of the last call. +func (b *AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration) WithFSRateLimit(value int32) *AWSEFSVolumeMetricsRecursiveWalkConfigApplyConfiguration { + b.FSRateLimit = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsloadbalancerparameters.go new file mode 100644 index 0000000000..8805b7eece --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsloadbalancerparameters.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// AWSLoadBalancerParametersApplyConfiguration represents a declarative configuration of the AWSLoadBalancerParameters type for use +// with apply. 
+type AWSLoadBalancerParametersApplyConfiguration struct { + Type *operatorv1.AWSLoadBalancerType `json:"type,omitempty"` + ClassicLoadBalancerParameters *AWSClassicLoadBalancerParametersApplyConfiguration `json:"classicLoadBalancer,omitempty"` + NetworkLoadBalancerParameters *AWSNetworkLoadBalancerParametersApplyConfiguration `json:"networkLoadBalancer,omitempty"` +} + +// AWSLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the AWSLoadBalancerParameters type for use with +// apply. +func AWSLoadBalancerParameters() *AWSLoadBalancerParametersApplyConfiguration { + return &AWSLoadBalancerParametersApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *AWSLoadBalancerParametersApplyConfiguration) WithType(value operatorv1.AWSLoadBalancerType) *AWSLoadBalancerParametersApplyConfiguration { + b.Type = &value + return b +} + +// WithClassicLoadBalancerParameters sets the ClassicLoadBalancerParameters field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClassicLoadBalancerParameters field is set to the value of the last call. +func (b *AWSLoadBalancerParametersApplyConfiguration) WithClassicLoadBalancerParameters(value *AWSClassicLoadBalancerParametersApplyConfiguration) *AWSLoadBalancerParametersApplyConfiguration { + b.ClassicLoadBalancerParameters = value + return b +} + +// WithNetworkLoadBalancerParameters sets the NetworkLoadBalancerParameters field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetworkLoadBalancerParameters field is set to the value of the last call. +func (b *AWSLoadBalancerParametersApplyConfiguration) WithNetworkLoadBalancerParameters(value *AWSNetworkLoadBalancerParametersApplyConfiguration) *AWSLoadBalancerParametersApplyConfiguration { + b.NetworkLoadBalancerParameters = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsnetworkloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsnetworkloadbalancerparameters.go new file mode 100644 index 0000000000..c611c53184 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awsnetworkloadbalancerparameters.go @@ -0,0 +1,38 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// AWSNetworkLoadBalancerParametersApplyConfiguration represents a declarative configuration of the AWSNetworkLoadBalancerParameters type for use +// with apply. +type AWSNetworkLoadBalancerParametersApplyConfiguration struct { + Subnets *AWSSubnetsApplyConfiguration `json:"subnets,omitempty"` + EIPAllocations []operatorv1.EIPAllocation `json:"eipAllocations,omitempty"` +} + +// AWSNetworkLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the AWSNetworkLoadBalancerParameters type for use with +// apply. 
+func AWSNetworkLoadBalancerParameters() *AWSNetworkLoadBalancerParametersApplyConfiguration { + return &AWSNetworkLoadBalancerParametersApplyConfiguration{} +} + +// WithSubnets sets the Subnets field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Subnets field is set to the value of the last call. +func (b *AWSNetworkLoadBalancerParametersApplyConfiguration) WithSubnets(value *AWSSubnetsApplyConfiguration) *AWSNetworkLoadBalancerParametersApplyConfiguration { + b.Subnets = value + return b +} + +// WithEIPAllocations adds the given value to the EIPAllocations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the EIPAllocations field. +func (b *AWSNetworkLoadBalancerParametersApplyConfiguration) WithEIPAllocations(values ...operatorv1.EIPAllocation) *AWSNetworkLoadBalancerParametersApplyConfiguration { + for i := range values { + b.EIPAllocations = append(b.EIPAllocations, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awssubnets.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awssubnets.go new file mode 100644 index 0000000000..f127ac4ca4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awssubnets.go @@ -0,0 +1,40 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// AWSSubnetsApplyConfiguration represents a declarative configuration of the AWSSubnets type for use +// with apply. +type AWSSubnetsApplyConfiguration struct { + IDs []operatorv1.AWSSubnetID `json:"ids,omitempty"` + Names []operatorv1.AWSSubnetName `json:"names,omitempty"` +} + +// AWSSubnetsApplyConfiguration constructs a declarative configuration of the AWSSubnets type for use with +// apply. +func AWSSubnets() *AWSSubnetsApplyConfiguration { + return &AWSSubnetsApplyConfiguration{} +} + +// WithIDs adds the given value to the IDs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the IDs field. +func (b *AWSSubnetsApplyConfiguration) WithIDs(values ...operatorv1.AWSSubnetID) *AWSSubnetsApplyConfiguration { + for i := range values { + b.IDs = append(b.IDs, values[i]) + } + return b +} + +// WithNames adds the given value to the Names field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Names field. 
+func (b *AWSSubnetsApplyConfiguration) WithNames(values ...operatorv1.AWSSubnetName) *AWSSubnetsApplyConfiguration { + for i := range values { + b.Names = append(b.Names, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurecsidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurecsidriverconfigspec.go new file mode 100644 index 0000000000..6117a337a4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurecsidriverconfigspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AzureCSIDriverConfigSpecApplyConfiguration represents a declarative configuration of the AzureCSIDriverConfigSpec type for use +// with apply. +type AzureCSIDriverConfigSpecApplyConfiguration struct { + DiskEncryptionSet *AzureDiskEncryptionSetApplyConfiguration `json:"diskEncryptionSet,omitempty"` +} + +// AzureCSIDriverConfigSpecApplyConfiguration constructs a declarative configuration of the AzureCSIDriverConfigSpec type for use with +// apply. +func AzureCSIDriverConfigSpec() *AzureCSIDriverConfigSpecApplyConfiguration { + return &AzureCSIDriverConfigSpecApplyConfiguration{} +} + +// WithDiskEncryptionSet sets the DiskEncryptionSet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DiskEncryptionSet field is set to the value of the last call. +func (b *AzureCSIDriverConfigSpecApplyConfiguration) WithDiskEncryptionSet(value *AzureDiskEncryptionSetApplyConfiguration) *AzureCSIDriverConfigSpecApplyConfiguration { + b.DiskEncryptionSet = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurediskencryptionset.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurediskencryptionset.go new file mode 100644 index 0000000000..917bf2cfd8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurediskencryptionset.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AzureDiskEncryptionSetApplyConfiguration represents a declarative configuration of the AzureDiskEncryptionSet type for use +// with apply. +type AzureDiskEncryptionSetApplyConfiguration struct { + SubscriptionID *string `json:"subscriptionID,omitempty"` + ResourceGroup *string `json:"resourceGroup,omitempty"` + Name *string `json:"name,omitempty"` +} + +// AzureDiskEncryptionSetApplyConfiguration constructs a declarative configuration of the AzureDiskEncryptionSet type for use with +// apply. +func AzureDiskEncryptionSet() *AzureDiskEncryptionSetApplyConfiguration { + return &AzureDiskEncryptionSetApplyConfiguration{} +} + +// WithSubscriptionID sets the SubscriptionID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SubscriptionID field is set to the value of the last call. 
+func (b *AzureDiskEncryptionSetApplyConfiguration) WithSubscriptionID(value string) *AzureDiskEncryptionSetApplyConfiguration { + b.SubscriptionID = &value + return b +} + +// WithResourceGroup sets the ResourceGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceGroup field is set to the value of the last call. +func (b *AzureDiskEncryptionSetApplyConfiguration) WithResourceGroup(value string) *AzureDiskEncryptionSetApplyConfiguration { + b.ResourceGroup = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *AzureDiskEncryptionSetApplyConfiguration) WithName(value string) *AzureDiskEncryptionSetApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capability.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capability.go new file mode 100644 index 0000000000..ce7ca886fe --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capability.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// CapabilityApplyConfiguration represents a declarative configuration of the Capability type for use +// with apply. +type CapabilityApplyConfiguration struct { + Name *operatorv1.ConsoleCapabilityName `json:"name,omitempty"` + Visibility *CapabilityVisibilityApplyConfiguration `json:"visibility,omitempty"` +} + +// CapabilityApplyConfiguration constructs a declarative configuration of the Capability type for use with +// apply. +func Capability() *CapabilityApplyConfiguration { + return &CapabilityApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *CapabilityApplyConfiguration) WithName(value operatorv1.ConsoleCapabilityName) *CapabilityApplyConfiguration { + b.Name = &value + return b +} + +// WithVisibility sets the Visibility field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Visibility field is set to the value of the last call. +func (b *CapabilityApplyConfiguration) WithVisibility(value *CapabilityVisibilityApplyConfiguration) *CapabilityApplyConfiguration { + b.Visibility = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capabilityvisibility.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capabilityvisibility.go new file mode 100644 index 0000000000..9e86b3d387 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/capabilityvisibility.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// CapabilityVisibilityApplyConfiguration represents a declarative configuration of the CapabilityVisibility type for use +// with apply. +type CapabilityVisibilityApplyConfiguration struct { + State *operatorv1.CapabilityState `json:"state,omitempty"` +} + +// CapabilityVisibilityApplyConfiguration constructs a declarative configuration of the CapabilityVisibility type for use with +// apply. +func CapabilityVisibility() *CapabilityVisibilityApplyConfiguration { + return &CapabilityVisibilityApplyConfiguration{} +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. +func (b *CapabilityVisibilityApplyConfiguration) WithState(value operatorv1.CapabilityState) *CapabilityVisibilityApplyConfiguration { + b.State = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clienttls.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clienttls.go new file mode 100644 index 0000000000..4b6d559977 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clienttls.go @@ -0,0 +1,48 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" +) + +// ClientTLSApplyConfiguration represents a declarative configuration of the ClientTLS type for use +// with apply. +type ClientTLSApplyConfiguration struct { + ClientCertificatePolicy *operatorv1.ClientCertificatePolicy `json:"clientCertificatePolicy,omitempty"` + ClientCA *configv1.ConfigMapNameReference `json:"clientCA,omitempty"` + AllowedSubjectPatterns []string `json:"allowedSubjectPatterns,omitempty"` +} + +// ClientTLSApplyConfiguration constructs a declarative configuration of the ClientTLS type for use with +// apply. +func ClientTLS() *ClientTLSApplyConfiguration { + return &ClientTLSApplyConfiguration{} +} + +// WithClientCertificatePolicy sets the ClientCertificatePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientCertificatePolicy field is set to the value of the last call. +func (b *ClientTLSApplyConfiguration) WithClientCertificatePolicy(value operatorv1.ClientCertificatePolicy) *ClientTLSApplyConfiguration { + b.ClientCertificatePolicy = &value + return b +} + +// WithClientCA sets the ClientCA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientCA field is set to the value of the last call. +func (b *ClientTLSApplyConfiguration) WithClientCA(value configv1.ConfigMapNameReference) *ClientTLSApplyConfiguration { + b.ClientCA = &value + return b +} + +// WithAllowedSubjectPatterns adds the given value to the AllowedSubjectPatterns field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedSubjectPatterns field. 
+func (b *ClientTLSApplyConfiguration) WithAllowedSubjectPatterns(values ...string) *ClientTLSApplyConfiguration { + for i := range values { + b.AllowedSubjectPatterns = append(b.AllowedSubjectPatterns, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go new file mode 100644 index 0000000000..148c6a440e --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// CloudCredentialApplyConfiguration represents a declarative configuration of the CloudCredential type for use +// with apply. +type CloudCredentialApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CloudCredentialSpecApplyConfiguration `json:"spec,omitempty"` + Status *CloudCredentialStatusApplyConfiguration `json:"status,omitempty"` +} + +// CloudCredential constructs a declarative configuration of the CloudCredential type for use with +// apply. +func CloudCredential(name string) *CloudCredentialApplyConfiguration { + b := &CloudCredentialApplyConfiguration{} + b.WithName(name) + b.WithKind("CloudCredential") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractCloudCredential extracts the applied configuration owned by fieldManager from +// cloudCredential. If no managedFields are found in cloudCredential for fieldManager, a +// CloudCredentialApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// cloudCredential must be a unmodified CloudCredential API object that was retrieved from the Kubernetes API. +// ExtractCloudCredential provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractCloudCredential(cloudCredential *operatorv1.CloudCredential, fieldManager string) (*CloudCredentialApplyConfiguration, error) { + return extractCloudCredential(cloudCredential, fieldManager, "") +} + +// ExtractCloudCredentialStatus is the same as ExtractCloudCredential except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractCloudCredentialStatus(cloudCredential *operatorv1.CloudCredential, fieldManager string) (*CloudCredentialApplyConfiguration, error) { + return extractCloudCredential(cloudCredential, fieldManager, "status") +} + +func extractCloudCredential(cloudCredential *operatorv1.CloudCredential, fieldManager string, subresource string) (*CloudCredentialApplyConfiguration, error) { + b := &CloudCredentialApplyConfiguration{} + err := managedfields.ExtractInto(cloudCredential, internal.Parser().Type("com.github.openshift.api.operator.v1.CloudCredential"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(cloudCredential.Name) + + b.WithKind("CloudCredential") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *CloudCredentialApplyConfiguration) WithKind(value string) *CloudCredentialApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *CloudCredentialApplyConfiguration) WithAPIVersion(value string) *CloudCredentialApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *CloudCredentialApplyConfiguration) WithName(value string) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *CloudCredentialApplyConfiguration) WithGenerateName(value string) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *CloudCredentialApplyConfiguration) WithNamespace(value string) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *CloudCredentialApplyConfiguration) WithUID(value types.UID) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *CloudCredentialApplyConfiguration) WithResourceVersion(value string) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *CloudCredentialApplyConfiguration) WithGeneration(value int64) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *CloudCredentialApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *CloudCredentialApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *CloudCredentialApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *CloudCredentialApplyConfiguration) WithLabels(entries map[string]string) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *CloudCredentialApplyConfiguration) WithAnnotations(entries map[string]string) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *CloudCredentialApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *CloudCredentialApplyConfiguration) WithFinalizers(values ...string) *CloudCredentialApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *CloudCredentialApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *CloudCredentialApplyConfiguration) WithSpec(value *CloudCredentialSpecApplyConfiguration) *CloudCredentialApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *CloudCredentialApplyConfiguration) WithStatus(value *CloudCredentialStatusApplyConfiguration) *CloudCredentialApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CloudCredentialApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialspec.go new file mode 100644 index 0000000000..60712afa28 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialspec.go @@ -0,0 +1,69 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// CloudCredentialSpecApplyConfiguration represents a declarative configuration of the CloudCredentialSpec type for use +// with apply. +type CloudCredentialSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` + CredentialsMode *operatorv1.CloudCredentialsMode `json:"credentialsMode,omitempty"` +} + +// CloudCredentialSpecApplyConfiguration constructs a declarative configuration of the CloudCredentialSpec type for use with +// apply. +func CloudCredentialSpec() *CloudCredentialSpecApplyConfiguration { + return &CloudCredentialSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *CloudCredentialSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *CloudCredentialSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *CloudCredentialSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *CloudCredentialSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *CloudCredentialSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *CloudCredentialSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. 
+func (b *CloudCredentialSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *CloudCredentialSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *CloudCredentialSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *CloudCredentialSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} + +// WithCredentialsMode sets the CredentialsMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CredentialsMode field is set to the value of the last call. +func (b *CloudCredentialSpecApplyConfiguration) WithCredentialsMode(value operatorv1.CloudCredentialsMode) *CloudCredentialSpecApplyConfiguration { + b.CredentialsMode = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialstatus.go new file mode 100644 index 0000000000..fa6a6f0f68 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredentialstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// CloudCredentialStatusApplyConfiguration represents a declarative configuration of the CloudCredentialStatus type for use +// with apply. +type CloudCredentialStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// CloudCredentialStatusApplyConfiguration constructs a declarative configuration of the CloudCredentialStatus type for use with +// apply. +func CloudCredentialStatus() *CloudCredentialStatusApplyConfiguration { + return &CloudCredentialStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *CloudCredentialStatusApplyConfiguration) WithObservedGeneration(value int64) *CloudCredentialStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. 
+func (b *CloudCredentialStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *CloudCredentialStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *CloudCredentialStatusApplyConfiguration) WithVersion(value string) *CloudCredentialStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *CloudCredentialStatusApplyConfiguration) WithReadyReplicas(value int32) *CloudCredentialStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *CloudCredentialStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *CloudCredentialStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *CloudCredentialStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *CloudCredentialStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go new file mode 100644 index 0000000000..ed2dbb9c1a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterCSIDriverApplyConfiguration represents a declarative configuration of the ClusterCSIDriver type for use +// with apply. +type ClusterCSIDriverApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterCSIDriverSpecApplyConfiguration `json:"spec,omitempty"` + Status *ClusterCSIDriverStatusApplyConfiguration `json:"status,omitempty"` +} + +// ClusterCSIDriver constructs a declarative configuration of the ClusterCSIDriver type for use with +// apply. +func ClusterCSIDriver(name string) *ClusterCSIDriverApplyConfiguration { + b := &ClusterCSIDriverApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterCSIDriver") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractClusterCSIDriver extracts the applied configuration owned by fieldManager from +// clusterCSIDriver. If no managedFields are found in clusterCSIDriver for fieldManager, a +// ClusterCSIDriverApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// clusterCSIDriver must be a unmodified ClusterCSIDriver API object that was retrieved from the Kubernetes API. +// ExtractClusterCSIDriver provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractClusterCSIDriver(clusterCSIDriver *operatorv1.ClusterCSIDriver, fieldManager string) (*ClusterCSIDriverApplyConfiguration, error) { + return extractClusterCSIDriver(clusterCSIDriver, fieldManager, "") +} + +// ExtractClusterCSIDriverStatus is the same as ExtractClusterCSIDriver except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractClusterCSIDriverStatus(clusterCSIDriver *operatorv1.ClusterCSIDriver, fieldManager string) (*ClusterCSIDriverApplyConfiguration, error) { + return extractClusterCSIDriver(clusterCSIDriver, fieldManager, "status") +} + +func extractClusterCSIDriver(clusterCSIDriver *operatorv1.ClusterCSIDriver, fieldManager string, subresource string) (*ClusterCSIDriverApplyConfiguration, error) { + b := &ClusterCSIDriverApplyConfiguration{} + err := managedfields.ExtractInto(clusterCSIDriver, internal.Parser().Type("com.github.openshift.api.operator.v1.ClusterCSIDriver"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterCSIDriver.Name) + + b.WithKind("ClusterCSIDriver") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithKind(value string) *ClusterCSIDriverApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithAPIVersion(value string) *ClusterCSIDriverApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithName(value string) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithGenerateName(value string) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithNamespace(value string) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithUID(value types.UID) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithResourceVersion(value string) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ClusterCSIDriverApplyConfiguration) WithGeneration(value int64) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ClusterCSIDriverApplyConfiguration) WithLabels(entries map[string]string) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *ClusterCSIDriverApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterCSIDriverApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ClusterCSIDriverApplyConfiguration) WithFinalizers(values ...string) *ClusterCSIDriverApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ClusterCSIDriverApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithSpec(value *ClusterCSIDriverSpecApplyConfiguration) *ClusterCSIDriverApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ClusterCSIDriverApplyConfiguration) WithStatus(value *ClusterCSIDriverStatusApplyConfiguration) *ClusterCSIDriverApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ClusterCSIDriverApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go new file mode 100644 index 0000000000..a008180f41 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go @@ -0,0 +1,78 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// ClusterCSIDriverSpecApplyConfiguration represents a declarative configuration of the ClusterCSIDriverSpec type for use +// with apply. +type ClusterCSIDriverSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` + StorageClassState *operatorv1.StorageClassStateName `json:"storageClassState,omitempty"` + DriverConfig *CSIDriverConfigSpecApplyConfiguration `json:"driverConfig,omitempty"` +} + +// ClusterCSIDriverSpecApplyConfiguration constructs a declarative configuration of the ClusterCSIDriverSpec type for use with +// apply. +func ClusterCSIDriverSpec() *ClusterCSIDriverSpecApplyConfiguration { + return &ClusterCSIDriverSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *ClusterCSIDriverSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ClusterCSIDriverSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *ClusterCSIDriverSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ClusterCSIDriverSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *ClusterCSIDriverSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ClusterCSIDriverSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. 
+func (b *ClusterCSIDriverSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ClusterCSIDriverSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *ClusterCSIDriverSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ClusterCSIDriverSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} + +// WithStorageClassState sets the StorageClassState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StorageClassState field is set to the value of the last call. +func (b *ClusterCSIDriverSpecApplyConfiguration) WithStorageClassState(value operatorv1.StorageClassStateName) *ClusterCSIDriverSpecApplyConfiguration { + b.StorageClassState = &value + return b +} + +// WithDriverConfig sets the DriverConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverConfig field is set to the value of the last call. +func (b *ClusterCSIDriverSpecApplyConfiguration) WithDriverConfig(value *CSIDriverConfigSpecApplyConfiguration) *ClusterCSIDriverSpecApplyConfiguration { + b.DriverConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverstatus.go new file mode 100644 index 0000000000..f5e2221b87 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ClusterCSIDriverStatusApplyConfiguration represents a declarative configuration of the ClusterCSIDriverStatus type for use +// with apply. +type ClusterCSIDriverStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// ClusterCSIDriverStatusApplyConfiguration constructs a declarative configuration of the ClusterCSIDriverStatus type for use with +// apply. +func ClusterCSIDriverStatus() *ClusterCSIDriverStatusApplyConfiguration { + return &ClusterCSIDriverStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *ClusterCSIDriverStatusApplyConfiguration) WithObservedGeneration(value int64) *ClusterCSIDriverStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ClusterCSIDriverStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *ClusterCSIDriverStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *ClusterCSIDriverStatusApplyConfiguration) WithVersion(value string) *ClusterCSIDriverStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *ClusterCSIDriverStatusApplyConfiguration) WithReadyReplicas(value int32) *ClusterCSIDriverStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *ClusterCSIDriverStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ClusterCSIDriverStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *ClusterCSIDriverStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *ClusterCSIDriverStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusternetworkentry.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusternetworkentry.go new file mode 100644 index 0000000000..ac180f893d --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusternetworkentry.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ClusterNetworkEntryApplyConfiguration represents a declarative configuration of the ClusterNetworkEntry type for use +// with apply. 
+type ClusterNetworkEntryApplyConfiguration struct {
+	CIDR *string `json:"cidr,omitempty"`
+	HostPrefix *uint32 `json:"hostPrefix,omitempty"`
+}
+
+// ClusterNetworkEntryApplyConfiguration constructs a declarative configuration of the ClusterNetworkEntry type for use with
+// apply.
+func ClusterNetworkEntry() *ClusterNetworkEntryApplyConfiguration {
+	return &ClusterNetworkEntryApplyConfiguration{}
+}
+
+// WithCIDR sets the CIDR field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CIDR field is set to the value of the last call.
+func (b *ClusterNetworkEntryApplyConfiguration) WithCIDR(value string) *ClusterNetworkEntryApplyConfiguration {
+	b.CIDR = &value
+	return b
+}
+
+// WithHostPrefix sets the HostPrefix field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HostPrefix field is set to the value of the last call.
+func (b *ClusterNetworkEntryApplyConfiguration) WithHostPrefix(value uint32) *ClusterNetworkEntryApplyConfiguration {
+	b.HostPrefix = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go
new file mode 100644
index 0000000000..b884322ae6
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	operatorv1 "github.com/openshift/api/operator/v1"
+	internal "github.com/openshift/client-go/operator/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ConfigApplyConfiguration represents a declarative configuration of the Config type for use
+// with apply.
+type ConfigApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec *ConfigSpecApplyConfiguration `json:"spec,omitempty"`
+	Status *ConfigStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Config constructs a declarative configuration of the Config type for use with
+// apply.
+func Config(name string) *ConfigApplyConfiguration {
+	b := &ConfigApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("Config")
+	b.WithAPIVersion("operator.openshift.io/v1")
+	return b
+}
+
+// ExtractConfig extracts the applied configuration owned by fieldManager from
+// config. If no managedFields are found in config for fieldManager, a
+// ConfigApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// config must be an unmodified Config API object that was retrieved from the Kubernetes API.
+// ExtractConfig provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractConfig(config *operatorv1.Config, fieldManager string) (*ConfigApplyConfiguration, error) { + return extractConfig(config, fieldManager, "") +} + +// ExtractConfigStatus is the same as ExtractConfig except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractConfigStatus(config *operatorv1.Config, fieldManager string) (*ConfigApplyConfiguration, error) { + return extractConfig(config, fieldManager, "status") +} + +func extractConfig(config *operatorv1.Config, fieldManager string, subresource string) (*ConfigApplyConfiguration, error) { + b := &ConfigApplyConfiguration{} + err := managedfields.ExtractInto(config, internal.Parser().Type("com.github.openshift.api.operator.v1.Config"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(config.Name) + + b.WithKind("Config") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithKind(value string) *ConfigApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithAPIVersion(value string) *ConfigApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithName(value string) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithGenerateName(value string) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ConfigApplyConfiguration) WithNamespace(value string) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithUID(value types.UID) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithResourceVersion(value string) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithGeneration(value int64) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ConfigApplyConfiguration) WithLabels(entries map[string]string) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ConfigApplyConfiguration) WithAnnotations(entries map[string]string) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ConfigApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ConfigApplyConfiguration) WithFinalizers(values ...string) *ConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ConfigApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithSpec(value *ConfigSpecApplyConfiguration) *ConfigApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Status field is set to the value of the last call. +func (b *ConfigApplyConfiguration) WithStatus(value *ConfigStatusApplyConfiguration) *ConfigApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ConfigApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configmapfilereference.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configmapfilereference.go new file mode 100644 index 0000000000..3c70be2c1e --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configmapfilereference.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ConfigMapFileReferenceApplyConfiguration represents a declarative configuration of the ConfigMapFileReference type for use +// with apply. +type ConfigMapFileReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Key *string `json:"key,omitempty"` +} + +// ConfigMapFileReferenceApplyConfiguration constructs a declarative configuration of the ConfigMapFileReference type for use with +// apply. +func ConfigMapFileReference() *ConfigMapFileReferenceApplyConfiguration { + return &ConfigMapFileReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ConfigMapFileReferenceApplyConfiguration) WithName(value string) *ConfigMapFileReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *ConfigMapFileReferenceApplyConfiguration) WithKey(value string) *ConfigMapFileReferenceApplyConfiguration { + b.Key = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configspec.go new file mode 100644 index 0000000000..c7d3e93c30 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// ConfigSpecApplyConfiguration represents a declarative configuration of the ConfigSpec type for use +// with apply. +type ConfigSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// ConfigSpecApplyConfiguration constructs a declarative configuration of the ConfigSpec type for use with +// apply. 
+func ConfigSpec() *ConfigSpecApplyConfiguration { + return &ConfigSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *ConfigSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ConfigSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *ConfigSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ConfigSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *ConfigSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ConfigSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *ConfigSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ConfigSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *ConfigSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ConfigSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configstatus.go new file mode 100644 index 0000000000..38e52420c2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/configstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ConfigStatusApplyConfiguration represents a declarative configuration of the ConfigStatus type for use +// with apply. +type ConfigStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// ConfigStatusApplyConfiguration constructs a declarative configuration of the ConfigStatus type for use with +// apply. 
+func ConfigStatus() *ConfigStatusApplyConfiguration { + return &ConfigStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *ConfigStatusApplyConfiguration) WithObservedGeneration(value int64) *ConfigStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ConfigStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *ConfigStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *ConfigStatusApplyConfiguration) WithVersion(value string) *ConfigStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *ConfigStatusApplyConfiguration) WithReadyReplicas(value int32) *ConfigStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *ConfigStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ConfigStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. 
+func (b *ConfigStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *ConfigStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithGenerations")
+		}
+		b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go
new file mode 100644
index 0000000000..aaa69f64c0
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	operatorv1 "github.com/openshift/api/operator/v1"
+	internal "github.com/openshift/client-go/operator/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ConsoleApplyConfiguration represents a declarative configuration of the Console type for use
+// with apply.
+type ConsoleApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec *ConsoleSpecApplyConfiguration `json:"spec,omitempty"`
+	Status *ConsoleStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Console constructs a declarative configuration of the Console type for use with
+// apply.
+func Console(name string) *ConsoleApplyConfiguration {
+	b := &ConsoleApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("Console")
+	b.WithAPIVersion("operator.openshift.io/v1")
+	return b
+}
+
+// ExtractConsole extracts the applied configuration owned by fieldManager from
+// console. If no managedFields are found in console for fieldManager, a
+// ConsoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// console must be an unmodified Console API object that was retrieved from the Kubernetes API.
+// ExtractConsole provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractConsole(console *operatorv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) {
+	return extractConsole(console, fieldManager, "")
+}
+
+// ExtractConsoleStatus is the same as ExtractConsole except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractConsoleStatus(console *operatorv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) { + return extractConsole(console, fieldManager, "status") +} + +func extractConsole(console *operatorv1.Console, fieldManager string, subresource string) (*ConsoleApplyConfiguration, error) { + b := &ConsoleApplyConfiguration{} + err := managedfields.ExtractInto(console, internal.Parser().Type("com.github.openshift.api.operator.v1.Console"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(console.Name) + + b.WithKind("Console") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithKind(value string) *ConsoleApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithAPIVersion(value string) *ConsoleApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithName(value string) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithGenerateName(value string) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithNamespace(value string) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ConsoleApplyConfiguration) WithUID(value types.UID) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithResourceVersion(value string) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithGeneration(value int64) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ConsoleApplyConfiguration) WithLabels(entries map[string]string) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ConsoleApplyConfiguration) WithAnnotations(entries map[string]string) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ConsoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ConsoleApplyConfiguration) WithFinalizers(values ...string) *ConsoleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ConsoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ConsoleApplyConfiguration) WithSpec(value *ConsoleSpecApplyConfiguration) *ConsoleApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ConsoleApplyConfiguration) WithStatus(value *ConsoleStatusApplyConfiguration) *ConsoleApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ConsoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleconfigroute.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleconfigroute.go new file mode 100644 index 0000000000..b71ac9f3ab --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleconfigroute.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// ConsoleConfigRouteApplyConfiguration represents a declarative configuration of the ConsoleConfigRoute type for use +// with apply. +type ConsoleConfigRouteApplyConfiguration struct { + Hostname *string `json:"hostname,omitempty"` + Secret *configv1.SecretNameReference `json:"secret,omitempty"` +} + +// ConsoleConfigRouteApplyConfiguration constructs a declarative configuration of the ConsoleConfigRoute type for use with +// apply. +func ConsoleConfigRoute() *ConsoleConfigRouteApplyConfiguration { + return &ConsoleConfigRouteApplyConfiguration{} +} + +// WithHostname sets the Hostname field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Hostname field is set to the value of the last call. +func (b *ConsoleConfigRouteApplyConfiguration) WithHostname(value string) *ConsoleConfigRouteApplyConfiguration { + b.Hostname = &value + return b +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *ConsoleConfigRouteApplyConfiguration) WithSecret(value configv1.SecretNameReference) *ConsoleConfigRouteApplyConfiguration { + b.Secret = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolecustomization.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolecustomization.go new file mode 100644 index 0000000000..8849556280 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolecustomization.go @@ -0,0 +1,133 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" +) + +// ConsoleCustomizationApplyConfiguration represents a declarative configuration of the ConsoleCustomization type for use +// with apply. 
+type ConsoleCustomizationApplyConfiguration struct { + Logos []LogoApplyConfiguration `json:"logos,omitempty"` + Capabilities []CapabilityApplyConfiguration `json:"capabilities,omitempty"` + Brand *operatorv1.Brand `json:"brand,omitempty"` + DocumentationBaseURL *string `json:"documentationBaseURL,omitempty"` + CustomProductName *string `json:"customProductName,omitempty"` + CustomLogoFile *configv1.ConfigMapFileReference `json:"customLogoFile,omitempty"` + DeveloperCatalog *DeveloperConsoleCatalogCustomizationApplyConfiguration `json:"developerCatalog,omitempty"` + ProjectAccess *ProjectAccessApplyConfiguration `json:"projectAccess,omitempty"` + QuickStarts *QuickStartsApplyConfiguration `json:"quickStarts,omitempty"` + AddPage *AddPageApplyConfiguration `json:"addPage,omitempty"` + Perspectives []PerspectiveApplyConfiguration `json:"perspectives,omitempty"` +} + +// ConsoleCustomizationApplyConfiguration constructs a declarative configuration of the ConsoleCustomization type for use with +// apply. +func ConsoleCustomization() *ConsoleCustomizationApplyConfiguration { + return &ConsoleCustomizationApplyConfiguration{} +} + +// WithLogos adds the given value to the Logos field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Logos field. +func (b *ConsoleCustomizationApplyConfiguration) WithLogos(values ...*LogoApplyConfiguration) *ConsoleCustomizationApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithLogos") + } + b.Logos = append(b.Logos, *values[i]) + } + return b +} + +// WithCapabilities adds the given value to the Capabilities field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Capabilities field. +func (b *ConsoleCustomizationApplyConfiguration) WithCapabilities(values ...*CapabilityApplyConfiguration) *ConsoleCustomizationApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithCapabilities") + } + b.Capabilities = append(b.Capabilities, *values[i]) + } + return b +} + +// WithBrand sets the Brand field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Brand field is set to the value of the last call. +func (b *ConsoleCustomizationApplyConfiguration) WithBrand(value operatorv1.Brand) *ConsoleCustomizationApplyConfiguration { + b.Brand = &value + return b +} + +// WithDocumentationBaseURL sets the DocumentationBaseURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DocumentationBaseURL field is set to the value of the last call. +func (b *ConsoleCustomizationApplyConfiguration) WithDocumentationBaseURL(value string) *ConsoleCustomizationApplyConfiguration { + b.DocumentationBaseURL = &value + return b +} + +// WithCustomProductName sets the CustomProductName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the CustomProductName field is set to the value of the last call. +func (b *ConsoleCustomizationApplyConfiguration) WithCustomProductName(value string) *ConsoleCustomizationApplyConfiguration { + b.CustomProductName = &value + return b +} + +// WithCustomLogoFile sets the CustomLogoFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CustomLogoFile field is set to the value of the last call. +func (b *ConsoleCustomizationApplyConfiguration) WithCustomLogoFile(value configv1.ConfigMapFileReference) *ConsoleCustomizationApplyConfiguration { + b.CustomLogoFile = &value + return b +} + +// WithDeveloperCatalog sets the DeveloperCatalog field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeveloperCatalog field is set to the value of the last call. +func (b *ConsoleCustomizationApplyConfiguration) WithDeveloperCatalog(value *DeveloperConsoleCatalogCustomizationApplyConfiguration) *ConsoleCustomizationApplyConfiguration { + b.DeveloperCatalog = value + return b +} + +// WithProjectAccess sets the ProjectAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProjectAccess field is set to the value of the last call. +func (b *ConsoleCustomizationApplyConfiguration) WithProjectAccess(value *ProjectAccessApplyConfiguration) *ConsoleCustomizationApplyConfiguration { + b.ProjectAccess = value + return b +} + +// WithQuickStarts sets the QuickStarts field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the QuickStarts field is set to the value of the last call. +func (b *ConsoleCustomizationApplyConfiguration) WithQuickStarts(value *QuickStartsApplyConfiguration) *ConsoleCustomizationApplyConfiguration { + b.QuickStarts = value + return b +} + +// WithAddPage sets the AddPage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AddPage field is set to the value of the last call. +func (b *ConsoleCustomizationApplyConfiguration) WithAddPage(value *AddPageApplyConfiguration) *ConsoleCustomizationApplyConfiguration { + b.AddPage = value + return b +} + +// WithPerspectives adds the given value to the Perspectives field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Perspectives field. 
+func (b *ConsoleCustomizationApplyConfiguration) WithPerspectives(values ...*PerspectiveApplyConfiguration) *ConsoleCustomizationApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPerspectives") + } + b.Perspectives = append(b.Perspectives, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleproviders.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleproviders.go new file mode 100644 index 0000000000..97281e5c56 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consoleproviders.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ConsoleProvidersApplyConfiguration represents a declarative configuration of the ConsoleProviders type for use +// with apply. +type ConsoleProvidersApplyConfiguration struct { + Statuspage *StatuspageProviderApplyConfiguration `json:"statuspage,omitempty"` +} + +// ConsoleProvidersApplyConfiguration constructs a declarative configuration of the ConsoleProviders type for use with +// apply. +func ConsoleProviders() *ConsoleProvidersApplyConfiguration { + return &ConsoleProvidersApplyConfiguration{} +} + +// WithStatuspage sets the Statuspage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Statuspage field is set to the value of the last call. +func (b *ConsoleProvidersApplyConfiguration) WithStatuspage(value *StatuspageProviderApplyConfiguration) *ConsoleProvidersApplyConfiguration { + b.Statuspage = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolespec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolespec.go new file mode 100644 index 0000000000..0155ffef34 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolespec.go @@ -0,0 +1,107 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// ConsoleSpecApplyConfiguration represents a declarative configuration of the ConsoleSpec type for use +// with apply. +type ConsoleSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` + Customization *ConsoleCustomizationApplyConfiguration `json:"customization,omitempty"` + Providers *ConsoleProvidersApplyConfiguration `json:"providers,omitempty"` + Route *ConsoleConfigRouteApplyConfiguration `json:"route,omitempty"` + Plugins []string `json:"plugins,omitempty"` + Ingress *IngressApplyConfiguration `json:"ingress,omitempty"` +} + +// ConsoleSpecApplyConfiguration constructs a declarative configuration of the ConsoleSpec type for use with +// apply. +func ConsoleSpec() *ConsoleSpecApplyConfiguration { + return &ConsoleSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. 
+func (b *ConsoleSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ConsoleSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *ConsoleSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ConsoleSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *ConsoleSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ConsoleSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *ConsoleSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ConsoleSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *ConsoleSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ConsoleSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} + +// WithCustomization sets the Customization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Customization field is set to the value of the last call. +func (b *ConsoleSpecApplyConfiguration) WithCustomization(value *ConsoleCustomizationApplyConfiguration) *ConsoleSpecApplyConfiguration { + b.Customization = value + return b +} + +// WithProviders sets the Providers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Providers field is set to the value of the last call. +func (b *ConsoleSpecApplyConfiguration) WithProviders(value *ConsoleProvidersApplyConfiguration) *ConsoleSpecApplyConfiguration { + b.Providers = value + return b +} + +// WithRoute sets the Route field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Route field is set to the value of the last call. 
+func (b *ConsoleSpecApplyConfiguration) WithRoute(value *ConsoleConfigRouteApplyConfiguration) *ConsoleSpecApplyConfiguration { + b.Route = value + return b +} + +// WithPlugins adds the given value to the Plugins field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Plugins field. +func (b *ConsoleSpecApplyConfiguration) WithPlugins(values ...string) *ConsoleSpecApplyConfiguration { + for i := range values { + b.Plugins = append(b.Plugins, values[i]) + } + return b +} + +// WithIngress sets the Ingress field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ingress field is set to the value of the last call. +func (b *ConsoleSpecApplyConfiguration) WithIngress(value *IngressApplyConfiguration) *ConsoleSpecApplyConfiguration { + b.Ingress = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolestatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolestatus.go new file mode 100644 index 0000000000..ad34934776 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolestatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ConsoleStatusApplyConfiguration represents a declarative configuration of the ConsoleStatus type for use +// with apply. +type ConsoleStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// ConsoleStatusApplyConfiguration constructs a declarative configuration of the ConsoleStatus type for use with +// apply. +func ConsoleStatus() *ConsoleStatusApplyConfiguration { + return &ConsoleStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *ConsoleStatusApplyConfiguration) WithObservedGeneration(value int64) *ConsoleStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ConsoleStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *ConsoleStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. 
+func (b *ConsoleStatusApplyConfiguration) WithVersion(value string) *ConsoleStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *ConsoleStatusApplyConfiguration) WithReadyReplicas(value int32) *ConsoleStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *ConsoleStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ConsoleStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *ConsoleStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *ConsoleStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/containerloggingdestinationparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/containerloggingdestinationparameters.go new file mode 100644 index 0000000000..633bbca1a6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/containerloggingdestinationparameters.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ContainerLoggingDestinationParametersApplyConfiguration represents a declarative configuration of the ContainerLoggingDestinationParameters type for use +// with apply. +type ContainerLoggingDestinationParametersApplyConfiguration struct { + MaxLength *int32 `json:"maxLength,omitempty"` +} + +// ContainerLoggingDestinationParametersApplyConfiguration constructs a declarative configuration of the ContainerLoggingDestinationParameters type for use with +// apply. +func ContainerLoggingDestinationParameters() *ContainerLoggingDestinationParametersApplyConfiguration { + return &ContainerLoggingDestinationParametersApplyConfiguration{} +} + +// WithMaxLength sets the MaxLength field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxLength field is set to the value of the last call. 
+func (b *ContainerLoggingDestinationParametersApplyConfiguration) WithMaxLength(value int32) *ContainerLoggingDestinationParametersApplyConfiguration { + b.MaxLength = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go new file mode 100644 index 0000000000..15b793452a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go @@ -0,0 +1,72 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// CSIDriverConfigSpecApplyConfiguration represents a declarative configuration of the CSIDriverConfigSpec type for use +// with apply. +type CSIDriverConfigSpecApplyConfiguration struct { + DriverType *operatorv1.CSIDriverType `json:"driverType,omitempty"` + AWS *AWSCSIDriverConfigSpecApplyConfiguration `json:"aws,omitempty"` + Azure *AzureCSIDriverConfigSpecApplyConfiguration `json:"azure,omitempty"` + GCP *GCPCSIDriverConfigSpecApplyConfiguration `json:"gcp,omitempty"` + IBMCloud *IBMCloudCSIDriverConfigSpecApplyConfiguration `json:"ibmcloud,omitempty"` + VSphere *VSphereCSIDriverConfigSpecApplyConfiguration `json:"vSphere,omitempty"` +} + +// CSIDriverConfigSpecApplyConfiguration constructs a declarative configuration of the CSIDriverConfigSpec type for use with +// apply. +func CSIDriverConfigSpec() *CSIDriverConfigSpecApplyConfiguration { + return &CSIDriverConfigSpecApplyConfiguration{} +} + +// WithDriverType sets the DriverType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverType field is set to the value of the last call. +func (b *CSIDriverConfigSpecApplyConfiguration) WithDriverType(value operatorv1.CSIDriverType) *CSIDriverConfigSpecApplyConfiguration { + b.DriverType = &value + return b +} + +// WithAWS sets the AWS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AWS field is set to the value of the last call. +func (b *CSIDriverConfigSpecApplyConfiguration) WithAWS(value *AWSCSIDriverConfigSpecApplyConfiguration) *CSIDriverConfigSpecApplyConfiguration { + b.AWS = value + return b +} + +// WithAzure sets the Azure field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Azure field is set to the value of the last call. +func (b *CSIDriverConfigSpecApplyConfiguration) WithAzure(value *AzureCSIDriverConfigSpecApplyConfiguration) *CSIDriverConfigSpecApplyConfiguration { + b.Azure = value + return b +} + +// WithGCP sets the GCP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GCP field is set to the value of the last call. 
+func (b *CSIDriverConfigSpecApplyConfiguration) WithGCP(value *GCPCSIDriverConfigSpecApplyConfiguration) *CSIDriverConfigSpecApplyConfiguration { + b.GCP = value + return b +} + +// WithIBMCloud sets the IBMCloud field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IBMCloud field is set to the value of the last call. +func (b *CSIDriverConfigSpecApplyConfiguration) WithIBMCloud(value *IBMCloudCSIDriverConfigSpecApplyConfiguration) *CSIDriverConfigSpecApplyConfiguration { + b.IBMCloud = value + return b +} + +// WithVSphere sets the VSphere field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VSphere field is set to the value of the last call. +func (b *CSIDriverConfigSpecApplyConfiguration) WithVSphere(value *VSphereCSIDriverConfigSpecApplyConfiguration) *CSIDriverConfigSpecApplyConfiguration { + b.VSphere = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go new file mode 100644 index 0000000000..d9b93f6206 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// CSISnapshotControllerApplyConfiguration represents a declarative configuration of the CSISnapshotController type for use +// with apply. +type CSISnapshotControllerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CSISnapshotControllerSpecApplyConfiguration `json:"spec,omitempty"` + Status *CSISnapshotControllerStatusApplyConfiguration `json:"status,omitempty"` +} + +// CSISnapshotController constructs a declarative configuration of the CSISnapshotController type for use with +// apply. +func CSISnapshotController(name string) *CSISnapshotControllerApplyConfiguration { + b := &CSISnapshotControllerApplyConfiguration{} + b.WithName(name) + b.WithKind("CSISnapshotController") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractCSISnapshotController extracts the applied configuration owned by fieldManager from +// cSISnapshotController. If no managedFields are found in cSISnapshotController for fieldManager, a +// CSISnapshotControllerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// cSISnapshotController must be a unmodified CSISnapshotController API object that was retrieved from the Kubernetes API. 
+// ExtractCSISnapshotController provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractCSISnapshotController(cSISnapshotController *operatorv1.CSISnapshotController, fieldManager string) (*CSISnapshotControllerApplyConfiguration, error) { + return extractCSISnapshotController(cSISnapshotController, fieldManager, "") +} + +// ExtractCSISnapshotControllerStatus is the same as ExtractCSISnapshotController except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractCSISnapshotControllerStatus(cSISnapshotController *operatorv1.CSISnapshotController, fieldManager string) (*CSISnapshotControllerApplyConfiguration, error) { + return extractCSISnapshotController(cSISnapshotController, fieldManager, "status") +} + +func extractCSISnapshotController(cSISnapshotController *operatorv1.CSISnapshotController, fieldManager string, subresource string) (*CSISnapshotControllerApplyConfiguration, error) { + b := &CSISnapshotControllerApplyConfiguration{} + err := managedfields.ExtractInto(cSISnapshotController, internal.Parser().Type("com.github.openshift.api.operator.v1.CSISnapshotController"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(cSISnapshotController.Name) + + b.WithKind("CSISnapshotController") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *CSISnapshotControllerApplyConfiguration) WithKind(value string) *CSISnapshotControllerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *CSISnapshotControllerApplyConfiguration) WithAPIVersion(value string) *CSISnapshotControllerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *CSISnapshotControllerApplyConfiguration) WithName(value string) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *CSISnapshotControllerApplyConfiguration) WithGenerateName(value string) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *CSISnapshotControllerApplyConfiguration) WithNamespace(value string) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *CSISnapshotControllerApplyConfiguration) WithUID(value types.UID) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *CSISnapshotControllerApplyConfiguration) WithResourceVersion(value string) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *CSISnapshotControllerApplyConfiguration) WithGeneration(value int64) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *CSISnapshotControllerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *CSISnapshotControllerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *CSISnapshotControllerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *CSISnapshotControllerApplyConfiguration) WithLabels(entries map[string]string) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *CSISnapshotControllerApplyConfiguration) WithAnnotations(entries map[string]string) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *CSISnapshotControllerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *CSISnapshotControllerApplyConfiguration) WithFinalizers(values ...string) *CSISnapshotControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *CSISnapshotControllerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *CSISnapshotControllerApplyConfiguration) WithSpec(value *CSISnapshotControllerSpecApplyConfiguration) *CSISnapshotControllerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *CSISnapshotControllerApplyConfiguration) WithStatus(value *CSISnapshotControllerStatusApplyConfiguration) *CSISnapshotControllerApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSISnapshotControllerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerspec.go new file mode 100644 index 0000000000..fd90faa2de --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// CSISnapshotControllerSpecApplyConfiguration represents a declarative configuration of the CSISnapshotControllerSpec type for use +// with apply. +type CSISnapshotControllerSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// CSISnapshotControllerSpecApplyConfiguration constructs a declarative configuration of the CSISnapshotControllerSpec type for use with +// apply. +func CSISnapshotControllerSpec() *CSISnapshotControllerSpecApplyConfiguration { + return &CSISnapshotControllerSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. 
+func (b *CSISnapshotControllerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *CSISnapshotControllerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *CSISnapshotControllerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *CSISnapshotControllerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *CSISnapshotControllerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *CSISnapshotControllerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *CSISnapshotControllerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *CSISnapshotControllerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *CSISnapshotControllerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *CSISnapshotControllerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerstatus.go new file mode 100644 index 0000000000..5b6d30d8d5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontrollerstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// CSISnapshotControllerStatusApplyConfiguration represents a declarative configuration of the CSISnapshotControllerStatus type for use +// with apply. +type CSISnapshotControllerStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// CSISnapshotControllerStatusApplyConfiguration constructs a declarative configuration of the CSISnapshotControllerStatus type for use with +// apply. 
+func CSISnapshotControllerStatus() *CSISnapshotControllerStatusApplyConfiguration { + return &CSISnapshotControllerStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *CSISnapshotControllerStatusApplyConfiguration) WithObservedGeneration(value int64) *CSISnapshotControllerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *CSISnapshotControllerStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *CSISnapshotControllerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *CSISnapshotControllerStatusApplyConfiguration) WithVersion(value string) *CSISnapshotControllerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *CSISnapshotControllerStatusApplyConfiguration) WithReadyReplicas(value int32) *CSISnapshotControllerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *CSISnapshotControllerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *CSISnapshotControllerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. 
+func (b *CSISnapshotControllerStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *CSISnapshotControllerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/defaultnetworkdefinition.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/defaultnetworkdefinition.go new file mode 100644 index 0000000000..b62419ac4a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/defaultnetworkdefinition.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// DefaultNetworkDefinitionApplyConfiguration represents a declarative configuration of the DefaultNetworkDefinition type for use +// with apply. +type DefaultNetworkDefinitionApplyConfiguration struct { + Type *operatorv1.NetworkType `json:"type,omitempty"` + OpenShiftSDNConfig *OpenShiftSDNConfigApplyConfiguration `json:"openshiftSDNConfig,omitempty"` + OVNKubernetesConfig *OVNKubernetesConfigApplyConfiguration `json:"ovnKubernetesConfig,omitempty"` +} + +// DefaultNetworkDefinitionApplyConfiguration constructs a declarative configuration of the DefaultNetworkDefinition type for use with +// apply. +func DefaultNetworkDefinition() *DefaultNetworkDefinitionApplyConfiguration { + return &DefaultNetworkDefinitionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *DefaultNetworkDefinitionApplyConfiguration) WithType(value operatorv1.NetworkType) *DefaultNetworkDefinitionApplyConfiguration { + b.Type = &value + return b +} + +// WithOpenShiftSDNConfig sets the OpenShiftSDNConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpenShiftSDNConfig field is set to the value of the last call. +func (b *DefaultNetworkDefinitionApplyConfiguration) WithOpenShiftSDNConfig(value *OpenShiftSDNConfigApplyConfiguration) *DefaultNetworkDefinitionApplyConfiguration { + b.OpenShiftSDNConfig = value + return b +} + +// WithOVNKubernetesConfig sets the OVNKubernetesConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OVNKubernetesConfig field is set to the value of the last call. 
+func (b *DefaultNetworkDefinitionApplyConfiguration) WithOVNKubernetesConfig(value *OVNKubernetesConfigApplyConfiguration) *DefaultNetworkDefinitionApplyConfiguration { + b.OVNKubernetesConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategory.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategory.go new file mode 100644 index 0000000000..2a296d7312 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategory.go @@ -0,0 +1,55 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// DeveloperConsoleCatalogCategoryApplyConfiguration represents a declarative configuration of the DeveloperConsoleCatalogCategory type for use +// with apply. +type DeveloperConsoleCatalogCategoryApplyConfiguration struct { + DeveloperConsoleCatalogCategoryMetaApplyConfiguration `json:",inline"` + Subcategories []DeveloperConsoleCatalogCategoryMetaApplyConfiguration `json:"subcategories,omitempty"` +} + +// DeveloperConsoleCatalogCategoryApplyConfiguration constructs a declarative configuration of the DeveloperConsoleCatalogCategory type for use with +// apply. +func DeveloperConsoleCatalogCategory() *DeveloperConsoleCatalogCategoryApplyConfiguration { + return &DeveloperConsoleCatalogCategoryApplyConfiguration{} +} + +// WithID sets the ID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ID field is set to the value of the last call. +func (b *DeveloperConsoleCatalogCategoryApplyConfiguration) WithID(value string) *DeveloperConsoleCatalogCategoryApplyConfiguration { + b.DeveloperConsoleCatalogCategoryMetaApplyConfiguration.ID = &value + return b +} + +// WithLabel sets the Label field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Label field is set to the value of the last call. +func (b *DeveloperConsoleCatalogCategoryApplyConfiguration) WithLabel(value string) *DeveloperConsoleCatalogCategoryApplyConfiguration { + b.DeveloperConsoleCatalogCategoryMetaApplyConfiguration.Label = &value + return b +} + +// WithTags adds the given value to the Tags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tags field. +func (b *DeveloperConsoleCatalogCategoryApplyConfiguration) WithTags(values ...string) *DeveloperConsoleCatalogCategoryApplyConfiguration { + for i := range values { + b.DeveloperConsoleCatalogCategoryMetaApplyConfiguration.Tags = append(b.DeveloperConsoleCatalogCategoryMetaApplyConfiguration.Tags, values[i]) + } + return b +} + +// WithSubcategories adds the given value to the Subcategories field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Subcategories field. 
+func (b *DeveloperConsoleCatalogCategoryApplyConfiguration) WithSubcategories(values ...*DeveloperConsoleCatalogCategoryMetaApplyConfiguration) *DeveloperConsoleCatalogCategoryApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSubcategories") + } + b.Subcategories = append(b.Subcategories, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategorymeta.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategorymeta.go new file mode 100644 index 0000000000..ce6e7fdda4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcategorymeta.go @@ -0,0 +1,43 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// DeveloperConsoleCatalogCategoryMetaApplyConfiguration represents a declarative configuration of the DeveloperConsoleCatalogCategoryMeta type for use +// with apply. +type DeveloperConsoleCatalogCategoryMetaApplyConfiguration struct { + ID *string `json:"id,omitempty"` + Label *string `json:"label,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +// DeveloperConsoleCatalogCategoryMetaApplyConfiguration constructs a declarative configuration of the DeveloperConsoleCatalogCategoryMeta type for use with +// apply. +func DeveloperConsoleCatalogCategoryMeta() *DeveloperConsoleCatalogCategoryMetaApplyConfiguration { + return &DeveloperConsoleCatalogCategoryMetaApplyConfiguration{} +} + +// WithID sets the ID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ID field is set to the value of the last call. +func (b *DeveloperConsoleCatalogCategoryMetaApplyConfiguration) WithID(value string) *DeveloperConsoleCatalogCategoryMetaApplyConfiguration { + b.ID = &value + return b +} + +// WithLabel sets the Label field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Label field is set to the value of the last call. +func (b *DeveloperConsoleCatalogCategoryMetaApplyConfiguration) WithLabel(value string) *DeveloperConsoleCatalogCategoryMetaApplyConfiguration { + b.Label = &value + return b +} + +// WithTags adds the given value to the Tags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tags field. +func (b *DeveloperConsoleCatalogCategoryMetaApplyConfiguration) WithTags(values ...string) *DeveloperConsoleCatalogCategoryMetaApplyConfiguration { + for i := range values { + b.Tags = append(b.Tags, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcustomization.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcustomization.go new file mode 100644 index 0000000000..fcbc810995 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcustomization.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// DeveloperConsoleCatalogCustomizationApplyConfiguration represents a declarative configuration of the DeveloperConsoleCatalogCustomization type for use +// with apply. +type DeveloperConsoleCatalogCustomizationApplyConfiguration struct { + Categories []DeveloperConsoleCatalogCategoryApplyConfiguration `json:"categories,omitempty"` + Types *DeveloperConsoleCatalogTypesApplyConfiguration `json:"types,omitempty"` +} + +// DeveloperConsoleCatalogCustomizationApplyConfiguration constructs a declarative configuration of the DeveloperConsoleCatalogCustomization type for use with +// apply. +func DeveloperConsoleCatalogCustomization() *DeveloperConsoleCatalogCustomizationApplyConfiguration { + return &DeveloperConsoleCatalogCustomizationApplyConfiguration{} +} + +// WithCategories adds the given value to the Categories field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Categories field. +func (b *DeveloperConsoleCatalogCustomizationApplyConfiguration) WithCategories(values ...*DeveloperConsoleCatalogCategoryApplyConfiguration) *DeveloperConsoleCatalogCustomizationApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithCategories") + } + b.Categories = append(b.Categories, *values[i]) + } + return b +} + +// WithTypes sets the Types field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Types field is set to the value of the last call. +func (b *DeveloperConsoleCatalogCustomizationApplyConfiguration) WithTypes(value *DeveloperConsoleCatalogTypesApplyConfiguration) *DeveloperConsoleCatalogCustomizationApplyConfiguration { + b.Types = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go new file mode 100644 index 0000000000..d847d2065b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// DeveloperConsoleCatalogTypesApplyConfiguration represents a declarative configuration of the DeveloperConsoleCatalogTypes type for use +// with apply. +type DeveloperConsoleCatalogTypesApplyConfiguration struct { + State *operatorv1.CatalogTypesState `json:"state,omitempty"` + Enabled *[]string `json:"enabled,omitempty"` + Disabled *[]string `json:"disabled,omitempty"` +} + +// DeveloperConsoleCatalogTypesApplyConfiguration constructs a declarative configuration of the DeveloperConsoleCatalogTypes type for use with +// apply. +func DeveloperConsoleCatalogTypes() *DeveloperConsoleCatalogTypesApplyConfiguration { + return &DeveloperConsoleCatalogTypesApplyConfiguration{} +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. 
+func (b *DeveloperConsoleCatalogTypesApplyConfiguration) WithState(value operatorv1.CatalogTypesState) *DeveloperConsoleCatalogTypesApplyConfiguration { + b.State = &value + return b +} + +// WithEnabled sets the Enabled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Enabled field is set to the value of the last call. +func (b *DeveloperConsoleCatalogTypesApplyConfiguration) WithEnabled(value []string) *DeveloperConsoleCatalogTypesApplyConfiguration { + b.Enabled = &value + return b +} + +// WithDisabled sets the Disabled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Disabled field is set to the value of the last call. +func (b *DeveloperConsoleCatalogTypesApplyConfiguration) WithDisabled(value []string) *DeveloperConsoleCatalogTypesApplyConfiguration { + b.Disabled = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go new file mode 100644 index 0000000000..7b2cb3d364 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// DNSApplyConfiguration represents a declarative configuration of the DNS type for use +// with apply. +type DNSApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DNSSpecApplyConfiguration `json:"spec,omitempty"` + Status *DNSStatusApplyConfiguration `json:"status,omitempty"` +} + +// DNS constructs a declarative configuration of the DNS type for use with +// apply. +func DNS(name string) *DNSApplyConfiguration { + b := &DNSApplyConfiguration{} + b.WithName(name) + b.WithKind("DNS") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractDNS extracts the applied configuration owned by fieldManager from +// dNS. If no managedFields are found in dNS for fieldManager, a +// DNSApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// dNS must be a unmodified DNS API object that was retrieved from the Kubernetes API. +// ExtractDNS provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
+func ExtractDNS(dNS *operatorv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { + return extractDNS(dNS, fieldManager, "") +} + +// ExtractDNSStatus is the same as ExtractDNS except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractDNSStatus(dNS *operatorv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) { + return extractDNS(dNS, fieldManager, "status") +} + +func extractDNS(dNS *operatorv1.DNS, fieldManager string, subresource string) (*DNSApplyConfiguration, error) { + b := &DNSApplyConfiguration{} + err := managedfields.ExtractInto(dNS, internal.Parser().Type("com.github.openshift.api.operator.v1.DNS"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(dNS.Name) + + b.WithKind("DNS") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithKind(value string) *DNSApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithAPIVersion(value string) *DNSApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithName(value string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithGenerateName(value string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithNamespace(value string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *DNSApplyConfiguration) WithUID(value types.UID) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithResourceVersion(value string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithGeneration(value int64) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *DNSApplyConfiguration) WithLabels(entries map[string]string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *DNSApplyConfiguration) WithAnnotations(entries map[string]string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *DNSApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *DNSApplyConfiguration) WithFinalizers(values ...string) *DNSApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *DNSApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *DNSApplyConfiguration) WithSpec(value *DNSSpecApplyConfiguration) *DNSApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *DNSApplyConfiguration) WithStatus(value *DNSStatusApplyConfiguration) *DNSApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DNSApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnscache.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnscache.go new file mode 100644 index 0000000000..09244ed903 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnscache.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DNSCacheApplyConfiguration represents a declarative configuration of the DNSCache type for use +// with apply. +type DNSCacheApplyConfiguration struct { + PositiveTTL *metav1.Duration `json:"positiveTTL,omitempty"` + NegativeTTL *metav1.Duration `json:"negativeTTL,omitempty"` +} + +// DNSCacheApplyConfiguration constructs a declarative configuration of the DNSCache type for use with +// apply. +func DNSCache() *DNSCacheApplyConfiguration { + return &DNSCacheApplyConfiguration{} +} + +// WithPositiveTTL sets the PositiveTTL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PositiveTTL field is set to the value of the last call. +func (b *DNSCacheApplyConfiguration) WithPositiveTTL(value metav1.Duration) *DNSCacheApplyConfiguration { + b.PositiveTTL = &value + return b +} + +// WithNegativeTTL sets the NegativeTTL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NegativeTTL field is set to the value of the last call. +func (b *DNSCacheApplyConfiguration) WithNegativeTTL(value metav1.Duration) *DNSCacheApplyConfiguration { + b.NegativeTTL = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsnodeplacement.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsnodeplacement.go new file mode 100644 index 0000000000..b82c86dd35 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsnodeplacement.go @@ -0,0 +1,44 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// DNSNodePlacementApplyConfiguration represents a declarative configuration of the DNSNodePlacement type for use +// with apply. +type DNSNodePlacementApplyConfiguration struct { + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// DNSNodePlacementApplyConfiguration constructs a declarative configuration of the DNSNodePlacement type for use with +// apply. +func DNSNodePlacement() *DNSNodePlacementApplyConfiguration { + return &DNSNodePlacementApplyConfiguration{} +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *DNSNodePlacementApplyConfiguration) WithNodeSelector(entries map[string]string) *DNSNodePlacementApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *DNSNodePlacementApplyConfiguration) WithTolerations(values ...corev1.Toleration) *DNSNodePlacementApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsovertlsconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsovertlsconfig.go new file mode 100644 index 0000000000..7267bca62b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsovertlsconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// DNSOverTLSConfigApplyConfiguration represents a declarative configuration of the DNSOverTLSConfig type for use +// with apply. +type DNSOverTLSConfigApplyConfiguration struct { + ServerName *string `json:"serverName,omitempty"` + CABundle *configv1.ConfigMapNameReference `json:"caBundle,omitempty"` +} + +// DNSOverTLSConfigApplyConfiguration constructs a declarative configuration of the DNSOverTLSConfig type for use with +// apply. +func DNSOverTLSConfig() *DNSOverTLSConfigApplyConfiguration { + return &DNSOverTLSConfigApplyConfiguration{} +} + +// WithServerName sets the ServerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServerName field is set to the value of the last call. +func (b *DNSOverTLSConfigApplyConfiguration) WithServerName(value string) *DNSOverTLSConfigApplyConfiguration { + b.ServerName = &value + return b +} + +// WithCABundle sets the CABundle field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CABundle field is set to the value of the last call. +func (b *DNSOverTLSConfigApplyConfiguration) WithCABundle(value configv1.ConfigMapNameReference) *DNSOverTLSConfigApplyConfiguration { + b.CABundle = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsspec.go new file mode 100644 index 0000000000..2016291891 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsspec.go @@ -0,0 +1,86 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// DNSSpecApplyConfiguration represents a declarative configuration of the DNSSpec type for use +// with apply. +type DNSSpecApplyConfiguration struct { + Servers []ServerApplyConfiguration `json:"servers,omitempty"` + UpstreamResolvers *UpstreamResolversApplyConfiguration `json:"upstreamResolvers,omitempty"` + NodePlacement *DNSNodePlacementApplyConfiguration `json:"nodePlacement,omitempty"` + ManagementState *operatorv1.ManagementState `json:"managementState,omitempty"` + OperatorLogLevel *operatorv1.DNSLogLevel `json:"operatorLogLevel,omitempty"` + LogLevel *operatorv1.DNSLogLevel `json:"logLevel,omitempty"` + Cache *DNSCacheApplyConfiguration `json:"cache,omitempty"` +} + +// DNSSpecApplyConfiguration constructs a declarative configuration of the DNSSpec type for use with +// apply. +func DNSSpec() *DNSSpecApplyConfiguration { + return &DNSSpecApplyConfiguration{} +} + +// WithServers adds the given value to the Servers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Servers field. +func (b *DNSSpecApplyConfiguration) WithServers(values ...*ServerApplyConfiguration) *DNSSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithServers") + } + b.Servers = append(b.Servers, *values[i]) + } + return b +} + +// WithUpstreamResolvers sets the UpstreamResolvers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpstreamResolvers field is set to the value of the last call. +func (b *DNSSpecApplyConfiguration) WithUpstreamResolvers(value *UpstreamResolversApplyConfiguration) *DNSSpecApplyConfiguration { + b.UpstreamResolvers = value + return b +} + +// WithNodePlacement sets the NodePlacement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodePlacement field is set to the value of the last call. +func (b *DNSSpecApplyConfiguration) WithNodePlacement(value *DNSNodePlacementApplyConfiguration) *DNSSpecApplyConfiguration { + b.NodePlacement = value + return b +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *DNSSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *DNSSpecApplyConfiguration { + b.ManagementState = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. 
+func (b *DNSSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.DNSLogLevel) *DNSSpecApplyConfiguration { + b.OperatorLogLevel = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *DNSSpecApplyConfiguration) WithLogLevel(value operatorv1.DNSLogLevel) *DNSSpecApplyConfiguration { + b.LogLevel = &value + return b +} + +// WithCache sets the Cache field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Cache field is set to the value of the last call. +func (b *DNSSpecApplyConfiguration) WithCache(value *DNSCacheApplyConfiguration) *DNSSpecApplyConfiguration { + b.Cache = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsstatus.go new file mode 100644 index 0000000000..9c86669318 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnsstatus.go @@ -0,0 +1,46 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// DNSStatusApplyConfiguration represents a declarative configuration of the DNSStatus type for use +// with apply. +type DNSStatusApplyConfiguration struct { + ClusterIP *string `json:"clusterIP,omitempty"` + ClusterDomain *string `json:"clusterDomain,omitempty"` + Conditions []OperatorConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// DNSStatusApplyConfiguration constructs a declarative configuration of the DNSStatus type for use with +// apply. +func DNSStatus() *DNSStatusApplyConfiguration { + return &DNSStatusApplyConfiguration{} +} + +// WithClusterIP sets the ClusterIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterIP field is set to the value of the last call. +func (b *DNSStatusApplyConfiguration) WithClusterIP(value string) *DNSStatusApplyConfiguration { + b.ClusterIP = &value + return b +} + +// WithClusterDomain sets the ClusterDomain field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterDomain field is set to the value of the last call. +func (b *DNSStatusApplyConfiguration) WithClusterDomain(value string) *DNSStatusApplyConfiguration { + b.ClusterDomain = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. 
+func (b *DNSStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *DNSStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnstransportconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnstransportconfig.go new file mode 100644 index 0000000000..1b689670cf --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dnstransportconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// DNSTransportConfigApplyConfiguration represents a declarative configuration of the DNSTransportConfig type for use +// with apply. +type DNSTransportConfigApplyConfiguration struct { + Transport *operatorv1.DNSTransport `json:"transport,omitempty"` + TLS *DNSOverTLSConfigApplyConfiguration `json:"tls,omitempty"` +} + +// DNSTransportConfigApplyConfiguration constructs a declarative configuration of the DNSTransportConfig type for use with +// apply. +func DNSTransportConfig() *DNSTransportConfigApplyConfiguration { + return &DNSTransportConfigApplyConfiguration{} +} + +// WithTransport sets the Transport field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Transport field is set to the value of the last call. +func (b *DNSTransportConfigApplyConfiguration) WithTransport(value operatorv1.DNSTransport) *DNSTransportConfigApplyConfiguration { + b.Transport = &value + return b +} + +// WithTLS sets the TLS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLS field is set to the value of the last call. +func (b *DNSTransportConfigApplyConfiguration) WithTLS(value *DNSOverTLSConfigApplyConfiguration) *DNSTransportConfigApplyConfiguration { + b.TLS = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/egressipconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/egressipconfig.go new file mode 100644 index 0000000000..f8c2cbeeed --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/egressipconfig.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// EgressIPConfigApplyConfiguration represents a declarative configuration of the EgressIPConfig type for use +// with apply. +type EgressIPConfigApplyConfiguration struct { + ReachabilityTotalTimeoutSeconds *uint32 `json:"reachabilityTotalTimeoutSeconds,omitempty"` +} + +// EgressIPConfigApplyConfiguration constructs a declarative configuration of the EgressIPConfig type for use with +// apply. +func EgressIPConfig() *EgressIPConfigApplyConfiguration { + return &EgressIPConfigApplyConfiguration{} +} + +// WithReachabilityTotalTimeoutSeconds sets the ReachabilityTotalTimeoutSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ReachabilityTotalTimeoutSeconds field is set to the value of the last call. +func (b *EgressIPConfigApplyConfiguration) WithReachabilityTotalTimeoutSeconds(value uint32) *EgressIPConfigApplyConfiguration { + b.ReachabilityTotalTimeoutSeconds = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/endpointpublishingstrategy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/endpointpublishingstrategy.go new file mode 100644 index 0000000000..f4006d50c0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/endpointpublishingstrategy.go @@ -0,0 +1,63 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// EndpointPublishingStrategyApplyConfiguration represents a declarative configuration of the EndpointPublishingStrategy type for use +// with apply. +type EndpointPublishingStrategyApplyConfiguration struct { + Type *operatorv1.EndpointPublishingStrategyType `json:"type,omitempty"` + LoadBalancer *LoadBalancerStrategyApplyConfiguration `json:"loadBalancer,omitempty"` + HostNetwork *HostNetworkStrategyApplyConfiguration `json:"hostNetwork,omitempty"` + Private *PrivateStrategyApplyConfiguration `json:"private,omitempty"` + NodePort *NodePortStrategyApplyConfiguration `json:"nodePort,omitempty"` +} + +// EndpointPublishingStrategyApplyConfiguration constructs a declarative configuration of the EndpointPublishingStrategy type for use with +// apply. +func EndpointPublishingStrategy() *EndpointPublishingStrategyApplyConfiguration { + return &EndpointPublishingStrategyApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *EndpointPublishingStrategyApplyConfiguration) WithType(value operatorv1.EndpointPublishingStrategyType) *EndpointPublishingStrategyApplyConfiguration { + b.Type = &value + return b +} + +// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LoadBalancer field is set to the value of the last call. +func (b *EndpointPublishingStrategyApplyConfiguration) WithLoadBalancer(value *LoadBalancerStrategyApplyConfiguration) *EndpointPublishingStrategyApplyConfiguration { + b.LoadBalancer = value + return b +} + +// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostNetwork field is set to the value of the last call. +func (b *EndpointPublishingStrategyApplyConfiguration) WithHostNetwork(value *HostNetworkStrategyApplyConfiguration) *EndpointPublishingStrategyApplyConfiguration { + b.HostNetwork = value + return b +} + +// WithPrivate sets the Private field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Private field is set to the value of the last call. 
+func (b *EndpointPublishingStrategyApplyConfiguration) WithPrivate(value *PrivateStrategyApplyConfiguration) *EndpointPublishingStrategyApplyConfiguration { + b.Private = value + return b +} + +// WithNodePort sets the NodePort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodePort field is set to the value of the last call. +func (b *EndpointPublishingStrategyApplyConfiguration) WithNodePort(value *NodePortStrategyApplyConfiguration) *EndpointPublishingStrategyApplyConfiguration { + b.NodePort = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go new file mode 100644 index 0000000000..de118401e2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// EtcdApplyConfiguration represents a declarative configuration of the Etcd type for use +// with apply. +type EtcdApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *EtcdSpecApplyConfiguration `json:"spec,omitempty"` + Status *EtcdStatusApplyConfiguration `json:"status,omitempty"` +} + +// Etcd constructs a declarative configuration of the Etcd type for use with +// apply. +func Etcd(name string) *EtcdApplyConfiguration { + b := &EtcdApplyConfiguration{} + b.WithName(name) + b.WithKind("Etcd") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractEtcd extracts the applied configuration owned by fieldManager from +// etcd. If no managedFields are found in etcd for fieldManager, a +// EtcdApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// etcd must be a unmodified Etcd API object that was retrieved from the Kubernetes API. +// ExtractEtcd provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractEtcd(etcd *operatorv1.Etcd, fieldManager string) (*EtcdApplyConfiguration, error) { + return extractEtcd(etcd, fieldManager, "") +} + +// ExtractEtcdStatus is the same as ExtractEtcd except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractEtcdStatus(etcd *operatorv1.Etcd, fieldManager string) (*EtcdApplyConfiguration, error) { + return extractEtcd(etcd, fieldManager, "status") +} + +func extractEtcd(etcd *operatorv1.Etcd, fieldManager string, subresource string) (*EtcdApplyConfiguration, error) { + b := &EtcdApplyConfiguration{} + err := managedfields.ExtractInto(etcd, internal.Parser().Type("com.github.openshift.api.operator.v1.Etcd"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(etcd.Name) + + b.WithKind("Etcd") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *EtcdApplyConfiguration) WithKind(value string) *EtcdApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *EtcdApplyConfiguration) WithAPIVersion(value string) *EtcdApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *EtcdApplyConfiguration) WithName(value string) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *EtcdApplyConfiguration) WithGenerateName(value string) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *EtcdApplyConfiguration) WithNamespace(value string) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *EtcdApplyConfiguration) WithUID(value types.UID) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *EtcdApplyConfiguration) WithResourceVersion(value string) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *EtcdApplyConfiguration) WithGeneration(value int64) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *EtcdApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *EtcdApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *EtcdApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *EtcdApplyConfiguration) WithLabels(entries map[string]string) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *EtcdApplyConfiguration) WithAnnotations(entries map[string]string) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *EtcdApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *EtcdApplyConfiguration) WithFinalizers(values ...string) *EtcdApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *EtcdApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *EtcdApplyConfiguration) WithSpec(value *EtcdSpecApplyConfiguration) *EtcdApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *EtcdApplyConfiguration) WithStatus(value *EtcdStatusApplyConfiguration) *EtcdApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EtcdApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdspec.go new file mode 100644 index 0000000000..6588c09224 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdspec.go @@ -0,0 +1,102 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// EtcdSpecApplyConfiguration represents a declarative configuration of the EtcdSpec type for use +// with apply. +type EtcdSpecApplyConfiguration struct { + StaticPodOperatorSpecApplyConfiguration `json:",inline"` + HardwareSpeed *operatorv1.ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed,omitempty"` + BackendQuotaGiB *int32 `json:"backendQuotaGiB,omitempty"` +} + +// EtcdSpecApplyConfiguration constructs a declarative configuration of the EtcdSpec type for use with +// apply. +func EtcdSpec() *EtcdSpecApplyConfiguration { + return &EtcdSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *EtcdSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *EtcdSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *EtcdSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *EtcdSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *EtcdSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *EtcdSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. 
+func (b *EtcdSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *EtcdSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *EtcdSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *EtcdSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} + +// WithForceRedeploymentReason sets the ForceRedeploymentReason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForceRedeploymentReason field is set to the value of the last call. +func (b *EtcdSpecApplyConfiguration) WithForceRedeploymentReason(value string) *EtcdSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.ForceRedeploymentReason = &value + return b +} + +// WithFailedRevisionLimit sets the FailedRevisionLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailedRevisionLimit field is set to the value of the last call. +func (b *EtcdSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *EtcdSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.FailedRevisionLimit = &value + return b +} + +// WithSucceededRevisionLimit sets the SucceededRevisionLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SucceededRevisionLimit field is set to the value of the last call. +func (b *EtcdSpecApplyConfiguration) WithSucceededRevisionLimit(value int32) *EtcdSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.SucceededRevisionLimit = &value + return b +} + +// WithHardwareSpeed sets the HardwareSpeed field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HardwareSpeed field is set to the value of the last call. +func (b *EtcdSpecApplyConfiguration) WithHardwareSpeed(value operatorv1.ControlPlaneHardwareSpeed) *EtcdSpecApplyConfiguration { + b.HardwareSpeed = &value + return b +} + +// WithBackendQuotaGiB sets the BackendQuotaGiB field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BackendQuotaGiB field is set to the value of the last call. 
+func (b *EtcdSpecApplyConfiguration) WithBackendQuotaGiB(value int32) *EtcdSpecApplyConfiguration { + b.BackendQuotaGiB = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdstatus.go new file mode 100644 index 0000000000..a6fa6f07d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcdstatus.go @@ -0,0 +1,107 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// EtcdStatusApplyConfiguration represents a declarative configuration of the EtcdStatus type for use +// with apply. +type EtcdStatusApplyConfiguration struct { + StaticPodOperatorStatusApplyConfiguration `json:",inline"` + HardwareSpeed *operatorv1.ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed,omitempty"` +} + +// EtcdStatusApplyConfiguration constructs a declarative configuration of the EtcdStatus type for use with +// apply. +func EtcdStatus() *EtcdStatusApplyConfiguration { + return &EtcdStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *EtcdStatusApplyConfiguration) WithObservedGeneration(value int64) *EtcdStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *EtcdStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *EtcdStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *EtcdStatusApplyConfiguration) WithVersion(value string) *EtcdStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *EtcdStatusApplyConfiguration) WithReadyReplicas(value int32) *EtcdStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *EtcdStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *EtcdStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *EtcdStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *EtcdStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} + +// WithLatestAvailableRevisionReason sets the LatestAvailableRevisionReason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevisionReason field is set to the value of the last call. +func (b *EtcdStatusApplyConfiguration) WithLatestAvailableRevisionReason(value string) *EtcdStatusApplyConfiguration { + b.StaticPodOperatorStatusApplyConfiguration.LatestAvailableRevisionReason = &value + return b +} + +// WithNodeStatuses adds the given value to the NodeStatuses field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NodeStatuses field. +func (b *EtcdStatusApplyConfiguration) WithNodeStatuses(values ...*NodeStatusApplyConfiguration) *EtcdStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithNodeStatuses") + } + b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses = append(b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses, *values[i]) + } + return b +} + +// WithHardwareSpeed sets the HardwareSpeed field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HardwareSpeed field is set to the value of the last call. +func (b *EtcdStatusApplyConfiguration) WithHardwareSpeed(value operatorv1.ControlPlaneHardwareSpeed) *EtcdStatusApplyConfiguration { + b.HardwareSpeed = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/exportnetworkflows.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/exportnetworkflows.go new file mode 100644 index 0000000000..2a41522482 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/exportnetworkflows.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ExportNetworkFlowsApplyConfiguration represents a declarative configuration of the ExportNetworkFlows type for use +// with apply. 
+type ExportNetworkFlowsApplyConfiguration struct { + NetFlow *NetFlowConfigApplyConfiguration `json:"netFlow,omitempty"` + SFlow *SFlowConfigApplyConfiguration `json:"sFlow,omitempty"` + IPFIX *IPFIXConfigApplyConfiguration `json:"ipfix,omitempty"` +} + +// ExportNetworkFlowsApplyConfiguration constructs a declarative configuration of the ExportNetworkFlows type for use with +// apply. +func ExportNetworkFlows() *ExportNetworkFlowsApplyConfiguration { + return &ExportNetworkFlowsApplyConfiguration{} +} + +// WithNetFlow sets the NetFlow field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetFlow field is set to the value of the last call. +func (b *ExportNetworkFlowsApplyConfiguration) WithNetFlow(value *NetFlowConfigApplyConfiguration) *ExportNetworkFlowsApplyConfiguration { + b.NetFlow = value + return b +} + +// WithSFlow sets the SFlow field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SFlow field is set to the value of the last call. +func (b *ExportNetworkFlowsApplyConfiguration) WithSFlow(value *SFlowConfigApplyConfiguration) *ExportNetworkFlowsApplyConfiguration { + b.SFlow = value + return b +} + +// WithIPFIX sets the IPFIX field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPFIX field is set to the value of the last call. +func (b *ExportNetworkFlowsApplyConfiguration) WithIPFIX(value *IPFIXConfigApplyConfiguration) *ExportNetworkFlowsApplyConfiguration { + b.IPFIX = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/featuresmigration.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/featuresmigration.go new file mode 100644 index 0000000000..aec6910799 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/featuresmigration.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// FeaturesMigrationApplyConfiguration represents a declarative configuration of the FeaturesMigration type for use +// with apply. +type FeaturesMigrationApplyConfiguration struct { + EgressIP *bool `json:"egressIP,omitempty"` + EgressFirewall *bool `json:"egressFirewall,omitempty"` + Multicast *bool `json:"multicast,omitempty"` +} + +// FeaturesMigrationApplyConfiguration constructs a declarative configuration of the FeaturesMigration type for use with +// apply. +func FeaturesMigration() *FeaturesMigrationApplyConfiguration { + return &FeaturesMigrationApplyConfiguration{} +} + +// WithEgressIP sets the EgressIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EgressIP field is set to the value of the last call. +func (b *FeaturesMigrationApplyConfiguration) WithEgressIP(value bool) *FeaturesMigrationApplyConfiguration { + b.EgressIP = &value + return b +} + +// WithEgressFirewall sets the EgressFirewall field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the EgressFirewall field is set to the value of the last call. +func (b *FeaturesMigrationApplyConfiguration) WithEgressFirewall(value bool) *FeaturesMigrationApplyConfiguration { + b.EgressFirewall = &value + return b +} + +// WithMulticast sets the Multicast field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Multicast field is set to the value of the last call. +func (b *FeaturesMigrationApplyConfiguration) WithMulticast(value bool) *FeaturesMigrationApplyConfiguration { + b.Multicast = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/filereferencesource.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/filereferencesource.go new file mode 100644 index 0000000000..bd8c6f54c0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/filereferencesource.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// FileReferenceSourceApplyConfiguration represents a declarative configuration of the FileReferenceSource type for use +// with apply. +type FileReferenceSourceApplyConfiguration struct { + From *operatorv1.SourceType `json:"from,omitempty"` + ConfigMap *ConfigMapFileReferenceApplyConfiguration `json:"configMap,omitempty"` +} + +// FileReferenceSourceApplyConfiguration constructs a declarative configuration of the FileReferenceSource type for use with +// apply. +func FileReferenceSource() *FileReferenceSourceApplyConfiguration { + return &FileReferenceSourceApplyConfiguration{} +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *FileReferenceSourceApplyConfiguration) WithFrom(value operatorv1.SourceType) *FileReferenceSourceApplyConfiguration { + b.From = &value + return b +} + +// WithConfigMap sets the ConfigMap field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConfigMap field is set to the value of the last call. +func (b *FileReferenceSourceApplyConfiguration) WithConfigMap(value *ConfigMapFileReferenceApplyConfiguration) *FileReferenceSourceApplyConfiguration { + b.ConfigMap = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go new file mode 100644 index 0000000000..5d0112b460 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go @@ -0,0 +1,56 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// ForwardPluginApplyConfiguration represents a declarative configuration of the ForwardPlugin type for use +// with apply. 
+type ForwardPluginApplyConfiguration struct { + Upstreams []string `json:"upstreams,omitempty"` + Policy *operatorv1.ForwardingPolicy `json:"policy,omitempty"` + TransportConfig *DNSTransportConfigApplyConfiguration `json:"transportConfig,omitempty"` + ProtocolStrategy *operatorv1.ProtocolStrategy `json:"protocolStrategy,omitempty"` +} + +// ForwardPluginApplyConfiguration constructs a declarative configuration of the ForwardPlugin type for use with +// apply. +func ForwardPlugin() *ForwardPluginApplyConfiguration { + return &ForwardPluginApplyConfiguration{} +} + +// WithUpstreams adds the given value to the Upstreams field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Upstreams field. +func (b *ForwardPluginApplyConfiguration) WithUpstreams(values ...string) *ForwardPluginApplyConfiguration { + for i := range values { + b.Upstreams = append(b.Upstreams, values[i]) + } + return b +} + +// WithPolicy sets the Policy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Policy field is set to the value of the last call. +func (b *ForwardPluginApplyConfiguration) WithPolicy(value operatorv1.ForwardingPolicy) *ForwardPluginApplyConfiguration { + b.Policy = &value + return b +} + +// WithTransportConfig sets the TransportConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TransportConfig field is set to the value of the last call. +func (b *ForwardPluginApplyConfiguration) WithTransportConfig(value *DNSTransportConfigApplyConfiguration) *ForwardPluginApplyConfiguration { + b.TransportConfig = value + return b +} + +// WithProtocolStrategy sets the ProtocolStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProtocolStrategy field is set to the value of the last call. +func (b *ForwardPluginApplyConfiguration) WithProtocolStrategy(value operatorv1.ProtocolStrategy) *ForwardPluginApplyConfiguration { + b.ProtocolStrategy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go new file mode 100644 index 0000000000..a18f0400c5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// GatewayConfigApplyConfiguration represents a declarative configuration of the GatewayConfig type for use +// with apply. 
+type GatewayConfigApplyConfiguration struct { + RoutingViaHost *bool `json:"routingViaHost,omitempty"` + IPForwarding *operatorv1.IPForwardingMode `json:"ipForwarding,omitempty"` + IPv4 *IPv4GatewayConfigApplyConfiguration `json:"ipv4,omitempty"` + IPv6 *IPv6GatewayConfigApplyConfiguration `json:"ipv6,omitempty"` +} + +// GatewayConfigApplyConfiguration constructs a declarative configuration of the GatewayConfig type for use with +// apply. +func GatewayConfig() *GatewayConfigApplyConfiguration { + return &GatewayConfigApplyConfiguration{} +} + +// WithRoutingViaHost sets the RoutingViaHost field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RoutingViaHost field is set to the value of the last call. +func (b *GatewayConfigApplyConfiguration) WithRoutingViaHost(value bool) *GatewayConfigApplyConfiguration { + b.RoutingViaHost = &value + return b +} + +// WithIPForwarding sets the IPForwarding field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPForwarding field is set to the value of the last call. +func (b *GatewayConfigApplyConfiguration) WithIPForwarding(value operatorv1.IPForwardingMode) *GatewayConfigApplyConfiguration { + b.IPForwarding = &value + return b +} + +// WithIPv4 sets the IPv4 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv4 field is set to the value of the last call. +func (b *GatewayConfigApplyConfiguration) WithIPv4(value *IPv4GatewayConfigApplyConfiguration) *GatewayConfigApplyConfiguration { + b.IPv4 = value + return b +} + +// WithIPv6 sets the IPv6 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv6 field is set to the value of the last call. +func (b *GatewayConfigApplyConfiguration) WithIPv6(value *IPv6GatewayConfigApplyConfiguration) *GatewayConfigApplyConfiguration { + b.IPv6 = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gathererstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gathererstatus.go new file mode 100644 index 0000000000..b2fd36c264 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gathererstatus.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// GathererStatusApplyConfiguration represents a declarative configuration of the GathererStatus type for use +// with apply. +type GathererStatusApplyConfiguration struct { + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Name *string `json:"name,omitempty"` + LastGatherDuration *apismetav1.Duration `json:"lastGatherDuration,omitempty"` +} + +// GathererStatusApplyConfiguration constructs a declarative configuration of the GathererStatus type for use with +// apply. 
+func GathererStatus() *GathererStatusApplyConfiguration { + return &GathererStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *GathererStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *GathererStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *GathererStatusApplyConfiguration) WithName(value string) *GathererStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithLastGatherDuration sets the LastGatherDuration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastGatherDuration field is set to the value of the last call. +func (b *GathererStatusApplyConfiguration) WithLastGatherDuration(value apismetav1.Duration) *GathererStatusApplyConfiguration { + b.LastGatherDuration = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatherstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatherstatus.go new file mode 100644 index 0000000000..e2601419af --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatherstatus.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GatherStatusApplyConfiguration represents a declarative configuration of the GatherStatus type for use +// with apply. +type GatherStatusApplyConfiguration struct { + LastGatherTime *metav1.Time `json:"lastGatherTime,omitempty"` + LastGatherDuration *metav1.Duration `json:"lastGatherDuration,omitempty"` + Gatherers []GathererStatusApplyConfiguration `json:"gatherers,omitempty"` +} + +// GatherStatusApplyConfiguration constructs a declarative configuration of the GatherStatus type for use with +// apply. +func GatherStatus() *GatherStatusApplyConfiguration { + return &GatherStatusApplyConfiguration{} +} + +// WithLastGatherTime sets the LastGatherTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastGatherTime field is set to the value of the last call. +func (b *GatherStatusApplyConfiguration) WithLastGatherTime(value metav1.Time) *GatherStatusApplyConfiguration { + b.LastGatherTime = &value + return b +} + +// WithLastGatherDuration sets the LastGatherDuration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastGatherDuration field is set to the value of the last call. 
+func (b *GatherStatusApplyConfiguration) WithLastGatherDuration(value metav1.Duration) *GatherStatusApplyConfiguration { + b.LastGatherDuration = &value + return b +} + +// WithGatherers adds the given value to the Gatherers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Gatherers field. +func (b *GatherStatusApplyConfiguration) WithGatherers(values ...*GathererStatusApplyConfiguration) *GatherStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGatherers") + } + b.Gatherers = append(b.Gatherers, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpcsidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpcsidriverconfigspec.go new file mode 100644 index 0000000000..e8a6d3c783 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpcsidriverconfigspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GCPCSIDriverConfigSpecApplyConfiguration represents a declarative configuration of the GCPCSIDriverConfigSpec type for use +// with apply. +type GCPCSIDriverConfigSpecApplyConfiguration struct { + KMSKey *GCPKMSKeyReferenceApplyConfiguration `json:"kmsKey,omitempty"` +} + +// GCPCSIDriverConfigSpecApplyConfiguration constructs a declarative configuration of the GCPCSIDriverConfigSpec type for use with +// apply. +func GCPCSIDriverConfigSpec() *GCPCSIDriverConfigSpecApplyConfiguration { + return &GCPCSIDriverConfigSpecApplyConfiguration{} +} + +// WithKMSKey sets the KMSKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KMSKey field is set to the value of the last call. +func (b *GCPCSIDriverConfigSpecApplyConfiguration) WithKMSKey(value *GCPKMSKeyReferenceApplyConfiguration) *GCPCSIDriverConfigSpecApplyConfiguration { + b.KMSKey = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpkmskeyreference.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpkmskeyreference.go new file mode 100644 index 0000000000..f443b5a59f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpkmskeyreference.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GCPKMSKeyReferenceApplyConfiguration represents a declarative configuration of the GCPKMSKeyReference type for use +// with apply. +type GCPKMSKeyReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + KeyRing *string `json:"keyRing,omitempty"` + ProjectID *string `json:"projectID,omitempty"` + Location *string `json:"location,omitempty"` +} + +// GCPKMSKeyReferenceApplyConfiguration constructs a declarative configuration of the GCPKMSKeyReference type for use with +// apply. 
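A small sketch of how the variadic adders nest one generated builder inside another, using only the GatherStatus and GathererStatus setters shown above; the gatherer name and durations are illustrative:

package main

import (
	"time"

	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Slice-valued fields take pointers to sub-builders via the variadic "With" adders;
	// repeated calls append rather than overwrite.
	gather := applyoperatorv1.GatherStatus().
		WithLastGatherTime(metav1.Now()).
		WithLastGatherDuration(metav1.Duration{Duration: 5 * time.Minute}).
		WithGatherers(
			applyoperatorv1.GathererStatus().
				WithName("clusterconfig"). // illustrative gatherer name
				WithLastGatherDuration(metav1.Duration{Duration: 30 * time.Second}),
		)
	_ = gather
}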
+func GCPKMSKeyReference() *GCPKMSKeyReferenceApplyConfiguration { + return &GCPKMSKeyReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *GCPKMSKeyReferenceApplyConfiguration) WithName(value string) *GCPKMSKeyReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithKeyRing sets the KeyRing field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeyRing field is set to the value of the last call. +func (b *GCPKMSKeyReferenceApplyConfiguration) WithKeyRing(value string) *GCPKMSKeyReferenceApplyConfiguration { + b.KeyRing = &value + return b +} + +// WithProjectID sets the ProjectID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProjectID field is set to the value of the last call. +func (b *GCPKMSKeyReferenceApplyConfiguration) WithProjectID(value string) *GCPKMSKeyReferenceApplyConfiguration { + b.ProjectID = &value + return b +} + +// WithLocation sets the Location field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Location field is set to the value of the last call. +func (b *GCPKMSKeyReferenceApplyConfiguration) WithLocation(value string) *GCPKMSKeyReferenceApplyConfiguration { + b.Location = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcploadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcploadbalancerparameters.go new file mode 100644 index 0000000000..dbb6217201 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcploadbalancerparameters.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// GCPLoadBalancerParametersApplyConfiguration represents a declarative configuration of the GCPLoadBalancerParameters type for use +// with apply. +type GCPLoadBalancerParametersApplyConfiguration struct { + ClientAccess *operatorv1.GCPClientAccess `json:"clientAccess,omitempty"` +} + +// GCPLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the GCPLoadBalancerParameters type for use with +// apply. +func GCPLoadBalancerParameters() *GCPLoadBalancerParametersApplyConfiguration { + return &GCPLoadBalancerParametersApplyConfiguration{} +} + +// WithClientAccess sets the ClientAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientAccess field is set to the value of the last call. 
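The pointer-valued setters take a sub-builder directly, as the WithKMSKey signature above shows; a minimal sketch with placeholder key, key-ring, project and location values:

package main

import (
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
)

func main() {
	// WithKMSKey accepts the *GCPKMSKeyReferenceApplyConfiguration as-is (no dereference),
	// so the nested reference can be built inline.
	driverCfg := applyoperatorv1.GCPCSIDriverConfigSpec().
		WithKMSKey(applyoperatorv1.GCPKMSKeyReference().
			WithName("disk-encryption-key").  // illustrative
			WithKeyRing("openshift-keyring"). // illustrative
			WithProjectID("my-gcp-project").  // illustrative
			WithLocation("global"))           // illustrative
	_ = driverCfg
}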
+func (b *GCPLoadBalancerParametersApplyConfiguration) WithClientAccess(value operatorv1.GCPClientAccess) *GCPLoadBalancerParametersApplyConfiguration { + b.ClientAccess = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/generationstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/generationstatus.go new file mode 100644 index 0000000000..074b33869b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/generationstatus.go @@ -0,0 +1,68 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GenerationStatusApplyConfiguration represents a declarative configuration of the GenerationStatus type for use +// with apply. +type GenerationStatusApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + LastGeneration *int64 `json:"lastGeneration,omitempty"` + Hash *string `json:"hash,omitempty"` +} + +// GenerationStatusApplyConfiguration constructs a declarative configuration of the GenerationStatus type for use with +// apply. +func GenerationStatus() *GenerationStatusApplyConfiguration { + return &GenerationStatusApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *GenerationStatusApplyConfiguration) WithGroup(value string) *GenerationStatusApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *GenerationStatusApplyConfiguration) WithResource(value string) *GenerationStatusApplyConfiguration { + b.Resource = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *GenerationStatusApplyConfiguration) WithNamespace(value string) *GenerationStatusApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *GenerationStatusApplyConfiguration) WithName(value string) *GenerationStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithLastGeneration sets the LastGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastGeneration field is set to the value of the last call. 
+func (b *GenerationStatusApplyConfiguration) WithLastGeneration(value int64) *GenerationStatusApplyConfiguration { + b.LastGeneration = &value + return b +} + +// WithHash sets the Hash field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Hash field is set to the value of the last call. +func (b *GenerationStatusApplyConfiguration) WithHash(value string) *GenerationStatusApplyConfiguration { + b.Hash = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/healthcheck.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/healthcheck.go new file mode 100644 index 0000000000..0f4cfac5a6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/healthcheck.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// HealthCheckApplyConfiguration represents a declarative configuration of the HealthCheck type for use +// with apply. +type HealthCheckApplyConfiguration struct { + Description *string `json:"description,omitempty"` + TotalRisk *int32 `json:"totalRisk,omitempty"` + AdvisorURI *string `json:"advisorURI,omitempty"` + State *operatorv1.HealthCheckState `json:"state,omitempty"` +} + +// HealthCheckApplyConfiguration constructs a declarative configuration of the HealthCheck type for use with +// apply. +func HealthCheck() *HealthCheckApplyConfiguration { + return &HealthCheckApplyConfiguration{} +} + +// WithDescription sets the Description field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Description field is set to the value of the last call. +func (b *HealthCheckApplyConfiguration) WithDescription(value string) *HealthCheckApplyConfiguration { + b.Description = &value + return b +} + +// WithTotalRisk sets the TotalRisk field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TotalRisk field is set to the value of the last call. +func (b *HealthCheckApplyConfiguration) WithTotalRisk(value int32) *HealthCheckApplyConfiguration { + b.TotalRisk = &value + return b +} + +// WithAdvisorURI sets the AdvisorURI field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdvisorURI field is set to the value of the last call. +func (b *HealthCheckApplyConfiguration) WithAdvisorURI(value string) *HealthCheckApplyConfiguration { + b.AdvisorURI = &value + return b +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. 
+func (b *HealthCheckApplyConfiguration) WithState(value operatorv1.HealthCheckState) *HealthCheckApplyConfiguration { + b.State = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hostnetworkstrategy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hostnetworkstrategy.go new file mode 100644 index 0000000000..a667c16f39 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hostnetworkstrategy.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// HostNetworkStrategyApplyConfiguration represents a declarative configuration of the HostNetworkStrategy type for use +// with apply. +type HostNetworkStrategyApplyConfiguration struct { + Protocol *operatorv1.IngressControllerProtocol `json:"protocol,omitempty"` + HTTPPort *int32 `json:"httpPort,omitempty"` + HTTPSPort *int32 `json:"httpsPort,omitempty"` + StatsPort *int32 `json:"statsPort,omitempty"` +} + +// HostNetworkStrategyApplyConfiguration constructs a declarative configuration of the HostNetworkStrategy type for use with +// apply. +func HostNetworkStrategy() *HostNetworkStrategyApplyConfiguration { + return &HostNetworkStrategyApplyConfiguration{} +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. +func (b *HostNetworkStrategyApplyConfiguration) WithProtocol(value operatorv1.IngressControllerProtocol) *HostNetworkStrategyApplyConfiguration { + b.Protocol = &value + return b +} + +// WithHTTPPort sets the HTTPPort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPPort field is set to the value of the last call. +func (b *HostNetworkStrategyApplyConfiguration) WithHTTPPort(value int32) *HostNetworkStrategyApplyConfiguration { + b.HTTPPort = &value + return b +} + +// WithHTTPSPort sets the HTTPSPort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPSPort field is set to the value of the last call. +func (b *HostNetworkStrategyApplyConfiguration) WithHTTPSPort(value int32) *HostNetworkStrategyApplyConfiguration { + b.HTTPSPort = &value + return b +} + +// WithStatsPort sets the StatsPort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StatsPort field is set to the value of the last call. 
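A sketch of the HealthCheck builder; the description, risk and URI are placeholders, and the string conversion for the state assumes HealthCheckState is the usual string-backed enum from github.com/openshift/api/operator/v1 rather than naming a specific constant:

package main

import (
	operatorv1 "github.com/openshift/api/operator/v1"
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
)

func main() {
	// Scalar setters store a pointer to a copy of the argument, so literals are fine here.
	check := applyoperatorv1.HealthCheck().
		WithDescription("example insights health check"). // illustrative
		WithTotalRisk(2).
		WithAdvisorURI("https://console.redhat.com/openshift/insights/advisor"). // illustrative
		WithState(operatorv1.HealthCheckState("Enabled")) // assumed value; prefer the API's constant if defined
	_ = check
}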
+func (b *HostNetworkStrategyApplyConfiguration) WithStatsPort(value int32) *HostNetworkStrategyApplyConfiguration { + b.StatsPort = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/httpcompressionpolicy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/httpcompressionpolicy.go new file mode 100644 index 0000000000..cd83a0461c --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/httpcompressionpolicy.go @@ -0,0 +1,29 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// HTTPCompressionPolicyApplyConfiguration represents a declarative configuration of the HTTPCompressionPolicy type for use +// with apply. +type HTTPCompressionPolicyApplyConfiguration struct { + MimeTypes []operatorv1.CompressionMIMEType `json:"mimeTypes,omitempty"` +} + +// HTTPCompressionPolicyApplyConfiguration constructs a declarative configuration of the HTTPCompressionPolicy type for use with +// apply. +func HTTPCompressionPolicy() *HTTPCompressionPolicyApplyConfiguration { + return &HTTPCompressionPolicyApplyConfiguration{} +} + +// WithMimeTypes adds the given value to the MimeTypes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MimeTypes field. +func (b *HTTPCompressionPolicyApplyConfiguration) WithMimeTypes(values ...operatorv1.CompressionMIMEType) *HTTPCompressionPolicyApplyConfiguration { + for i := range values { + b.MimeTypes = append(b.MimeTypes, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hybridoverlayconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hybridoverlayconfig.go new file mode 100644 index 0000000000..71bd65561c --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/hybridoverlayconfig.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// HybridOverlayConfigApplyConfiguration represents a declarative configuration of the HybridOverlayConfig type for use +// with apply. +type HybridOverlayConfigApplyConfiguration struct { + HybridClusterNetwork []ClusterNetworkEntryApplyConfiguration `json:"hybridClusterNetwork,omitempty"` + HybridOverlayVXLANPort *uint32 `json:"hybridOverlayVXLANPort,omitempty"` +} + +// HybridOverlayConfigApplyConfiguration constructs a declarative configuration of the HybridOverlayConfig type for use with +// apply. +func HybridOverlayConfig() *HybridOverlayConfigApplyConfiguration { + return &HybridOverlayConfigApplyConfiguration{} +} + +// WithHybridClusterNetwork adds the given value to the HybridClusterNetwork field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the HybridClusterNetwork field. 
+func (b *HybridOverlayConfigApplyConfiguration) WithHybridClusterNetwork(values ...*ClusterNetworkEntryApplyConfiguration) *HybridOverlayConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithHybridClusterNetwork") + } + b.HybridClusterNetwork = append(b.HybridClusterNetwork, *values[i]) + } + return b +} + +// WithHybridOverlayVXLANPort sets the HybridOverlayVXLANPort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HybridOverlayVXLANPort field is set to the value of the last call. +func (b *HybridOverlayConfigApplyConfiguration) WithHybridOverlayVXLANPort(value uint32) *HybridOverlayConfigApplyConfiguration { + b.HybridOverlayVXLANPort = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmcloudcsidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmcloudcsidriverconfigspec.go new file mode 100644 index 0000000000..56069b7c1c --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmcloudcsidriverconfigspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IBMCloudCSIDriverConfigSpecApplyConfiguration represents a declarative configuration of the IBMCloudCSIDriverConfigSpec type for use +// with apply. +type IBMCloudCSIDriverConfigSpecApplyConfiguration struct { + EncryptionKeyCRN *string `json:"encryptionKeyCRN,omitempty"` +} + +// IBMCloudCSIDriverConfigSpecApplyConfiguration constructs a declarative configuration of the IBMCloudCSIDriverConfigSpec type for use with +// apply. +func IBMCloudCSIDriverConfigSpec() *IBMCloudCSIDriverConfigSpecApplyConfiguration { + return &IBMCloudCSIDriverConfigSpecApplyConfiguration{} +} + +// WithEncryptionKeyCRN sets the EncryptionKeyCRN field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EncryptionKeyCRN field is set to the value of the last call. +func (b *IBMCloudCSIDriverConfigSpecApplyConfiguration) WithEncryptionKeyCRN(value string) *IBMCloudCSIDriverConfigSpecApplyConfiguration { + b.EncryptionKeyCRN = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go new file mode 100644 index 0000000000..065c61554d --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// IBMLoadBalancerParametersApplyConfiguration represents a declarative configuration of the IBMLoadBalancerParameters type for use +// with apply. +type IBMLoadBalancerParametersApplyConfiguration struct { + Protocol *operatorv1.IngressControllerProtocol `json:"protocol,omitempty"` +} + +// IBMLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the IBMLoadBalancerParameters type for use with +// apply. 
+func IBMLoadBalancerParameters() *IBMLoadBalancerParametersApplyConfiguration { + return &IBMLoadBalancerParametersApplyConfiguration{} +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. +func (b *IBMLoadBalancerParametersApplyConfiguration) WithProtocol(value operatorv1.IngressControllerProtocol) *IBMLoadBalancerParametersApplyConfiguration { + b.Protocol = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingress.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingress.go new file mode 100644 index 0000000000..2993a361fa --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingress.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use +// with apply. +type IngressApplyConfiguration struct { + ConsoleURL *string `json:"consoleURL,omitempty"` + ClientDownloadsURL *string `json:"clientDownloadsURL,omitempty"` +} + +// IngressApplyConfiguration constructs a declarative configuration of the Ingress type for use with +// apply. +func Ingress() *IngressApplyConfiguration { + return &IngressApplyConfiguration{} +} + +// WithConsoleURL sets the ConsoleURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConsoleURL field is set to the value of the last call. +func (b *IngressApplyConfiguration) WithConsoleURL(value string) *IngressApplyConfiguration { + b.ConsoleURL = &value + return b +} + +// WithClientDownloadsURL sets the ClientDownloadsURL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientDownloadsURL field is set to the value of the last call. +func (b *IngressApplyConfiguration) WithClientDownloadsURL(value string) *IngressApplyConfiguration { + b.ClientDownloadsURL = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go new file mode 100644 index 0000000000..e231390144 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go @@ -0,0 +1,248 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// IngressControllerApplyConfiguration represents a declarative configuration of the IngressController type for use +// with apply. 
+type IngressControllerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IngressControllerSpecApplyConfiguration `json:"spec,omitempty"` + Status *IngressControllerStatusApplyConfiguration `json:"status,omitempty"` +} + +// IngressController constructs a declarative configuration of the IngressController type for use with +// apply. +func IngressController(name, namespace string) *IngressControllerApplyConfiguration { + b := &IngressControllerApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("IngressController") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractIngressController extracts the applied configuration owned by fieldManager from +// ingressController. If no managedFields are found in ingressController for fieldManager, a +// IngressControllerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// ingressController must be a unmodified IngressController API object that was retrieved from the Kubernetes API. +// ExtractIngressController provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractIngressController(ingressController *operatorv1.IngressController, fieldManager string) (*IngressControllerApplyConfiguration, error) { + return extractIngressController(ingressController, fieldManager, "") +} + +// ExtractIngressControllerStatus is the same as ExtractIngressController except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractIngressControllerStatus(ingressController *operatorv1.IngressController, fieldManager string) (*IngressControllerApplyConfiguration, error) { + return extractIngressController(ingressController, fieldManager, "status") +} + +func extractIngressController(ingressController *operatorv1.IngressController, fieldManager string, subresource string) (*IngressControllerApplyConfiguration, error) { + b := &IngressControllerApplyConfiguration{} + err := managedfields.ExtractInto(ingressController, internal.Parser().Type("com.github.openshift.api.operator.v1.IngressController"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(ingressController.Name) + b.WithNamespace(ingressController.Namespace) + + b.WithKind("IngressController") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *IngressControllerApplyConfiguration) WithKind(value string) *IngressControllerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *IngressControllerApplyConfiguration) WithAPIVersion(value string) *IngressControllerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IngressControllerApplyConfiguration) WithName(value string) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *IngressControllerApplyConfiguration) WithGenerateName(value string) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *IngressControllerApplyConfiguration) WithNamespace(value string) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *IngressControllerApplyConfiguration) WithUID(value types.UID) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *IngressControllerApplyConfiguration) WithResourceVersion(value string) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *IngressControllerApplyConfiguration) WithGeneration(value int64) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *IngressControllerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *IngressControllerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *IngressControllerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *IngressControllerApplyConfiguration) WithLabels(entries map[string]string) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *IngressControllerApplyConfiguration) WithAnnotations(entries map[string]string) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *IngressControllerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *IngressControllerApplyConfiguration) WithFinalizers(values ...string) *IngressControllerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *IngressControllerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *IngressControllerApplyConfiguration) WithSpec(value *IngressControllerSpecApplyConfiguration) *IngressControllerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *IngressControllerApplyConfiguration) WithStatus(value *IngressControllerStatusApplyConfiguration) *IngressControllerApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IngressControllerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookie.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookie.go new file mode 100644 index 0000000000..dbcd3d9e56 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookie.go @@ -0,0 +1,52 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// IngressControllerCaptureHTTPCookieApplyConfiguration represents a declarative configuration of the IngressControllerCaptureHTTPCookie type for use +// with apply. +type IngressControllerCaptureHTTPCookieApplyConfiguration struct { + IngressControllerCaptureHTTPCookieUnionApplyConfiguration `json:",inline"` + MaxLength *int `json:"maxLength,omitempty"` +} + +// IngressControllerCaptureHTTPCookieApplyConfiguration constructs a declarative configuration of the IngressControllerCaptureHTTPCookie type for use with +// apply. +func IngressControllerCaptureHTTPCookie() *IngressControllerCaptureHTTPCookieApplyConfiguration { + return &IngressControllerCaptureHTTPCookieApplyConfiguration{} +} + +// WithMatchType sets the MatchType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchType field is set to the value of the last call. +func (b *IngressControllerCaptureHTTPCookieApplyConfiguration) WithMatchType(value operatorv1.CookieMatchType) *IngressControllerCaptureHTTPCookieApplyConfiguration { + b.IngressControllerCaptureHTTPCookieUnionApplyConfiguration.MatchType = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IngressControllerCaptureHTTPCookieApplyConfiguration) WithName(value string) *IngressControllerCaptureHTTPCookieApplyConfiguration { + b.IngressControllerCaptureHTTPCookieUnionApplyConfiguration.Name = &value + return b +} + +// WithNamePrefix sets the NamePrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamePrefix field is set to the value of the last call. +func (b *IngressControllerCaptureHTTPCookieApplyConfiguration) WithNamePrefix(value string) *IngressControllerCaptureHTTPCookieApplyConfiguration { + b.IngressControllerCaptureHTTPCookieUnionApplyConfiguration.NamePrefix = &value + return b +} + +// WithMaxLength sets the MaxLength field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxLength field is set to the value of the last call. 
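For namespaced root objects such as IngressController, the constructor above already stamps name, namespace, kind and apiVersion, so only the remaining fields need chaining; a sketch with placeholder metadata:

package main

import (
	"fmt"

	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
)

func main() {
	// IngressController(name, namespace) pre-populates the identity fields that
	// server-side apply needs for a fully specified intent.
	ic := applyoperatorv1.IngressController("default", "openshift-ingress-operator").
		WithLabels(map[string]string{"app": "router"}).        // illustrative label
		WithAnnotations(map[string]string{"owner": "network"}) // illustrative annotation

	fmt.Println(*ic.GetName()) // prints "default"
}

Spec and Status would be attached with WithSpec/WithStatus using the corresponding builders generated elsewhere in this package, and ExtractIngressController can lift an existing object's field-manager-owned fields into such a builder before re-applying.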
+func (b *IngressControllerCaptureHTTPCookieApplyConfiguration) WithMaxLength(value int) *IngressControllerCaptureHTTPCookieApplyConfiguration { + b.MaxLength = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookieunion.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookieunion.go new file mode 100644 index 0000000000..374621a87f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpcookieunion.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// IngressControllerCaptureHTTPCookieUnionApplyConfiguration represents a declarative configuration of the IngressControllerCaptureHTTPCookieUnion type for use +// with apply. +type IngressControllerCaptureHTTPCookieUnionApplyConfiguration struct { + MatchType *operatorv1.CookieMatchType `json:"matchType,omitempty"` + Name *string `json:"name,omitempty"` + NamePrefix *string `json:"namePrefix,omitempty"` +} + +// IngressControllerCaptureHTTPCookieUnionApplyConfiguration constructs a declarative configuration of the IngressControllerCaptureHTTPCookieUnion type for use with +// apply. +func IngressControllerCaptureHTTPCookieUnion() *IngressControllerCaptureHTTPCookieUnionApplyConfiguration { + return &IngressControllerCaptureHTTPCookieUnionApplyConfiguration{} +} + +// WithMatchType sets the MatchType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchType field is set to the value of the last call. +func (b *IngressControllerCaptureHTTPCookieUnionApplyConfiguration) WithMatchType(value operatorv1.CookieMatchType) *IngressControllerCaptureHTTPCookieUnionApplyConfiguration { + b.MatchType = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IngressControllerCaptureHTTPCookieUnionApplyConfiguration) WithName(value string) *IngressControllerCaptureHTTPCookieUnionApplyConfiguration { + b.Name = &value + return b +} + +// WithNamePrefix sets the NamePrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamePrefix field is set to the value of the last call. +func (b *IngressControllerCaptureHTTPCookieUnionApplyConfiguration) WithNamePrefix(value string) *IngressControllerCaptureHTTPCookieUnionApplyConfiguration { + b.NamePrefix = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpheader.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpheader.go new file mode 100644 index 0000000000..88b2166b71 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpheader.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// IngressControllerCaptureHTTPHeaderApplyConfiguration represents a declarative configuration of the IngressControllerCaptureHTTPHeader type for use +// with apply. +type IngressControllerCaptureHTTPHeaderApplyConfiguration struct { + Name *string `json:"name,omitempty"` + MaxLength *int `json:"maxLength,omitempty"` +} + +// IngressControllerCaptureHTTPHeaderApplyConfiguration constructs a declarative configuration of the IngressControllerCaptureHTTPHeader type for use with +// apply. +func IngressControllerCaptureHTTPHeader() *IngressControllerCaptureHTTPHeaderApplyConfiguration { + return &IngressControllerCaptureHTTPHeaderApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IngressControllerCaptureHTTPHeaderApplyConfiguration) WithName(value string) *IngressControllerCaptureHTTPHeaderApplyConfiguration { + b.Name = &value + return b +} + +// WithMaxLength sets the MaxLength field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxLength field is set to the value of the last call. +func (b *IngressControllerCaptureHTTPHeaderApplyConfiguration) WithMaxLength(value int) *IngressControllerCaptureHTTPHeaderApplyConfiguration { + b.MaxLength = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpheaders.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpheaders.go new file mode 100644 index 0000000000..02f7173cf0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollercapturehttpheaders.go @@ -0,0 +1,42 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressControllerCaptureHTTPHeadersApplyConfiguration represents a declarative configuration of the IngressControllerCaptureHTTPHeaders type for use +// with apply. +type IngressControllerCaptureHTTPHeadersApplyConfiguration struct { + Request []IngressControllerCaptureHTTPHeaderApplyConfiguration `json:"request,omitempty"` + Response []IngressControllerCaptureHTTPHeaderApplyConfiguration `json:"response,omitempty"` +} + +// IngressControllerCaptureHTTPHeadersApplyConfiguration constructs a declarative configuration of the IngressControllerCaptureHTTPHeaders type for use with +// apply. +func IngressControllerCaptureHTTPHeaders() *IngressControllerCaptureHTTPHeadersApplyConfiguration { + return &IngressControllerCaptureHTTPHeadersApplyConfiguration{} +} + +// WithRequest adds the given value to the Request field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Request field. 
+func (b *IngressControllerCaptureHTTPHeadersApplyConfiguration) WithRequest(values ...*IngressControllerCaptureHTTPHeaderApplyConfiguration) *IngressControllerCaptureHTTPHeadersApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRequest") + } + b.Request = append(b.Request, *values[i]) + } + return b +} + +// WithResponse adds the given value to the Response field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Response field. +func (b *IngressControllerCaptureHTTPHeadersApplyConfiguration) WithResponse(values ...*IngressControllerCaptureHTTPHeaderApplyConfiguration) *IngressControllerCaptureHTTPHeadersApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResponse") + } + b.Response = append(b.Response, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheader.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheader.go new file mode 100644 index 0000000000..f09d725b15 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheader.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressControllerHTTPHeaderApplyConfiguration represents a declarative configuration of the IngressControllerHTTPHeader type for use +// with apply. +type IngressControllerHTTPHeaderApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Action *IngressControllerHTTPHeaderActionUnionApplyConfiguration `json:"action,omitempty"` +} + +// IngressControllerHTTPHeaderApplyConfiguration constructs a declarative configuration of the IngressControllerHTTPHeader type for use with +// apply. +func IngressControllerHTTPHeader() *IngressControllerHTTPHeaderApplyConfiguration { + return &IngressControllerHTTPHeaderApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IngressControllerHTTPHeaderApplyConfiguration) WithName(value string) *IngressControllerHTTPHeaderApplyConfiguration { + b.Name = &value + return b +} + +// WithAction sets the Action field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Action field is set to the value of the last call. 
+func (b *IngressControllerHTTPHeaderApplyConfiguration) WithAction(value *IngressControllerHTTPHeaderActionUnionApplyConfiguration) *IngressControllerHTTPHeaderApplyConfiguration { + b.Action = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactions.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactions.go new file mode 100644 index 0000000000..aa4b176c4e --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactions.go @@ -0,0 +1,42 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressControllerHTTPHeaderActionsApplyConfiguration represents a declarative configuration of the IngressControllerHTTPHeaderActions type for use +// with apply. +type IngressControllerHTTPHeaderActionsApplyConfiguration struct { + Response []IngressControllerHTTPHeaderApplyConfiguration `json:"response,omitempty"` + Request []IngressControllerHTTPHeaderApplyConfiguration `json:"request,omitempty"` +} + +// IngressControllerHTTPHeaderActionsApplyConfiguration constructs a declarative configuration of the IngressControllerHTTPHeaderActions type for use with +// apply. +func IngressControllerHTTPHeaderActions() *IngressControllerHTTPHeaderActionsApplyConfiguration { + return &IngressControllerHTTPHeaderActionsApplyConfiguration{} +} + +// WithResponse adds the given value to the Response field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Response field. +func (b *IngressControllerHTTPHeaderActionsApplyConfiguration) WithResponse(values ...*IngressControllerHTTPHeaderApplyConfiguration) *IngressControllerHTTPHeaderActionsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResponse") + } + b.Response = append(b.Response, *values[i]) + } + return b +} + +// WithRequest adds the given value to the Request field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Request field. +func (b *IngressControllerHTTPHeaderActionsApplyConfiguration) WithRequest(values ...*IngressControllerHTTPHeaderApplyConfiguration) *IngressControllerHTTPHeaderActionsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRequest") + } + b.Request = append(b.Request, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactionunion.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactionunion.go new file mode 100644 index 0000000000..f6b1461062 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaderactionunion.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// IngressControllerHTTPHeaderActionUnionApplyConfiguration represents a declarative configuration of the IngressControllerHTTPHeaderActionUnion type for use +// with apply. +type IngressControllerHTTPHeaderActionUnionApplyConfiguration struct { + Type *operatorv1.IngressControllerHTTPHeaderActionType `json:"type,omitempty"` + Set *IngressControllerSetHTTPHeaderApplyConfiguration `json:"set,omitempty"` +} + +// IngressControllerHTTPHeaderActionUnionApplyConfiguration constructs a declarative configuration of the IngressControllerHTTPHeaderActionUnion type for use with +// apply. +func IngressControllerHTTPHeaderActionUnion() *IngressControllerHTTPHeaderActionUnionApplyConfiguration { + return &IngressControllerHTTPHeaderActionUnionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *IngressControllerHTTPHeaderActionUnionApplyConfiguration) WithType(value operatorv1.IngressControllerHTTPHeaderActionType) *IngressControllerHTTPHeaderActionUnionApplyConfiguration { + b.Type = &value + return b +} + +// WithSet sets the Set field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Set field is set to the value of the last call. +func (b *IngressControllerHTTPHeaderActionUnionApplyConfiguration) WithSet(value *IngressControllerSetHTTPHeaderApplyConfiguration) *IngressControllerHTTPHeaderActionUnionApplyConfiguration { + b.Set = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaders.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaders.go new file mode 100644 index 0000000000..a972c1fcb2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpheaders.go @@ -0,0 +1,56 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// IngressControllerHTTPHeadersApplyConfiguration represents a declarative configuration of the IngressControllerHTTPHeaders type for use +// with apply. +type IngressControllerHTTPHeadersApplyConfiguration struct { + ForwardedHeaderPolicy *operatorv1.IngressControllerHTTPHeaderPolicy `json:"forwardedHeaderPolicy,omitempty"` + UniqueId *IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration `json:"uniqueId,omitempty"` + HeaderNameCaseAdjustments []operatorv1.IngressControllerHTTPHeaderNameCaseAdjustment `json:"headerNameCaseAdjustments,omitempty"` + Actions *IngressControllerHTTPHeaderActionsApplyConfiguration `json:"actions,omitempty"` +} + +// IngressControllerHTTPHeadersApplyConfiguration constructs a declarative configuration of the IngressControllerHTTPHeaders type for use with +// apply. 
+func IngressControllerHTTPHeaders() *IngressControllerHTTPHeadersApplyConfiguration { + return &IngressControllerHTTPHeadersApplyConfiguration{} +} + +// WithForwardedHeaderPolicy sets the ForwardedHeaderPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForwardedHeaderPolicy field is set to the value of the last call. +func (b *IngressControllerHTTPHeadersApplyConfiguration) WithForwardedHeaderPolicy(value operatorv1.IngressControllerHTTPHeaderPolicy) *IngressControllerHTTPHeadersApplyConfiguration { + b.ForwardedHeaderPolicy = &value + return b +} + +// WithUniqueId sets the UniqueId field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UniqueId field is set to the value of the last call. +func (b *IngressControllerHTTPHeadersApplyConfiguration) WithUniqueId(value *IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration) *IngressControllerHTTPHeadersApplyConfiguration { + b.UniqueId = value + return b +} + +// WithHeaderNameCaseAdjustments adds the given value to the HeaderNameCaseAdjustments field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the HeaderNameCaseAdjustments field. +func (b *IngressControllerHTTPHeadersApplyConfiguration) WithHeaderNameCaseAdjustments(values ...operatorv1.IngressControllerHTTPHeaderNameCaseAdjustment) *IngressControllerHTTPHeadersApplyConfiguration { + for i := range values { + b.HeaderNameCaseAdjustments = append(b.HeaderNameCaseAdjustments, values[i]) + } + return b +} + +// WithActions sets the Actions field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Actions field is set to the value of the last call. +func (b *IngressControllerHTTPHeadersApplyConfiguration) WithActions(value *IngressControllerHTTPHeaderActionsApplyConfiguration) *IngressControllerHTTPHeadersApplyConfiguration { + b.Actions = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpuniqueidheaderpolicy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpuniqueidheaderpolicy.go new file mode 100644 index 0000000000..ecd183654a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerhttpuniqueidheaderpolicy.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration represents a declarative configuration of the IngressControllerHTTPUniqueIdHeaderPolicy type for use +// with apply. +type IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Format *string `json:"format,omitempty"` +} + +// IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration constructs a declarative configuration of the IngressControllerHTTPUniqueIdHeaderPolicy type for use with +// apply. 
+func IngressControllerHTTPUniqueIdHeaderPolicy() *IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration { + return &IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration) WithName(value string) *IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration { + b.Name = &value + return b +} + +// WithFormat sets the Format field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Format field is set to the value of the last call. +func (b *IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration) WithFormat(value string) *IngressControllerHTTPUniqueIdHeaderPolicyApplyConfiguration { + b.Format = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerlogging.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerlogging.go new file mode 100644 index 0000000000..881cf27f97 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerlogging.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressControllerLoggingApplyConfiguration represents a declarative configuration of the IngressControllerLogging type for use +// with apply. +type IngressControllerLoggingApplyConfiguration struct { + Access *AccessLoggingApplyConfiguration `json:"access,omitempty"` +} + +// IngressControllerLoggingApplyConfiguration constructs a declarative configuration of the IngressControllerLogging type for use with +// apply. +func IngressControllerLogging() *IngressControllerLoggingApplyConfiguration { + return &IngressControllerLoggingApplyConfiguration{} +} + +// WithAccess sets the Access field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Access field is set to the value of the last call. +func (b *IngressControllerLoggingApplyConfiguration) WithAccess(value *AccessLoggingApplyConfiguration) *IngressControllerLoggingApplyConfiguration { + b.Access = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollersethttpheader.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollersethttpheader.go new file mode 100644 index 0000000000..edad60be8e --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollersethttpheader.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressControllerSetHTTPHeaderApplyConfiguration represents a declarative configuration of the IngressControllerSetHTTPHeader type for use +// with apply. 
+type IngressControllerSetHTTPHeaderApplyConfiguration struct { + Value *string `json:"value,omitempty"` +} + +// IngressControllerSetHTTPHeaderApplyConfiguration constructs a declarative configuration of the IngressControllerSetHTTPHeader type for use with +// apply. +func IngressControllerSetHTTPHeader() *IngressControllerSetHTTPHeaderApplyConfiguration { + return &IngressControllerSetHTTPHeaderApplyConfiguration{} +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *IngressControllerSetHTTPHeaderApplyConfiguration) WithValue(value string) *IngressControllerSetHTTPHeaderApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go new file mode 100644 index 0000000000..ae23fe636a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go @@ -0,0 +1,184 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// IngressControllerSpecApplyConfiguration represents a declarative configuration of the IngressControllerSpec type for use +// with apply. +type IngressControllerSpecApplyConfiguration struct { + Domain *string `json:"domain,omitempty"` + HttpErrorCodePages *configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + EndpointPublishingStrategy *EndpointPublishingStrategyApplyConfiguration `json:"endpointPublishingStrategy,omitempty"` + DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + RouteSelector *metav1.LabelSelectorApplyConfiguration `json:"routeSelector,omitempty"` + NodePlacement *NodePlacementApplyConfiguration `json:"nodePlacement,omitempty"` + TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + ClientTLS *ClientTLSApplyConfiguration `json:"clientTLS,omitempty"` + RouteAdmission *RouteAdmissionPolicyApplyConfiguration `json:"routeAdmission,omitempty"` + Logging *IngressControllerLoggingApplyConfiguration `json:"logging,omitempty"` + HTTPHeaders *IngressControllerHTTPHeadersApplyConfiguration `json:"httpHeaders,omitempty"` + HTTPEmptyRequestsPolicy *operatorv1.HTTPEmptyRequestsPolicy `json:"httpEmptyRequestsPolicy,omitempty"` + TuningOptions *IngressControllerTuningOptionsApplyConfiguration `json:"tuningOptions,omitempty"` + UnsupportedConfigOverrides *runtime.RawExtension `json:"unsupportedConfigOverrides,omitempty"` + HTTPCompression *HTTPCompressionPolicyApplyConfiguration `json:"httpCompression,omitempty"` + IdleConnectionTerminationPolicy *operatorv1.IngressControllerConnectionTerminationPolicy `json:"idleConnectionTerminationPolicy,omitempty"` +} + +// IngressControllerSpecApplyConfiguration constructs a declarative configuration of the IngressControllerSpec type for use with +// 
apply. +func IngressControllerSpec() *IngressControllerSpecApplyConfiguration { + return &IngressControllerSpecApplyConfiguration{} +} + +// WithDomain sets the Domain field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Domain field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithDomain(value string) *IngressControllerSpecApplyConfiguration { + b.Domain = &value + return b +} + +// WithHttpErrorCodePages sets the HttpErrorCodePages field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HttpErrorCodePages field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithHttpErrorCodePages(value configv1.ConfigMapNameReference) *IngressControllerSpecApplyConfiguration { + b.HttpErrorCodePages = &value + return b +} + +// WithReplicas sets the Replicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Replicas field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithReplicas(value int32) *IngressControllerSpecApplyConfiguration { + b.Replicas = &value + return b +} + +// WithEndpointPublishingStrategy sets the EndpointPublishingStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EndpointPublishingStrategy field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithEndpointPublishingStrategy(value *EndpointPublishingStrategyApplyConfiguration) *IngressControllerSpecApplyConfiguration { + b.EndpointPublishingStrategy = value + return b +} + +// WithDefaultCertificate sets the DefaultCertificate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DefaultCertificate field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithDefaultCertificate(value corev1.LocalObjectReference) *IngressControllerSpecApplyConfiguration { + b.DefaultCertificate = &value + return b +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *IngressControllerSpecApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithRouteSelector sets the RouteSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RouteSelector field is set to the value of the last call. 
+func (b *IngressControllerSpecApplyConfiguration) WithRouteSelector(value *metav1.LabelSelectorApplyConfiguration) *IngressControllerSpecApplyConfiguration { + b.RouteSelector = value + return b +} + +// WithNodePlacement sets the NodePlacement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodePlacement field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithNodePlacement(value *NodePlacementApplyConfiguration) *IngressControllerSpecApplyConfiguration { + b.NodePlacement = value + return b +} + +// WithTLSSecurityProfile sets the TLSSecurityProfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSSecurityProfile field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithTLSSecurityProfile(value configv1.TLSSecurityProfile) *IngressControllerSpecApplyConfiguration { + b.TLSSecurityProfile = &value + return b +} + +// WithClientTLS sets the ClientTLS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientTLS field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithClientTLS(value *ClientTLSApplyConfiguration) *IngressControllerSpecApplyConfiguration { + b.ClientTLS = value + return b +} + +// WithRouteAdmission sets the RouteAdmission field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RouteAdmission field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithRouteAdmission(value *RouteAdmissionPolicyApplyConfiguration) *IngressControllerSpecApplyConfiguration { + b.RouteAdmission = value + return b +} + +// WithLogging sets the Logging field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Logging field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithLogging(value *IngressControllerLoggingApplyConfiguration) *IngressControllerSpecApplyConfiguration { + b.Logging = value + return b +} + +// WithHTTPHeaders sets the HTTPHeaders field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPHeaders field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithHTTPHeaders(value *IngressControllerHTTPHeadersApplyConfiguration) *IngressControllerSpecApplyConfiguration { + b.HTTPHeaders = value + return b +} + +// WithHTTPEmptyRequestsPolicy sets the HTTPEmptyRequestsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPEmptyRequestsPolicy field is set to the value of the last call. 
+func (b *IngressControllerSpecApplyConfiguration) WithHTTPEmptyRequestsPolicy(value operatorv1.HTTPEmptyRequestsPolicy) *IngressControllerSpecApplyConfiguration { + b.HTTPEmptyRequestsPolicy = &value + return b +} + +// WithTuningOptions sets the TuningOptions field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TuningOptions field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithTuningOptions(value *IngressControllerTuningOptionsApplyConfiguration) *IngressControllerSpecApplyConfiguration { + b.TuningOptions = value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *IngressControllerSpecApplyConfiguration { + b.UnsupportedConfigOverrides = &value + return b +} + +// WithHTTPCompression sets the HTTPCompression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPCompression field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithHTTPCompression(value *HTTPCompressionPolicyApplyConfiguration) *IngressControllerSpecApplyConfiguration { + b.HTTPCompression = value + return b +} + +// WithIdleConnectionTerminationPolicy sets the IdleConnectionTerminationPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IdleConnectionTerminationPolicy field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithIdleConnectionTerminationPolicy(value operatorv1.IngressControllerConnectionTerminationPolicy) *IngressControllerSpecApplyConfiguration { + b.IdleConnectionTerminationPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerstatus.go new file mode 100644 index 0000000000..2e558388d6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerstatus.go @@ -0,0 +1,105 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// IngressControllerStatusApplyConfiguration represents a declarative configuration of the IngressControllerStatus type for use +// with apply. 
+type IngressControllerStatusApplyConfiguration struct { + AvailableReplicas *int32 `json:"availableReplicas,omitempty"` + Selector *string `json:"selector,omitempty"` + Domain *string `json:"domain,omitempty"` + EndpointPublishingStrategy *EndpointPublishingStrategyApplyConfiguration `json:"endpointPublishingStrategy,omitempty"` + Conditions []OperatorConditionApplyConfiguration `json:"conditions,omitempty"` + TLSProfile *configv1.TLSProfileSpec `json:"tlsProfile,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + RouteSelector *metav1.LabelSelectorApplyConfiguration `json:"routeSelector,omitempty"` +} + +// IngressControllerStatusApplyConfiguration constructs a declarative configuration of the IngressControllerStatus type for use with +// apply. +func IngressControllerStatus() *IngressControllerStatusApplyConfiguration { + return &IngressControllerStatusApplyConfiguration{} +} + +// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AvailableReplicas field is set to the value of the last call. +func (b *IngressControllerStatusApplyConfiguration) WithAvailableReplicas(value int32) *IngressControllerStatusApplyConfiguration { + b.AvailableReplicas = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *IngressControllerStatusApplyConfiguration) WithSelector(value string) *IngressControllerStatusApplyConfiguration { + b.Selector = &value + return b +} + +// WithDomain sets the Domain field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Domain field is set to the value of the last call. +func (b *IngressControllerStatusApplyConfiguration) WithDomain(value string) *IngressControllerStatusApplyConfiguration { + b.Domain = &value + return b +} + +// WithEndpointPublishingStrategy sets the EndpointPublishingStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EndpointPublishingStrategy field is set to the value of the last call. +func (b *IngressControllerStatusApplyConfiguration) WithEndpointPublishingStrategy(value *EndpointPublishingStrategyApplyConfiguration) *IngressControllerStatusApplyConfiguration { + b.EndpointPublishingStrategy = value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. 
+func (b *IngressControllerStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *IngressControllerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithTLSProfile sets the TLSProfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSProfile field is set to the value of the last call. +func (b *IngressControllerStatusApplyConfiguration) WithTLSProfile(value configv1.TLSProfileSpec) *IngressControllerStatusApplyConfiguration { + b.TLSProfile = &value + return b +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *IngressControllerStatusApplyConfiguration) WithObservedGeneration(value int64) *IngressControllerStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *IngressControllerStatusApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *IngressControllerStatusApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithRouteSelector sets the RouteSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RouteSelector field is set to the value of the last call. +func (b *IngressControllerStatusApplyConfiguration) WithRouteSelector(value *metav1.LabelSelectorApplyConfiguration) *IngressControllerStatusApplyConfiguration { + b.RouteSelector = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go new file mode 100644 index 0000000000..122801cf10 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go @@ -0,0 +1,135 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// IngressControllerTuningOptionsApplyConfiguration represents a declarative configuration of the IngressControllerTuningOptions type for use +// with apply. 
+type IngressControllerTuningOptionsApplyConfiguration struct { + HeaderBufferBytes *int32 `json:"headerBufferBytes,omitempty"` + HeaderBufferMaxRewriteBytes *int32 `json:"headerBufferMaxRewriteBytes,omitempty"` + ThreadCount *int32 `json:"threadCount,omitempty"` + ClientTimeout *metav1.Duration `json:"clientTimeout,omitempty"` + ClientFinTimeout *metav1.Duration `json:"clientFinTimeout,omitempty"` + ServerTimeout *metav1.Duration `json:"serverTimeout,omitempty"` + ServerFinTimeout *metav1.Duration `json:"serverFinTimeout,omitempty"` + TunnelTimeout *metav1.Duration `json:"tunnelTimeout,omitempty"` + ConnectTimeout *metav1.Duration `json:"connectTimeout,omitempty"` + TLSInspectDelay *metav1.Duration `json:"tlsInspectDelay,omitempty"` + HealthCheckInterval *metav1.Duration `json:"healthCheckInterval,omitempty"` + MaxConnections *int32 `json:"maxConnections,omitempty"` + ReloadInterval *metav1.Duration `json:"reloadInterval,omitempty"` +} + +// IngressControllerTuningOptionsApplyConfiguration constructs a declarative configuration of the IngressControllerTuningOptions type for use with +// apply. +func IngressControllerTuningOptions() *IngressControllerTuningOptionsApplyConfiguration { + return &IngressControllerTuningOptionsApplyConfiguration{} +} + +// WithHeaderBufferBytes sets the HeaderBufferBytes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HeaderBufferBytes field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithHeaderBufferBytes(value int32) *IngressControllerTuningOptionsApplyConfiguration { + b.HeaderBufferBytes = &value + return b +} + +// WithHeaderBufferMaxRewriteBytes sets the HeaderBufferMaxRewriteBytes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HeaderBufferMaxRewriteBytes field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithHeaderBufferMaxRewriteBytes(value int32) *IngressControllerTuningOptionsApplyConfiguration { + b.HeaderBufferMaxRewriteBytes = &value + return b +} + +// WithThreadCount sets the ThreadCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ThreadCount field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithThreadCount(value int32) *IngressControllerTuningOptionsApplyConfiguration { + b.ThreadCount = &value + return b +} + +// WithClientTimeout sets the ClientTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientTimeout field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithClientTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { + b.ClientTimeout = &value + return b +} + +// WithClientFinTimeout sets the ClientFinTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ClientFinTimeout field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithClientFinTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { + b.ClientFinTimeout = &value + return b +} + +// WithServerTimeout sets the ServerTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServerTimeout field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithServerTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { + b.ServerTimeout = &value + return b +} + +// WithServerFinTimeout sets the ServerFinTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServerFinTimeout field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithServerFinTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { + b.ServerFinTimeout = &value + return b +} + +// WithTunnelTimeout sets the TunnelTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TunnelTimeout field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithTunnelTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { + b.TunnelTimeout = &value + return b +} + +// WithConnectTimeout sets the ConnectTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConnectTimeout field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithConnectTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { + b.ConnectTimeout = &value + return b +} + +// WithTLSInspectDelay sets the TLSInspectDelay field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLSInspectDelay field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithTLSInspectDelay(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { + b.TLSInspectDelay = &value + return b +} + +// WithHealthCheckInterval sets the HealthCheckInterval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HealthCheckInterval field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithHealthCheckInterval(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { + b.HealthCheckInterval = &value + return b +} + +// WithMaxConnections sets the MaxConnections field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the MaxConnections field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithMaxConnections(value int32) *IngressControllerTuningOptionsApplyConfiguration { + b.MaxConnections = &value + return b +} + +// WithReloadInterval sets the ReloadInterval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReloadInterval field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithReloadInterval(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { + b.ReloadInterval = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go new file mode 100644 index 0000000000..b694f1ca34 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// InsightsOperatorApplyConfiguration represents a declarative configuration of the InsightsOperator type for use +// with apply. +type InsightsOperatorApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *InsightsOperatorSpecApplyConfiguration `json:"spec,omitempty"` + Status *InsightsOperatorStatusApplyConfiguration `json:"status,omitempty"` +} + +// InsightsOperator constructs a declarative configuration of the InsightsOperator type for use with +// apply. +func InsightsOperator(name string) *InsightsOperatorApplyConfiguration { + b := &InsightsOperatorApplyConfiguration{} + b.WithName(name) + b.WithKind("InsightsOperator") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractInsightsOperator extracts the applied configuration owned by fieldManager from +// insightsOperator. If no managedFields are found in insightsOperator for fieldManager, a +// InsightsOperatorApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// insightsOperator must be a unmodified InsightsOperator API object that was retrieved from the Kubernetes API. +// ExtractInsightsOperator provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
+func ExtractInsightsOperator(insightsOperator *operatorv1.InsightsOperator, fieldManager string) (*InsightsOperatorApplyConfiguration, error) { + return extractInsightsOperator(insightsOperator, fieldManager, "") +} + +// ExtractInsightsOperatorStatus is the same as ExtractInsightsOperator except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractInsightsOperatorStatus(insightsOperator *operatorv1.InsightsOperator, fieldManager string) (*InsightsOperatorApplyConfiguration, error) { + return extractInsightsOperator(insightsOperator, fieldManager, "status") +} + +func extractInsightsOperator(insightsOperator *operatorv1.InsightsOperator, fieldManager string, subresource string) (*InsightsOperatorApplyConfiguration, error) { + b := &InsightsOperatorApplyConfiguration{} + err := managedfields.ExtractInto(insightsOperator, internal.Parser().Type("com.github.openshift.api.operator.v1.InsightsOperator"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(insightsOperator.Name) + + b.WithKind("InsightsOperator") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *InsightsOperatorApplyConfiguration) WithKind(value string) *InsightsOperatorApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *InsightsOperatorApplyConfiguration) WithAPIVersion(value string) *InsightsOperatorApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *InsightsOperatorApplyConfiguration) WithName(value string) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *InsightsOperatorApplyConfiguration) WithGenerateName(value string) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *InsightsOperatorApplyConfiguration) WithNamespace(value string) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *InsightsOperatorApplyConfiguration) WithUID(value types.UID) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *InsightsOperatorApplyConfiguration) WithResourceVersion(value string) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *InsightsOperatorApplyConfiguration) WithGeneration(value int64) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *InsightsOperatorApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *InsightsOperatorApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *InsightsOperatorApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *InsightsOperatorApplyConfiguration) WithLabels(entries map[string]string) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *InsightsOperatorApplyConfiguration) WithAnnotations(entries map[string]string) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *InsightsOperatorApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *InsightsOperatorApplyConfiguration) WithFinalizers(values ...string) *InsightsOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *InsightsOperatorApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *InsightsOperatorApplyConfiguration) WithSpec(value *InsightsOperatorSpecApplyConfiguration) *InsightsOperatorApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *InsightsOperatorApplyConfiguration) WithStatus(value *InsightsOperatorStatusApplyConfiguration) *InsightsOperatorApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *InsightsOperatorApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorspec.go new file mode 100644 index 0000000000..c6085db4a2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// InsightsOperatorSpecApplyConfiguration represents a declarative configuration of the InsightsOperatorSpec type for use +// with apply. +type InsightsOperatorSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// InsightsOperatorSpecApplyConfiguration constructs a declarative configuration of the InsightsOperatorSpec type for use with +// apply. +func InsightsOperatorSpec() *InsightsOperatorSpecApplyConfiguration { + return &InsightsOperatorSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *InsightsOperatorSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *InsightsOperatorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *InsightsOperatorSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *InsightsOperatorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *InsightsOperatorSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *InsightsOperatorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *InsightsOperatorSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *InsightsOperatorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *InsightsOperatorSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *InsightsOperatorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorstatus.go new file mode 100644 index 0000000000..2c679168d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperatorstatus.go @@ -0,0 +1,91 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// InsightsOperatorStatusApplyConfiguration represents a declarative configuration of the InsightsOperatorStatus type for use +// with apply. +type InsightsOperatorStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` + GatherStatus *GatherStatusApplyConfiguration `json:"gatherStatus,omitempty"` + InsightsReport *InsightsReportApplyConfiguration `json:"insightsReport,omitempty"` +} + +// InsightsOperatorStatusApplyConfiguration constructs a declarative configuration of the InsightsOperatorStatus type for use with +// apply. +func InsightsOperatorStatus() *InsightsOperatorStatusApplyConfiguration { + return &InsightsOperatorStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. 
+func (b *InsightsOperatorStatusApplyConfiguration) WithObservedGeneration(value int64) *InsightsOperatorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *InsightsOperatorStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *InsightsOperatorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *InsightsOperatorStatusApplyConfiguration) WithVersion(value string) *InsightsOperatorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *InsightsOperatorStatusApplyConfiguration) WithReadyReplicas(value int32) *InsightsOperatorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *InsightsOperatorStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *InsightsOperatorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *InsightsOperatorStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *InsightsOperatorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} + +// WithGatherStatus sets the GatherStatus field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GatherStatus field is set to the value of the last call. 
+func (b *InsightsOperatorStatusApplyConfiguration) WithGatherStatus(value *GatherStatusApplyConfiguration) *InsightsOperatorStatusApplyConfiguration { + b.GatherStatus = value + return b +} + +// WithInsightsReport sets the InsightsReport field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InsightsReport field is set to the value of the last call. +func (b *InsightsOperatorStatusApplyConfiguration) WithInsightsReport(value *InsightsReportApplyConfiguration) *InsightsOperatorStatusApplyConfiguration { + b.InsightsReport = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go new file mode 100644 index 0000000000..ce89fca0fd --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// InsightsReportApplyConfiguration represents a declarative configuration of the InsightsReport type for use +// with apply. +type InsightsReportApplyConfiguration struct { + DownloadedAt *metav1.Time `json:"downloadedAt,omitempty"` + HealthChecks []HealthCheckApplyConfiguration `json:"healthChecks,omitempty"` +} + +// InsightsReportApplyConfiguration constructs a declarative configuration of the InsightsReport type for use with +// apply. +func InsightsReport() *InsightsReportApplyConfiguration { + return &InsightsReportApplyConfiguration{} +} + +// WithDownloadedAt sets the DownloadedAt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DownloadedAt field is set to the value of the last call. +func (b *InsightsReportApplyConfiguration) WithDownloadedAt(value metav1.Time) *InsightsReportApplyConfiguration { + b.DownloadedAt = &value + return b +} + +// WithHealthChecks adds the given value to the HealthChecks field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the HealthChecks field. +func (b *InsightsReportApplyConfiguration) WithHealthChecks(values ...*HealthCheckApplyConfiguration) *InsightsReportApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithHealthChecks") + } + b.HealthChecks = append(b.HealthChecks, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipamconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipamconfig.go new file mode 100644 index 0000000000..c2cbc3069f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipamconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// IPAMConfigApplyConfiguration represents a declarative configuration of the IPAMConfig type for use +// with apply. 
+type IPAMConfigApplyConfiguration struct { + Type *operatorv1.IPAMType `json:"type,omitempty"` + StaticIPAMConfig *StaticIPAMConfigApplyConfiguration `json:"staticIPAMConfig,omitempty"` +} + +// IPAMConfigApplyConfiguration constructs a declarative configuration of the IPAMConfig type for use with +// apply. +func IPAMConfig() *IPAMConfigApplyConfiguration { + return &IPAMConfigApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *IPAMConfigApplyConfiguration) WithType(value operatorv1.IPAMType) *IPAMConfigApplyConfiguration { + b.Type = &value + return b +} + +// WithStaticIPAMConfig sets the StaticIPAMConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StaticIPAMConfig field is set to the value of the last call. +func (b *IPAMConfigApplyConfiguration) WithStaticIPAMConfig(value *StaticIPAMConfigApplyConfiguration) *IPAMConfigApplyConfiguration { + b.StaticIPAMConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipfixconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipfixconfig.go new file mode 100644 index 0000000000..c9bee3327f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipfixconfig.go @@ -0,0 +1,29 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// IPFIXConfigApplyConfiguration represents a declarative configuration of the IPFIXConfig type for use +// with apply. +type IPFIXConfigApplyConfiguration struct { + Collectors []operatorv1.IPPort `json:"collectors,omitempty"` +} + +// IPFIXConfigApplyConfiguration constructs a declarative configuration of the IPFIXConfig type for use with +// apply. +func IPFIXConfig() *IPFIXConfigApplyConfiguration { + return &IPFIXConfigApplyConfiguration{} +} + +// WithCollectors adds the given value to the Collectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Collectors field. +func (b *IPFIXConfigApplyConfiguration) WithCollectors(values ...operatorv1.IPPort) *IPFIXConfigApplyConfiguration { + for i := range values { + b.Collectors = append(b.Collectors, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go new file mode 100644 index 0000000000..eb4fc92078 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// IPsecConfigApplyConfiguration represents a declarative configuration of the IPsecConfig type for use +// with apply. 
+type IPsecConfigApplyConfiguration struct { + Mode *operatorv1.IPsecMode `json:"mode,omitempty"` + Full *IPsecFullModeConfigApplyConfiguration `json:"full,omitempty"` +} + +// IPsecConfigApplyConfiguration constructs a declarative configuration of the IPsecConfig type for use with +// apply. +func IPsecConfig() *IPsecConfigApplyConfiguration { + return &IPsecConfigApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *IPsecConfigApplyConfiguration) WithMode(value operatorv1.IPsecMode) *IPsecConfigApplyConfiguration { + b.Mode = &value + return b +} + +// WithFull sets the Full field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Full field is set to the value of the last call. +func (b *IPsecConfigApplyConfiguration) WithFull(value *IPsecFullModeConfigApplyConfiguration) *IPsecConfigApplyConfiguration { + b.Full = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecfullmodeconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecfullmodeconfig.go new file mode 100644 index 0000000000..208a4229c1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecfullmodeconfig.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// IPsecFullModeConfigApplyConfiguration represents a declarative configuration of the IPsecFullModeConfig type for use +// with apply. +type IPsecFullModeConfigApplyConfiguration struct { + Encapsulation *operatorv1.Encapsulation `json:"encapsulation,omitempty"` +} + +// IPsecFullModeConfigApplyConfiguration constructs a declarative configuration of the IPsecFullModeConfig type for use with +// apply. +func IPsecFullModeConfig() *IPsecFullModeConfigApplyConfiguration { + return &IPsecFullModeConfigApplyConfiguration{} +} + +// WithEncapsulation sets the Encapsulation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Encapsulation field is set to the value of the last call. +func (b *IPsecFullModeConfigApplyConfiguration) WithEncapsulation(value operatorv1.Encapsulation) *IPsecFullModeConfigApplyConfiguration { + b.Encapsulation = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv4gatewayconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv4gatewayconfig.go new file mode 100644 index 0000000000..951ea7aedc --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv4gatewayconfig.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IPv4GatewayConfigApplyConfiguration represents a declarative configuration of the IPv4GatewayConfig type for use +// with apply. 
+type IPv4GatewayConfigApplyConfiguration struct { + InternalMasqueradeSubnet *string `json:"internalMasqueradeSubnet,omitempty"` +} + +// IPv4GatewayConfigApplyConfiguration constructs a declarative configuration of the IPv4GatewayConfig type for use with +// apply. +func IPv4GatewayConfig() *IPv4GatewayConfigApplyConfiguration { + return &IPv4GatewayConfigApplyConfiguration{} +} + +// WithInternalMasqueradeSubnet sets the InternalMasqueradeSubnet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InternalMasqueradeSubnet field is set to the value of the last call. +func (b *IPv4GatewayConfigApplyConfiguration) WithInternalMasqueradeSubnet(value string) *IPv4GatewayConfigApplyConfiguration { + b.InternalMasqueradeSubnet = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv4ovnkubernetesconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv4ovnkubernetesconfig.go new file mode 100644 index 0000000000..74cdef5248 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv4ovnkubernetesconfig.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IPv4OVNKubernetesConfigApplyConfiguration represents a declarative configuration of the IPv4OVNKubernetesConfig type for use +// with apply. +type IPv4OVNKubernetesConfigApplyConfiguration struct { + InternalTransitSwitchSubnet *string `json:"internalTransitSwitchSubnet,omitempty"` + InternalJoinSubnet *string `json:"internalJoinSubnet,omitempty"` +} + +// IPv4OVNKubernetesConfigApplyConfiguration constructs a declarative configuration of the IPv4OVNKubernetesConfig type for use with +// apply. +func IPv4OVNKubernetesConfig() *IPv4OVNKubernetesConfigApplyConfiguration { + return &IPv4OVNKubernetesConfigApplyConfiguration{} +} + +// WithInternalTransitSwitchSubnet sets the InternalTransitSwitchSubnet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InternalTransitSwitchSubnet field is set to the value of the last call. +func (b *IPv4OVNKubernetesConfigApplyConfiguration) WithInternalTransitSwitchSubnet(value string) *IPv4OVNKubernetesConfigApplyConfiguration { + b.InternalTransitSwitchSubnet = &value + return b +} + +// WithInternalJoinSubnet sets the InternalJoinSubnet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InternalJoinSubnet field is set to the value of the last call. +func (b *IPv4OVNKubernetesConfigApplyConfiguration) WithInternalJoinSubnet(value string) *IPv4OVNKubernetesConfigApplyConfiguration { + b.InternalJoinSubnet = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv6gatewayconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv6gatewayconfig.go new file mode 100644 index 0000000000..66436ec789 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv6gatewayconfig.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// IPv6GatewayConfigApplyConfiguration represents a declarative configuration of the IPv6GatewayConfig type for use +// with apply. +type IPv6GatewayConfigApplyConfiguration struct { + InternalMasqueradeSubnet *string `json:"internalMasqueradeSubnet,omitempty"` +} + +// IPv6GatewayConfigApplyConfiguration constructs a declarative configuration of the IPv6GatewayConfig type for use with +// apply. +func IPv6GatewayConfig() *IPv6GatewayConfigApplyConfiguration { + return &IPv6GatewayConfigApplyConfiguration{} +} + +// WithInternalMasqueradeSubnet sets the InternalMasqueradeSubnet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InternalMasqueradeSubnet field is set to the value of the last call. +func (b *IPv6GatewayConfigApplyConfiguration) WithInternalMasqueradeSubnet(value string) *IPv6GatewayConfigApplyConfiguration { + b.InternalMasqueradeSubnet = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv6ovnkubernetesconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv6ovnkubernetesconfig.go new file mode 100644 index 0000000000..64deec5c10 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipv6ovnkubernetesconfig.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IPv6OVNKubernetesConfigApplyConfiguration represents a declarative configuration of the IPv6OVNKubernetesConfig type for use +// with apply. +type IPv6OVNKubernetesConfigApplyConfiguration struct { + InternalTransitSwitchSubnet *string `json:"internalTransitSwitchSubnet,omitempty"` + InternalJoinSubnet *string `json:"internalJoinSubnet,omitempty"` +} + +// IPv6OVNKubernetesConfigApplyConfiguration constructs a declarative configuration of the IPv6OVNKubernetesConfig type for use with +// apply. +func IPv6OVNKubernetesConfig() *IPv6OVNKubernetesConfigApplyConfiguration { + return &IPv6OVNKubernetesConfigApplyConfiguration{} +} + +// WithInternalTransitSwitchSubnet sets the InternalTransitSwitchSubnet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InternalTransitSwitchSubnet field is set to the value of the last call. +func (b *IPv6OVNKubernetesConfigApplyConfiguration) WithInternalTransitSwitchSubnet(value string) *IPv6OVNKubernetesConfigApplyConfiguration { + b.InternalTransitSwitchSubnet = &value + return b +} + +// WithInternalJoinSubnet sets the InternalJoinSubnet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InternalJoinSubnet field is set to the value of the last call. 
+func (b *IPv6OVNKubernetesConfigApplyConfiguration) WithInternalJoinSubnet(value string) *IPv6OVNKubernetesConfigApplyConfiguration { + b.InternalJoinSubnet = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go new file mode 100644 index 0000000000..8ff4292bc2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// KubeAPIServerApplyConfiguration represents a declarative configuration of the KubeAPIServer type for use +// with apply. +type KubeAPIServerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *KubeAPIServerSpecApplyConfiguration `json:"spec,omitempty"` + Status *KubeAPIServerStatusApplyConfiguration `json:"status,omitempty"` +} + +// KubeAPIServer constructs a declarative configuration of the KubeAPIServer type for use with +// apply. +func KubeAPIServer(name string) *KubeAPIServerApplyConfiguration { + b := &KubeAPIServerApplyConfiguration{} + b.WithName(name) + b.WithKind("KubeAPIServer") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractKubeAPIServer extracts the applied configuration owned by fieldManager from +// kubeAPIServer. If no managedFields are found in kubeAPIServer for fieldManager, a +// KubeAPIServerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// kubeAPIServer must be a unmodified KubeAPIServer API object that was retrieved from the Kubernetes API. +// ExtractKubeAPIServer provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractKubeAPIServer(kubeAPIServer *operatorv1.KubeAPIServer, fieldManager string) (*KubeAPIServerApplyConfiguration, error) { + return extractKubeAPIServer(kubeAPIServer, fieldManager, "") +} + +// ExtractKubeAPIServerStatus is the same as ExtractKubeAPIServer except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractKubeAPIServerStatus(kubeAPIServer *operatorv1.KubeAPIServer, fieldManager string) (*KubeAPIServerApplyConfiguration, error) { + return extractKubeAPIServer(kubeAPIServer, fieldManager, "status") +} + +func extractKubeAPIServer(kubeAPIServer *operatorv1.KubeAPIServer, fieldManager string, subresource string) (*KubeAPIServerApplyConfiguration, error) { + b := &KubeAPIServerApplyConfiguration{} + err := managedfields.ExtractInto(kubeAPIServer, internal.Parser().Type("com.github.openshift.api.operator.v1.KubeAPIServer"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(kubeAPIServer.Name) + + b.WithKind("KubeAPIServer") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *KubeAPIServerApplyConfiguration) WithKind(value string) *KubeAPIServerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *KubeAPIServerApplyConfiguration) WithAPIVersion(value string) *KubeAPIServerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *KubeAPIServerApplyConfiguration) WithName(value string) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *KubeAPIServerApplyConfiguration) WithGenerateName(value string) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *KubeAPIServerApplyConfiguration) WithNamespace(value string) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *KubeAPIServerApplyConfiguration) WithUID(value types.UID) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *KubeAPIServerApplyConfiguration) WithResourceVersion(value string) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *KubeAPIServerApplyConfiguration) WithGeneration(value int64) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *KubeAPIServerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *KubeAPIServerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *KubeAPIServerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *KubeAPIServerApplyConfiguration) WithLabels(entries map[string]string) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *KubeAPIServerApplyConfiguration) WithAnnotations(entries map[string]string) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *KubeAPIServerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *KubeAPIServerApplyConfiguration) WithFinalizers(values ...string) *KubeAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *KubeAPIServerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *KubeAPIServerApplyConfiguration) WithSpec(value *KubeAPIServerSpecApplyConfiguration) *KubeAPIServerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *KubeAPIServerApplyConfiguration) WithStatus(value *KubeAPIServerStatusApplyConfiguration) *KubeAPIServerApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *KubeAPIServerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go new file mode 100644 index 0000000000..71b60a95b1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go @@ -0,0 +1,84 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// KubeAPIServerSpecApplyConfiguration represents a declarative configuration of the KubeAPIServerSpec type for use +// with apply. +type KubeAPIServerSpecApplyConfiguration struct { + StaticPodOperatorSpecApplyConfiguration `json:",inline"` +} + +// KubeAPIServerSpecApplyConfiguration constructs a declarative configuration of the KubeAPIServerSpec type for use with +// apply. +func KubeAPIServerSpec() *KubeAPIServerSpecApplyConfiguration { + return &KubeAPIServerSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *KubeAPIServerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *KubeAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *KubeAPIServerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *KubeAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *KubeAPIServerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *KubeAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. 
+func (b *KubeAPIServerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *KubeAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *KubeAPIServerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *KubeAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} + +// WithForceRedeploymentReason sets the ForceRedeploymentReason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForceRedeploymentReason field is set to the value of the last call. +func (b *KubeAPIServerSpecApplyConfiguration) WithForceRedeploymentReason(value string) *KubeAPIServerSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.ForceRedeploymentReason = &value + return b +} + +// WithFailedRevisionLimit sets the FailedRevisionLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailedRevisionLimit field is set to the value of the last call. +func (b *KubeAPIServerSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *KubeAPIServerSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.FailedRevisionLimit = &value + return b +} + +// WithSucceededRevisionLimit sets the SucceededRevisionLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SucceededRevisionLimit field is set to the value of the last call. +func (b *KubeAPIServerSpecApplyConfiguration) WithSucceededRevisionLimit(value int32) *KubeAPIServerSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.SucceededRevisionLimit = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go new file mode 100644 index 0000000000..ff65c51136 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go @@ -0,0 +1,108 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// KubeAPIServerStatusApplyConfiguration represents a declarative configuration of the KubeAPIServerStatus type for use +// with apply. +type KubeAPIServerStatusApplyConfiguration struct { + StaticPodOperatorStatusApplyConfiguration `json:",inline"` + ServiceAccountIssuers []ServiceAccountIssuerStatusApplyConfiguration `json:"serviceAccountIssuers,omitempty"` +} + +// KubeAPIServerStatusApplyConfiguration constructs a declarative configuration of the KubeAPIServerStatus type for use with +// apply. 
+func KubeAPIServerStatus() *KubeAPIServerStatusApplyConfiguration { + return &KubeAPIServerStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *KubeAPIServerStatusApplyConfiguration) WithObservedGeneration(value int64) *KubeAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *KubeAPIServerStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *KubeAPIServerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *KubeAPIServerStatusApplyConfiguration) WithVersion(value string) *KubeAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *KubeAPIServerStatusApplyConfiguration) WithReadyReplicas(value int32) *KubeAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *KubeAPIServerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *KubeAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. 
+func (b *KubeAPIServerStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *KubeAPIServerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} + +// WithLatestAvailableRevisionReason sets the LatestAvailableRevisionReason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevisionReason field is set to the value of the last call. +func (b *KubeAPIServerStatusApplyConfiguration) WithLatestAvailableRevisionReason(value string) *KubeAPIServerStatusApplyConfiguration { + b.StaticPodOperatorStatusApplyConfiguration.LatestAvailableRevisionReason = &value + return b +} + +// WithNodeStatuses adds the given value to the NodeStatuses field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NodeStatuses field. +func (b *KubeAPIServerStatusApplyConfiguration) WithNodeStatuses(values ...*NodeStatusApplyConfiguration) *KubeAPIServerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithNodeStatuses") + } + b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses = append(b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses, *values[i]) + } + return b +} + +// WithServiceAccountIssuers adds the given value to the ServiceAccountIssuers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ServiceAccountIssuers field. +func (b *KubeAPIServerStatusApplyConfiguration) WithServiceAccountIssuers(values ...*ServiceAccountIssuerStatusApplyConfiguration) *KubeAPIServerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithServiceAccountIssuers") + } + b.ServiceAccountIssuers = append(b.ServiceAccountIssuers, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go new file mode 100644 index 0000000000..731b6793ab --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// KubeControllerManagerApplyConfiguration represents a declarative configuration of the KubeControllerManager type for use +// with apply. 
+type KubeControllerManagerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *KubeControllerManagerSpecApplyConfiguration `json:"spec,omitempty"` + Status *KubeControllerManagerStatusApplyConfiguration `json:"status,omitempty"` +} + +// KubeControllerManager constructs a declarative configuration of the KubeControllerManager type for use with +// apply. +func KubeControllerManager(name string) *KubeControllerManagerApplyConfiguration { + b := &KubeControllerManagerApplyConfiguration{} + b.WithName(name) + b.WithKind("KubeControllerManager") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractKubeControllerManager extracts the applied configuration owned by fieldManager from +// kubeControllerManager. If no managedFields are found in kubeControllerManager for fieldManager, a +// KubeControllerManagerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// kubeControllerManager must be a unmodified KubeControllerManager API object that was retrieved from the Kubernetes API. +// ExtractKubeControllerManager provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractKubeControllerManager(kubeControllerManager *operatorv1.KubeControllerManager, fieldManager string) (*KubeControllerManagerApplyConfiguration, error) { + return extractKubeControllerManager(kubeControllerManager, fieldManager, "") +} + +// ExtractKubeControllerManagerStatus is the same as ExtractKubeControllerManager except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractKubeControllerManagerStatus(kubeControllerManager *operatorv1.KubeControllerManager, fieldManager string) (*KubeControllerManagerApplyConfiguration, error) { + return extractKubeControllerManager(kubeControllerManager, fieldManager, "status") +} + +func extractKubeControllerManager(kubeControllerManager *operatorv1.KubeControllerManager, fieldManager string, subresource string) (*KubeControllerManagerApplyConfiguration, error) { + b := &KubeControllerManagerApplyConfiguration{} + err := managedfields.ExtractInto(kubeControllerManager, internal.Parser().Type("com.github.openshift.api.operator.v1.KubeControllerManager"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(kubeControllerManager.Name) + + b.WithKind("KubeControllerManager") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *KubeControllerManagerApplyConfiguration) WithKind(value string) *KubeControllerManagerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *KubeControllerManagerApplyConfiguration) WithAPIVersion(value string) *KubeControllerManagerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *KubeControllerManagerApplyConfiguration) WithName(value string) *KubeControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *KubeControllerManagerApplyConfiguration) WithGenerateName(value string) *KubeControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *KubeControllerManagerApplyConfiguration) WithNamespace(value string) *KubeControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *KubeControllerManagerApplyConfiguration) WithUID(value types.UID) *KubeControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *KubeControllerManagerApplyConfiguration) WithResourceVersion(value string) *KubeControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *KubeControllerManagerApplyConfiguration) WithGeneration(value int64) *KubeControllerManagerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *KubeControllerManagerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *KubeControllerManagerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *KubeControllerManagerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *KubeControllerManagerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *KubeControllerManagerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *KubeControllerManagerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *KubeControllerManagerApplyConfiguration) WithLabels(entries map[string]string) *KubeControllerManagerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
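// An editorial sketch of the merge semantics described above; repeated WithLabels calls
// merge maps key-by-key rather than replacing the whole map (keys and values invented):
//
//	b := KubeControllerManager("cluster").
//		WithLabels(map[string]string{"tier": "control-plane", "env": "dev"}).
//		WithLabels(map[string]string{"env": "prod"}) // "env" is overwritten, "tier" is kept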
+func (b *KubeControllerManagerApplyConfiguration) WithAnnotations(entries map[string]string) *KubeControllerManagerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *KubeControllerManagerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *KubeControllerManagerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *KubeControllerManagerApplyConfiguration) WithFinalizers(values ...string) *KubeControllerManagerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *KubeControllerManagerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *KubeControllerManagerApplyConfiguration) WithSpec(value *KubeControllerManagerSpecApplyConfiguration) *KubeControllerManagerApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *KubeControllerManagerApplyConfiguration) WithStatus(value *KubeControllerManagerStatusApplyConfiguration) *KubeControllerManagerApplyConfiguration {
+	b.Status = value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *KubeControllerManagerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerspec.go new file mode 100644 index 0000000000..8a51815784 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerspec.go @@ -0,0 +1,93 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// KubeControllerManagerSpecApplyConfiguration represents a declarative configuration of the KubeControllerManagerSpec type for use +// with apply. +type KubeControllerManagerSpecApplyConfiguration struct { + StaticPodOperatorSpecApplyConfiguration `json:",inline"` + UseMoreSecureServiceCA *bool `json:"useMoreSecureServiceCA,omitempty"` +} + +// KubeControllerManagerSpecApplyConfiguration constructs a declarative configuration of the KubeControllerManagerSpec type for use with +// apply. +func KubeControllerManagerSpec() *KubeControllerManagerSpecApplyConfiguration { + return &KubeControllerManagerSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *KubeControllerManagerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *KubeControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *KubeControllerManagerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *KubeControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *KubeControllerManagerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *KubeControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. 
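// An editorial sketch of chaining the spec setters in this file, ending with the
// KubeControllerManager-specific UseMoreSecureServiceCA flag defined below (the chosen
// values are illustrative, not recommendations):
//
//	spec := KubeControllerManagerSpec().
//		WithManagementState(operatorv1.Managed).
//		WithUseMoreSecureServiceCA(true)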
+func (b *KubeControllerManagerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *KubeControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *KubeControllerManagerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *KubeControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} + +// WithForceRedeploymentReason sets the ForceRedeploymentReason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForceRedeploymentReason field is set to the value of the last call. +func (b *KubeControllerManagerSpecApplyConfiguration) WithForceRedeploymentReason(value string) *KubeControllerManagerSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.ForceRedeploymentReason = &value + return b +} + +// WithFailedRevisionLimit sets the FailedRevisionLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailedRevisionLimit field is set to the value of the last call. +func (b *KubeControllerManagerSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *KubeControllerManagerSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.FailedRevisionLimit = &value + return b +} + +// WithSucceededRevisionLimit sets the SucceededRevisionLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SucceededRevisionLimit field is set to the value of the last call. +func (b *KubeControllerManagerSpecApplyConfiguration) WithSucceededRevisionLimit(value int32) *KubeControllerManagerSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.SucceededRevisionLimit = &value + return b +} + +// WithUseMoreSecureServiceCA sets the UseMoreSecureServiceCA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UseMoreSecureServiceCA field is set to the value of the last call. +func (b *KubeControllerManagerSpecApplyConfiguration) WithUseMoreSecureServiceCA(value bool) *KubeControllerManagerSpecApplyConfiguration { + b.UseMoreSecureServiceCA = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerstatus.go new file mode 100644 index 0000000000..1c72dff266 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanagerstatus.go @@ -0,0 +1,94 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+
+package v1
+
+// KubeControllerManagerStatusApplyConfiguration represents a declarative configuration of the KubeControllerManagerStatus type for use
+// with apply.
+type KubeControllerManagerStatusApplyConfiguration struct {
+	StaticPodOperatorStatusApplyConfiguration `json:",inline"`
+}
+
+// KubeControllerManagerStatusApplyConfiguration constructs a declarative configuration of the KubeControllerManagerStatus type for use with
+// apply.
+func KubeControllerManagerStatus() *KubeControllerManagerStatusApplyConfiguration {
+	return &KubeControllerManagerStatusApplyConfiguration{}
+}
+
+// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ObservedGeneration field is set to the value of the last call.
+func (b *KubeControllerManagerStatusApplyConfiguration) WithObservedGeneration(value int64) *KubeControllerManagerStatusApplyConfiguration {
+	b.OperatorStatusApplyConfiguration.ObservedGeneration = &value
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *KubeControllerManagerStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *KubeControllerManagerStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i])
+	}
+	return b
+}
+
+// WithVersion sets the Version field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Version field is set to the value of the last call.
+func (b *KubeControllerManagerStatusApplyConfiguration) WithVersion(value string) *KubeControllerManagerStatusApplyConfiguration {
+	b.OperatorStatusApplyConfiguration.Version = &value
+	return b
+}
+
+// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ReadyReplicas field is set to the value of the last call.
+func (b *KubeControllerManagerStatusApplyConfiguration) WithReadyReplicas(value int32) *KubeControllerManagerStatusApplyConfiguration {
+	b.OperatorStatusApplyConfiguration.ReadyReplicas = &value
+	return b
+}
+
+// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LatestAvailableRevision field is set to the value of the last call.
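// An editorial sketch of the append semantics of the variadic "With" methods in this file;
// each call appends to the embedded status, and a nil element panics (OperatorCondition and
// its setters are assumed to be the sibling generated builders in this package):
//
//	status := KubeControllerManagerStatus().
//		WithConditions(OperatorCondition().WithType("Available").WithStatus(operatorv1.ConditionTrue)).
//		WithConditions(OperatorCondition().WithType("Progressing").WithStatus(operatorv1.ConditionFalse))
//	// status now carries both conditions, in insertion order.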
+func (b *KubeControllerManagerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *KubeControllerManagerStatusApplyConfiguration {
+	b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value
+	return b
+}
+
+// WithGenerations adds the given value to the Generations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Generations field.
+func (b *KubeControllerManagerStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *KubeControllerManagerStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithGenerations")
+		}
+		b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i])
+	}
+	return b
+}
+
+// WithLatestAvailableRevisionReason sets the LatestAvailableRevisionReason field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LatestAvailableRevisionReason field is set to the value of the last call.
+func (b *KubeControllerManagerStatusApplyConfiguration) WithLatestAvailableRevisionReason(value string) *KubeControllerManagerStatusApplyConfiguration {
+	b.StaticPodOperatorStatusApplyConfiguration.LatestAvailableRevisionReason = &value
+	return b
+}
+
+// WithNodeStatuses adds the given value to the NodeStatuses field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the NodeStatuses field.
+func (b *KubeControllerManagerStatusApplyConfiguration) WithNodeStatuses(values ...*NodeStatusApplyConfiguration) *KubeControllerManagerStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithNodeStatuses")
+		}
+		b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses = append(b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go
new file mode 100644
index 0000000000..77e6ca3432
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	operatorv1 "github.com/openshift/api/operator/v1"
+	internal "github.com/openshift/client-go/operator/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// KubeSchedulerApplyConfiguration represents a declarative configuration of the KubeScheduler type for use
+// with apply.
+type KubeSchedulerApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *KubeSchedulerSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *KubeSchedulerStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// KubeScheduler constructs a declarative configuration of the KubeScheduler type for use with
+// apply.
+func KubeScheduler(name string) *KubeSchedulerApplyConfiguration {
+	b := &KubeSchedulerApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("KubeScheduler")
+	b.WithAPIVersion("operator.openshift.io/v1")
+	return b
+}
+
+// ExtractKubeScheduler extracts the applied configuration owned by fieldManager from
+// kubeScheduler. If no managedFields are found in kubeScheduler for fieldManager, a
+// KubeSchedulerApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// kubeScheduler must be an unmodified KubeScheduler API object that was retrieved from the Kubernetes API.
+// ExtractKubeScheduler provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractKubeScheduler(kubeScheduler *operatorv1.KubeScheduler, fieldManager string) (*KubeSchedulerApplyConfiguration, error) {
+	return extractKubeScheduler(kubeScheduler, fieldManager, "")
+}
+
+// ExtractKubeSchedulerStatus is the same as ExtractKubeScheduler except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractKubeSchedulerStatus(kubeScheduler *operatorv1.KubeScheduler, fieldManager string) (*KubeSchedulerApplyConfiguration, error) {
+	return extractKubeScheduler(kubeScheduler, fieldManager, "status")
+}
+
+func extractKubeScheduler(kubeScheduler *operatorv1.KubeScheduler, fieldManager string, subresource string) (*KubeSchedulerApplyConfiguration, error) {
+	b := &KubeSchedulerApplyConfiguration{}
+	err := managedfields.ExtractInto(kubeScheduler, internal.Parser().Type("com.github.openshift.api.operator.v1.KubeScheduler"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(kubeScheduler.Name)
+
+	b.WithKind("KubeScheduler")
+	b.WithAPIVersion("operator.openshift.io/v1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *KubeSchedulerApplyConfiguration) WithKind(value string) *KubeSchedulerApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *KubeSchedulerApplyConfiguration) WithAPIVersion(value string) *KubeSchedulerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *KubeSchedulerApplyConfiguration) WithName(value string) *KubeSchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *KubeSchedulerApplyConfiguration) WithGenerateName(value string) *KubeSchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *KubeSchedulerApplyConfiguration) WithNamespace(value string) *KubeSchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *KubeSchedulerApplyConfiguration) WithUID(value types.UID) *KubeSchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *KubeSchedulerApplyConfiguration) WithResourceVersion(value string) *KubeSchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *KubeSchedulerApplyConfiguration) WithGeneration(value int64) *KubeSchedulerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *KubeSchedulerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *KubeSchedulerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *KubeSchedulerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *KubeSchedulerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *KubeSchedulerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *KubeSchedulerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *KubeSchedulerApplyConfiguration) WithLabels(entries map[string]string) *KubeSchedulerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *KubeSchedulerApplyConfiguration) WithAnnotations(entries map[string]string) *KubeSchedulerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *KubeSchedulerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *KubeSchedulerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *KubeSchedulerApplyConfiguration) WithFinalizers(values ...string) *KubeSchedulerApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *KubeSchedulerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *KubeSchedulerApplyConfiguration) WithSpec(value *KubeSchedulerSpecApplyConfiguration) *KubeSchedulerApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *KubeSchedulerApplyConfiguration) WithStatus(value *KubeSchedulerStatusApplyConfiguration) *KubeSchedulerApplyConfiguration {
+	b.Status = value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *KubeSchedulerApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerspec.go
new file mode 100644
index 0000000000..94bd1d61fd
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerspec.go
@@ -0,0 +1,84 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	operatorv1 "github.com/openshift/api/operator/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// KubeSchedulerSpecApplyConfiguration represents a declarative configuration of the KubeSchedulerSpec type for use
+// with apply.
+type KubeSchedulerSpecApplyConfiguration struct {
+	StaticPodOperatorSpecApplyConfiguration `json:",inline"`
+}
+
+// KubeSchedulerSpecApplyConfiguration constructs a declarative configuration of the KubeSchedulerSpec type for use with
+// apply.
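// An editorial sketch of what the inline StaticPodOperatorSpecApplyConfiguration embedding
// means for callers: the setters below write through to the embedded struct, so one chain
// covers both generic operator fields and static-pod fields (values are illustrative only):
//
//	spec := KubeSchedulerSpec().
//		WithManagementState(operatorv1.Managed).
//		WithFailedRevisionLimit(3)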
+func KubeSchedulerSpec() *KubeSchedulerSpecApplyConfiguration { + return &KubeSchedulerSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *KubeSchedulerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *KubeSchedulerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *KubeSchedulerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *KubeSchedulerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *KubeSchedulerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *KubeSchedulerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *KubeSchedulerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *KubeSchedulerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *KubeSchedulerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *KubeSchedulerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} + +// WithForceRedeploymentReason sets the ForceRedeploymentReason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForceRedeploymentReason field is set to the value of the last call. +func (b *KubeSchedulerSpecApplyConfiguration) WithForceRedeploymentReason(value string) *KubeSchedulerSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.ForceRedeploymentReason = &value + return b +} + +// WithFailedRevisionLimit sets the FailedRevisionLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the FailedRevisionLimit field is set to the value of the last call.
+func (b *KubeSchedulerSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *KubeSchedulerSpecApplyConfiguration {
+	b.StaticPodOperatorSpecApplyConfiguration.FailedRevisionLimit = &value
+	return b
+}
+
+// WithSucceededRevisionLimit sets the SucceededRevisionLimit field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SucceededRevisionLimit field is set to the value of the last call.
+func (b *KubeSchedulerSpecApplyConfiguration) WithSucceededRevisionLimit(value int32) *KubeSchedulerSpecApplyConfiguration {
+	b.StaticPodOperatorSpecApplyConfiguration.SucceededRevisionLimit = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerstatus.go
new file mode 100644
index 0000000000..821b4c3f94
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeschedulerstatus.go
@@ -0,0 +1,94 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// KubeSchedulerStatusApplyConfiguration represents a declarative configuration of the KubeSchedulerStatus type for use
+// with apply.
+type KubeSchedulerStatusApplyConfiguration struct {
+	StaticPodOperatorStatusApplyConfiguration `json:",inline"`
+}
+
+// KubeSchedulerStatusApplyConfiguration constructs a declarative configuration of the KubeSchedulerStatus type for use with
+// apply.
+func KubeSchedulerStatus() *KubeSchedulerStatusApplyConfiguration {
+	return &KubeSchedulerStatusApplyConfiguration{}
+}
+
+// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ObservedGeneration field is set to the value of the last call.
+func (b *KubeSchedulerStatusApplyConfiguration) WithObservedGeneration(value int64) *KubeSchedulerStatusApplyConfiguration {
+	b.OperatorStatusApplyConfiguration.ObservedGeneration = &value
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *KubeSchedulerStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *KubeSchedulerStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i])
+	}
+	return b
+}
+
+// WithVersion sets the Version field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Version field is set to the value of the last call.
+func (b *KubeSchedulerStatusApplyConfiguration) WithVersion(value string) *KubeSchedulerStatusApplyConfiguration {
+	b.OperatorStatusApplyConfiguration.Version = &value
+	return b
+}
+
+// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ReadyReplicas field is set to the value of the last call.
+func (b *KubeSchedulerStatusApplyConfiguration) WithReadyReplicas(value int32) *KubeSchedulerStatusApplyConfiguration {
+	b.OperatorStatusApplyConfiguration.ReadyReplicas = &value
+	return b
+}
+
+// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LatestAvailableRevision field is set to the value of the last call.
+func (b *KubeSchedulerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *KubeSchedulerStatusApplyConfiguration {
+	b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value
+	return b
+}
+
+// WithGenerations adds the given value to the Generations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Generations field.
+func (b *KubeSchedulerStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *KubeSchedulerStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithGenerations")
+		}
+		b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i])
+	}
+	return b
+}
+
+// WithLatestAvailableRevisionReason sets the LatestAvailableRevisionReason field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LatestAvailableRevisionReason field is set to the value of the last call.
+func (b *KubeSchedulerStatusApplyConfiguration) WithLatestAvailableRevisionReason(value string) *KubeSchedulerStatusApplyConfiguration {
+	b.StaticPodOperatorStatusApplyConfiguration.LatestAvailableRevisionReason = &value
+	return b
+}
+
+// WithNodeStatuses adds the given value to the NodeStatuses field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the NodeStatuses field.
+func (b *KubeSchedulerStatusApplyConfiguration) WithNodeStatuses(values ...*NodeStatusApplyConfiguration) *KubeSchedulerStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithNodeStatuses")
+		}
+		b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses = append(b.StaticPodOperatorStatusApplyConfiguration.NodeStatuses, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go
new file mode 100644
index 0000000000..5c84a133f3
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go
@@ -0,0 +1,246 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	operatorv1 "github.com/openshift/api/operator/v1"
+	internal "github.com/openshift/client-go/operator/applyconfigurations/internal"
+	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// KubeStorageVersionMigratorApplyConfiguration represents a declarative configuration of the KubeStorageVersionMigrator type for use
+// with apply.
+type KubeStorageVersionMigratorApplyConfiguration struct {
+	metav1.TypeMetaApplyConfiguration    `json:",inline"`
+	*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                                 *KubeStorageVersionMigratorSpecApplyConfiguration   `json:"spec,omitempty"`
+	Status                               *KubeStorageVersionMigratorStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// KubeStorageVersionMigrator constructs a declarative configuration of the KubeStorageVersionMigrator type for use with
+// apply.
+func KubeStorageVersionMigrator(name string) *KubeStorageVersionMigratorApplyConfiguration {
+	b := &KubeStorageVersionMigratorApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("KubeStorageVersionMigrator")
+	b.WithAPIVersion("operator.openshift.io/v1")
+	return b
+}
+
+// ExtractKubeStorageVersionMigrator extracts the applied configuration owned by fieldManager from
+// kubeStorageVersionMigrator. If no managedFields are found in kubeStorageVersionMigrator for fieldManager, a
+// KubeStorageVersionMigratorApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// kubeStorageVersionMigrator must be an unmodified KubeStorageVersionMigrator API object that was retrieved from the Kubernetes API.
+// ExtractKubeStorageVersionMigrator provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractKubeStorageVersionMigrator(kubeStorageVersionMigrator *operatorv1.KubeStorageVersionMigrator, fieldManager string) (*KubeStorageVersionMigratorApplyConfiguration, error) { + return extractKubeStorageVersionMigrator(kubeStorageVersionMigrator, fieldManager, "") +} + +// ExtractKubeStorageVersionMigratorStatus is the same as ExtractKubeStorageVersionMigrator except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractKubeStorageVersionMigratorStatus(kubeStorageVersionMigrator *operatorv1.KubeStorageVersionMigrator, fieldManager string) (*KubeStorageVersionMigratorApplyConfiguration, error) { + return extractKubeStorageVersionMigrator(kubeStorageVersionMigrator, fieldManager, "status") +} + +func extractKubeStorageVersionMigrator(kubeStorageVersionMigrator *operatorv1.KubeStorageVersionMigrator, fieldManager string, subresource string) (*KubeStorageVersionMigratorApplyConfiguration, error) { + b := &KubeStorageVersionMigratorApplyConfiguration{} + err := managedfields.ExtractInto(kubeStorageVersionMigrator, internal.Parser().Type("com.github.openshift.api.operator.v1.KubeStorageVersionMigrator"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(kubeStorageVersionMigrator.Name) + + b.WithKind("KubeStorageVersionMigrator") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *KubeStorageVersionMigratorApplyConfiguration) WithKind(value string) *KubeStorageVersionMigratorApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *KubeStorageVersionMigratorApplyConfiguration) WithAPIVersion(value string) *KubeStorageVersionMigratorApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *KubeStorageVersionMigratorApplyConfiguration) WithName(value string) *KubeStorageVersionMigratorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *KubeStorageVersionMigratorApplyConfiguration) WithGenerateName(value string) *KubeStorageVersionMigratorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *KubeStorageVersionMigratorApplyConfiguration) WithNamespace(value string) *KubeStorageVersionMigratorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *KubeStorageVersionMigratorApplyConfiguration) WithUID(value types.UID) *KubeStorageVersionMigratorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *KubeStorageVersionMigratorApplyConfiguration) WithResourceVersion(value string) *KubeStorageVersionMigratorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *KubeStorageVersionMigratorApplyConfiguration) WithGeneration(value int64) *KubeStorageVersionMigratorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *KubeStorageVersionMigratorApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *KubeStorageVersionMigratorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *KubeStorageVersionMigratorApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *KubeStorageVersionMigratorApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *KubeStorageVersionMigratorApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *KubeStorageVersionMigratorApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *KubeStorageVersionMigratorApplyConfiguration) WithLabels(entries map[string]string) *KubeStorageVersionMigratorApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *KubeStorageVersionMigratorApplyConfiguration) WithAnnotations(entries map[string]string) *KubeStorageVersionMigratorApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *KubeStorageVersionMigratorApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *KubeStorageVersionMigratorApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *KubeStorageVersionMigratorApplyConfiguration) WithFinalizers(values ...string) *KubeStorageVersionMigratorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *KubeStorageVersionMigratorApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *KubeStorageVersionMigratorApplyConfiguration) WithSpec(value *KubeStorageVersionMigratorSpecApplyConfiguration) *KubeStorageVersionMigratorApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *KubeStorageVersionMigratorApplyConfiguration) WithStatus(value *KubeStorageVersionMigratorStatusApplyConfiguration) *KubeStorageVersionMigratorApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *KubeStorageVersionMigratorApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorspec.go new file mode 100644 index 0000000000..6acfcb82b3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// KubeStorageVersionMigratorSpecApplyConfiguration represents a declarative configuration of the KubeStorageVersionMigratorSpec type for use +// with apply. +type KubeStorageVersionMigratorSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// KubeStorageVersionMigratorSpecApplyConfiguration constructs a declarative configuration of the KubeStorageVersionMigratorSpec type for use with +// apply. +func KubeStorageVersionMigratorSpec() *KubeStorageVersionMigratorSpecApplyConfiguration { + return &KubeStorageVersionMigratorSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. 
+func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *KubeStorageVersionMigratorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *KubeStorageVersionMigratorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *KubeStorageVersionMigratorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *KubeStorageVersionMigratorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *KubeStorageVersionMigratorSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *KubeStorageVersionMigratorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorstatus.go new file mode 100644 index 0000000000..cad8e2f76d --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigratorstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// KubeStorageVersionMigratorStatusApplyConfiguration represents a declarative configuration of the KubeStorageVersionMigratorStatus type for use +// with apply. +type KubeStorageVersionMigratorStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// KubeStorageVersionMigratorStatusApplyConfiguration constructs a declarative configuration of the KubeStorageVersionMigratorStatus type for use with +// apply. 
+func KubeStorageVersionMigratorStatus() *KubeStorageVersionMigratorStatusApplyConfiguration { + return &KubeStorageVersionMigratorStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithObservedGeneration(value int64) *KubeStorageVersionMigratorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *KubeStorageVersionMigratorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithVersion(value string) *KubeStorageVersionMigratorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithReadyReplicas(value int32) *KubeStorageVersionMigratorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *KubeStorageVersionMigratorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. 
+func (b *KubeStorageVersionMigratorStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *KubeStorageVersionMigratorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go new file mode 100644 index 0000000000..b8e83a02cf --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go @@ -0,0 +1,56 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// LoadBalancerStrategyApplyConfiguration represents a declarative configuration of the LoadBalancerStrategy type for use +// with apply. +type LoadBalancerStrategyApplyConfiguration struct { + Scope *operatorv1.LoadBalancerScope `json:"scope,omitempty"` + AllowedSourceRanges []operatorv1.CIDR `json:"allowedSourceRanges,omitempty"` + ProviderParameters *ProviderLoadBalancerParametersApplyConfiguration `json:"providerParameters,omitempty"` + DNSManagementPolicy *operatorv1.LoadBalancerDNSManagementPolicy `json:"dnsManagementPolicy,omitempty"` +} + +// LoadBalancerStrategyApplyConfiguration constructs a declarative configuration of the LoadBalancerStrategy type for use with +// apply. +func LoadBalancerStrategy() *LoadBalancerStrategyApplyConfiguration { + return &LoadBalancerStrategyApplyConfiguration{} +} + +// WithScope sets the Scope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scope field is set to the value of the last call. +func (b *LoadBalancerStrategyApplyConfiguration) WithScope(value operatorv1.LoadBalancerScope) *LoadBalancerStrategyApplyConfiguration { + b.Scope = &value + return b +} + +// WithAllowedSourceRanges adds the given value to the AllowedSourceRanges field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedSourceRanges field. +func (b *LoadBalancerStrategyApplyConfiguration) WithAllowedSourceRanges(values ...operatorv1.CIDR) *LoadBalancerStrategyApplyConfiguration { + for i := range values { + b.AllowedSourceRanges = append(b.AllowedSourceRanges, values[i]) + } + return b +} + +// WithProviderParameters sets the ProviderParameters field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProviderParameters field is set to the value of the last call. 
+func (b *LoadBalancerStrategyApplyConfiguration) WithProviderParameters(value *ProviderLoadBalancerParametersApplyConfiguration) *LoadBalancerStrategyApplyConfiguration { + b.ProviderParameters = value + return b +} + +// WithDNSManagementPolicy sets the DNSManagementPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSManagementPolicy field is set to the value of the last call. +func (b *LoadBalancerStrategyApplyConfiguration) WithDNSManagementPolicy(value operatorv1.LoadBalancerDNSManagementPolicy) *LoadBalancerStrategyApplyConfiguration { + b.DNSManagementPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loggingdestination.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loggingdestination.go new file mode 100644 index 0000000000..36a7bd5c58 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loggingdestination.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// LoggingDestinationApplyConfiguration represents a declarative configuration of the LoggingDestination type for use +// with apply. +type LoggingDestinationApplyConfiguration struct { + Type *operatorv1.LoggingDestinationType `json:"type,omitempty"` + Syslog *SyslogLoggingDestinationParametersApplyConfiguration `json:"syslog,omitempty"` + Container *ContainerLoggingDestinationParametersApplyConfiguration `json:"container,omitempty"` +} + +// LoggingDestinationApplyConfiguration constructs a declarative configuration of the LoggingDestination type for use with +// apply. +func LoggingDestination() *LoggingDestinationApplyConfiguration { + return &LoggingDestinationApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *LoggingDestinationApplyConfiguration) WithType(value operatorv1.LoggingDestinationType) *LoggingDestinationApplyConfiguration { + b.Type = &value + return b +} + +// WithSyslog sets the Syslog field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Syslog field is set to the value of the last call. +func (b *LoggingDestinationApplyConfiguration) WithSyslog(value *SyslogLoggingDestinationParametersApplyConfiguration) *LoggingDestinationApplyConfiguration { + b.Syslog = value + return b +} + +// WithContainer sets the Container field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Container field is set to the value of the last call. 
+func (b *LoggingDestinationApplyConfiguration) WithContainer(value *ContainerLoggingDestinationParametersApplyConfiguration) *LoggingDestinationApplyConfiguration { + b.Container = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/logo.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/logo.go new file mode 100644 index 0000000000..dfa08aaf83 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/logo.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// LogoApplyConfiguration represents a declarative configuration of the Logo type for use +// with apply. +type LogoApplyConfiguration struct { + Type *operatorv1.LogoType `json:"type,omitempty"` + Themes []ThemeApplyConfiguration `json:"themes,omitempty"` +} + +// LogoApplyConfiguration constructs a declarative configuration of the Logo type for use with +// apply. +func Logo() *LogoApplyConfiguration { + return &LogoApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *LogoApplyConfiguration) WithType(value operatorv1.LogoType) *LogoApplyConfiguration { + b.Type = &value + return b +} + +// WithThemes adds the given value to the Themes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Themes field. +func (b *LogoApplyConfiguration) WithThemes(values ...*ThemeApplyConfiguration) *LogoApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithThemes") + } + b.Themes = append(b.Themes, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go new file mode 100644 index 0000000000..35d2b867eb --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MachineConfigurationApplyConfiguration represents a declarative configuration of the MachineConfiguration type for use +// with apply. +type MachineConfigurationApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *MachineConfigurationSpecApplyConfiguration `json:"spec,omitempty"` + Status *MachineConfigurationStatusApplyConfiguration `json:"status,omitempty"` +} + +// MachineConfiguration constructs a declarative configuration of the MachineConfiguration type for use with +// apply. 
+func MachineConfiguration(name string) *MachineConfigurationApplyConfiguration { + b := &MachineConfigurationApplyConfiguration{} + b.WithName(name) + b.WithKind("MachineConfiguration") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractMachineConfiguration extracts the applied configuration owned by fieldManager from +// machineConfiguration. If no managedFields are found in machineConfiguration for fieldManager, a +// MachineConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// machineConfiguration must be a unmodified MachineConfiguration API object that was retrieved from the Kubernetes API. +// ExtractMachineConfiguration provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractMachineConfiguration(machineConfiguration *operatorv1.MachineConfiguration, fieldManager string) (*MachineConfigurationApplyConfiguration, error) { + return extractMachineConfiguration(machineConfiguration, fieldManager, "") +} + +// ExtractMachineConfigurationStatus is the same as ExtractMachineConfiguration except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractMachineConfigurationStatus(machineConfiguration *operatorv1.MachineConfiguration, fieldManager string) (*MachineConfigurationApplyConfiguration, error) { + return extractMachineConfiguration(machineConfiguration, fieldManager, "status") +} + +func extractMachineConfiguration(machineConfiguration *operatorv1.MachineConfiguration, fieldManager string, subresource string) (*MachineConfigurationApplyConfiguration, error) { + b := &MachineConfigurationApplyConfiguration{} + err := managedfields.ExtractInto(machineConfiguration, internal.Parser().Type("com.github.openshift.api.operator.v1.MachineConfiguration"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(machineConfiguration.Name) + + b.WithKind("MachineConfiguration") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithKind(value string) *MachineConfigurationApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *MachineConfigurationApplyConfiguration) WithAPIVersion(value string) *MachineConfigurationApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithName(value string) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithGenerateName(value string) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithNamespace(value string) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithUID(value types.UID) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithResourceVersion(value string) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithGeneration(value int64) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *MachineConfigurationApplyConfiguration) WithLabels(entries map[string]string) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *MachineConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *MachineConfigurationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *MachineConfigurationApplyConfiguration) WithFinalizers(values ...string) *MachineConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *MachineConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithSpec(value *MachineConfigurationSpecApplyConfiguration) *MachineConfigurationApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *MachineConfigurationApplyConfiguration) WithStatus(value *MachineConfigurationStatusApplyConfiguration) *MachineConfigurationApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MachineConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go new file mode 100644 index 0000000000..cee3c69fc0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go @@ -0,0 +1,102 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// MachineConfigurationSpecApplyConfiguration represents a declarative configuration of the MachineConfigurationSpec type for use +// with apply. 
+type MachineConfigurationSpecApplyConfiguration struct { + StaticPodOperatorSpecApplyConfiguration `json:",inline"` + ManagedBootImages *ManagedBootImagesApplyConfiguration `json:"managedBootImages,omitempty"` + NodeDisruptionPolicy *NodeDisruptionPolicyConfigApplyConfiguration `json:"nodeDisruptionPolicy,omitempty"` +} + +// MachineConfigurationSpecApplyConfiguration constructs a declarative configuration of the MachineConfigurationSpec type for use with +// apply. +func MachineConfigurationSpec() *MachineConfigurationSpecApplyConfiguration { + return &MachineConfigurationSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *MachineConfigurationSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *MachineConfigurationSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *MachineConfigurationSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *MachineConfigurationSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *MachineConfigurationSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *MachineConfigurationSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *MachineConfigurationSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *MachineConfigurationSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *MachineConfigurationSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *MachineConfigurationSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} + +// WithForceRedeploymentReason sets the ForceRedeploymentReason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ForceRedeploymentReason field is set to the value of the last call. +func (b *MachineConfigurationSpecApplyConfiguration) WithForceRedeploymentReason(value string) *MachineConfigurationSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.ForceRedeploymentReason = &value + return b +} + +// WithFailedRevisionLimit sets the FailedRevisionLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailedRevisionLimit field is set to the value of the last call. +func (b *MachineConfigurationSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *MachineConfigurationSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.FailedRevisionLimit = &value + return b +} + +// WithSucceededRevisionLimit sets the SucceededRevisionLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SucceededRevisionLimit field is set to the value of the last call. +func (b *MachineConfigurationSpecApplyConfiguration) WithSucceededRevisionLimit(value int32) *MachineConfigurationSpecApplyConfiguration { + b.StaticPodOperatorSpecApplyConfiguration.SucceededRevisionLimit = &value + return b +} + +// WithManagedBootImages sets the ManagedBootImages field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagedBootImages field is set to the value of the last call. +func (b *MachineConfigurationSpecApplyConfiguration) WithManagedBootImages(value *ManagedBootImagesApplyConfiguration) *MachineConfigurationSpecApplyConfiguration { + b.ManagedBootImages = value + return b +} + +// WithNodeDisruptionPolicy sets the NodeDisruptionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeDisruptionPolicy field is set to the value of the last call. +func (b *MachineConfigurationSpecApplyConfiguration) WithNodeDisruptionPolicy(value *NodeDisruptionPolicyConfigApplyConfiguration) *MachineConfigurationSpecApplyConfiguration { + b.NodeDisruptionPolicy = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go new file mode 100644 index 0000000000..073ca7c541 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go @@ -0,0 +1,59 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MachineConfigurationStatusApplyConfiguration represents a declarative configuration of the MachineConfigurationStatus type for use +// with apply. 
+type MachineConfigurationStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + NodeDisruptionPolicyStatus *NodeDisruptionPolicyStatusApplyConfiguration `json:"nodeDisruptionPolicyStatus,omitempty"` + ManagedBootImagesStatus *ManagedBootImagesApplyConfiguration `json:"managedBootImagesStatus,omitempty"` +} + +// MachineConfigurationStatusApplyConfiguration constructs a declarative configuration of the MachineConfigurationStatus type for use with +// apply. +func MachineConfigurationStatus() *MachineConfigurationStatusApplyConfiguration { + return &MachineConfigurationStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *MachineConfigurationStatusApplyConfiguration) WithObservedGeneration(value int64) *MachineConfigurationStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *MachineConfigurationStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *MachineConfigurationStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithNodeDisruptionPolicyStatus sets the NodeDisruptionPolicyStatus field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeDisruptionPolicyStatus field is set to the value of the last call. +func (b *MachineConfigurationStatusApplyConfiguration) WithNodeDisruptionPolicyStatus(value *NodeDisruptionPolicyStatusApplyConfiguration) *MachineConfigurationStatusApplyConfiguration { + b.NodeDisruptionPolicyStatus = value + return b +} + +// WithManagedBootImagesStatus sets the ManagedBootImagesStatus field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagedBootImagesStatus field is set to the value of the last call. +func (b *MachineConfigurationStatusApplyConfiguration) WithManagedBootImagesStatus(value *ManagedBootImagesApplyConfiguration) *MachineConfigurationStatusApplyConfiguration { + b.ManagedBootImagesStatus = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanager.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanager.go new file mode 100644 index 0000000000..d4a9f3c2cb --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanager.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// MachineManagerApplyConfiguration represents a declarative configuration of the MachineManager type for use +// with apply. +type MachineManagerApplyConfiguration struct { + Resource *operatorv1.MachineManagerMachineSetsResourceType `json:"resource,omitempty"` + APIGroup *operatorv1.MachineManagerMachineSetsAPIGroupType `json:"apiGroup,omitempty"` + Selection *MachineManagerSelectorApplyConfiguration `json:"selection,omitempty"` +} + +// MachineManagerApplyConfiguration constructs a declarative configuration of the MachineManager type for use with +// apply. +func MachineManager() *MachineManagerApplyConfiguration { + return &MachineManagerApplyConfiguration{} +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *MachineManagerApplyConfiguration) WithResource(value operatorv1.MachineManagerMachineSetsResourceType) *MachineManagerApplyConfiguration { + b.Resource = &value + return b +} + +// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIGroup field is set to the value of the last call. +func (b *MachineManagerApplyConfiguration) WithAPIGroup(value operatorv1.MachineManagerMachineSetsAPIGroupType) *MachineManagerApplyConfiguration { + b.APIGroup = &value + return b +} + +// WithSelection sets the Selection field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selection field is set to the value of the last call. +func (b *MachineManagerApplyConfiguration) WithSelection(value *MachineManagerSelectorApplyConfiguration) *MachineManagerApplyConfiguration { + b.Selection = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanagerselector.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanagerselector.go new file mode 100644 index 0000000000..3bb44f21c3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machinemanagerselector.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// MachineManagerSelectorApplyConfiguration represents a declarative configuration of the MachineManagerSelector type for use +// with apply. +type MachineManagerSelectorApplyConfiguration struct { + Mode *operatorv1.MachineManagerSelectorMode `json:"mode,omitempty"` + Partial *PartialSelectorApplyConfiguration `json:"partial,omitempty"` +} + +// MachineManagerSelectorApplyConfiguration constructs a declarative configuration of the MachineManagerSelector type for use with +// apply. +func MachineManagerSelector() *MachineManagerSelectorApplyConfiguration { + return &MachineManagerSelectorApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Mode field is set to the value of the last call. +func (b *MachineManagerSelectorApplyConfiguration) WithMode(value operatorv1.MachineManagerSelectorMode) *MachineManagerSelectorApplyConfiguration { + b.Mode = &value + return b +} + +// WithPartial sets the Partial field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Partial field is set to the value of the last call. +func (b *MachineManagerSelectorApplyConfiguration) WithPartial(value *PartialSelectorApplyConfiguration) *MachineManagerSelectorApplyConfiguration { + b.Partial = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/managedbootimages.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/managedbootimages.go new file mode 100644 index 0000000000..aa8f94463b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/managedbootimages.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ManagedBootImagesApplyConfiguration represents a declarative configuration of the ManagedBootImages type for use +// with apply. +type ManagedBootImagesApplyConfiguration struct { + MachineManagers []MachineManagerApplyConfiguration `json:"machineManagers,omitempty"` +} + +// ManagedBootImagesApplyConfiguration constructs a declarative configuration of the ManagedBootImages type for use with +// apply. +func ManagedBootImages() *ManagedBootImagesApplyConfiguration { + return &ManagedBootImagesApplyConfiguration{} +} + +// WithMachineManagers adds the given value to the MachineManagers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MachineManagers field. +func (b *ManagedBootImagesApplyConfiguration) WithMachineManagers(values ...*MachineManagerApplyConfiguration) *ManagedBootImagesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMachineManagers") + } + b.MachineManagers = append(b.MachineManagers, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/mtumigration.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/mtumigration.go new file mode 100644 index 0000000000..9db99100ee --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/mtumigration.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MTUMigrationApplyConfiguration represents a declarative configuration of the MTUMigration type for use +// with apply. +type MTUMigrationApplyConfiguration struct { + Network *MTUMigrationValuesApplyConfiguration `json:"network,omitempty"` + Machine *MTUMigrationValuesApplyConfiguration `json:"machine,omitempty"` +} + +// MTUMigrationApplyConfiguration constructs a declarative configuration of the MTUMigration type for use with +// apply. 
+func MTUMigration() *MTUMigrationApplyConfiguration { + return &MTUMigrationApplyConfiguration{} +} + +// WithNetwork sets the Network field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Network field is set to the value of the last call. +func (b *MTUMigrationApplyConfiguration) WithNetwork(value *MTUMigrationValuesApplyConfiguration) *MTUMigrationApplyConfiguration { + b.Network = value + return b +} + +// WithMachine sets the Machine field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Machine field is set to the value of the last call. +func (b *MTUMigrationApplyConfiguration) WithMachine(value *MTUMigrationValuesApplyConfiguration) *MTUMigrationApplyConfiguration { + b.Machine = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/mtumigrationvalues.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/mtumigrationvalues.go new file mode 100644 index 0000000000..8d346f25f4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/mtumigrationvalues.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MTUMigrationValuesApplyConfiguration represents a declarative configuration of the MTUMigrationValues type for use +// with apply. +type MTUMigrationValuesApplyConfiguration struct { + To *uint32 `json:"to,omitempty"` + From *uint32 `json:"from,omitempty"` +} + +// MTUMigrationValuesApplyConfiguration constructs a declarative configuration of the MTUMigrationValues type for use with +// apply. +func MTUMigrationValues() *MTUMigrationValuesApplyConfiguration { + return &MTUMigrationValuesApplyConfiguration{} +} + +// WithTo sets the To field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the To field is set to the value of the last call. +func (b *MTUMigrationValuesApplyConfiguration) WithTo(value uint32) *MTUMigrationValuesApplyConfiguration { + b.To = &value + return b +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *MTUMigrationValuesApplyConfiguration) WithFrom(value uint32) *MTUMigrationValuesApplyConfiguration { + b.From = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/netflowconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/netflowconfig.go new file mode 100644 index 0000000000..868906043b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/netflowconfig.go @@ -0,0 +1,29 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NetFlowConfigApplyConfiguration represents a declarative configuration of the NetFlowConfig type for use +// with apply. 
+type NetFlowConfigApplyConfiguration struct { + Collectors []operatorv1.IPPort `json:"collectors,omitempty"` +} + +// NetFlowConfigApplyConfiguration constructs a declarative configuration of the NetFlowConfig type for use with +// apply. +func NetFlowConfig() *NetFlowConfigApplyConfiguration { + return &NetFlowConfigApplyConfiguration{} +} + +// WithCollectors adds the given value to the Collectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Collectors field. +func (b *NetFlowConfigApplyConfiguration) WithCollectors(values ...operatorv1.IPPort) *NetFlowConfigApplyConfiguration { + for i := range values { + b.Collectors = append(b.Collectors, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go new file mode 100644 index 0000000000..0bdf453af7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// NetworkApplyConfiguration represents a declarative configuration of the Network type for use +// with apply. +type NetworkApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *NetworkSpecApplyConfiguration `json:"spec,omitempty"` + Status *NetworkStatusApplyConfiguration `json:"status,omitempty"` +} + +// Network constructs a declarative configuration of the Network type for use with +// apply. +func Network(name string) *NetworkApplyConfiguration { + b := &NetworkApplyConfiguration{} + b.WithName(name) + b.WithKind("Network") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractNetwork extracts the applied configuration owned by fieldManager from +// network. If no managedFields are found in network for fieldManager, a +// NetworkApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// network must be a unmodified Network API object that was retrieved from the Kubernetes API. +// ExtractNetwork provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
+func ExtractNetwork(network *operatorv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { + return extractNetwork(network, fieldManager, "") +} + +// ExtractNetworkStatus is the same as ExtractNetwork except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractNetworkStatus(network *operatorv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) { + return extractNetwork(network, fieldManager, "status") +} + +func extractNetwork(network *operatorv1.Network, fieldManager string, subresource string) (*NetworkApplyConfiguration, error) { + b := &NetworkApplyConfiguration{} + err := managedfields.ExtractInto(network, internal.Parser().Type("com.github.openshift.api.operator.v1.Network"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(network.Name) + + b.WithKind("Network") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithKind(value string) *NetworkApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithAPIVersion(value string) *NetworkApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithName(value string) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithGenerateName(value string) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithNamespace(value string) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UID field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithUID(value types.UID) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithResourceVersion(value string) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithGeneration(value int64) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *NetworkApplyConfiguration) WithLabels(entries map[string]string) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *NetworkApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *NetworkApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *NetworkApplyConfiguration) WithFinalizers(values ...string) *NetworkApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *NetworkApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *NetworkApplyConfiguration) WithSpec(value *NetworkSpecApplyConfiguration) *NetworkApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *NetworkApplyConfiguration) WithStatus(value *NetworkStatusApplyConfiguration) *NetworkApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NetworkApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkmigration.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkmigration.go new file mode 100644 index 0000000000..bf753bb177 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkmigration.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NetworkMigrationApplyConfiguration represents a declarative configuration of the NetworkMigration type for use +// with apply. +type NetworkMigrationApplyConfiguration struct { + MTU *MTUMigrationApplyConfiguration `json:"mtu,omitempty"` + NetworkType *string `json:"networkType,omitempty"` + Features *FeaturesMigrationApplyConfiguration `json:"features,omitempty"` + Mode *operatorv1.NetworkMigrationMode `json:"mode,omitempty"` +} + +// NetworkMigrationApplyConfiguration constructs a declarative configuration of the NetworkMigration type for use with +// apply. +func NetworkMigration() *NetworkMigrationApplyConfiguration { + return &NetworkMigrationApplyConfiguration{} +} + +// WithMTU sets the MTU field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MTU field is set to the value of the last call. +func (b *NetworkMigrationApplyConfiguration) WithMTU(value *MTUMigrationApplyConfiguration) *NetworkMigrationApplyConfiguration { + b.MTU = value + return b +} + +// WithNetworkType sets the NetworkType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetworkType field is set to the value of the last call. +func (b *NetworkMigrationApplyConfiguration) WithNetworkType(value string) *NetworkMigrationApplyConfiguration { + b.NetworkType = &value + return b +} + +// WithFeatures sets the Features field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Features field is set to the value of the last call. +func (b *NetworkMigrationApplyConfiguration) WithFeatures(value *FeaturesMigrationApplyConfiguration) *NetworkMigrationApplyConfiguration { + b.Features = value + return b +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. 
+func (b *NetworkMigrationApplyConfiguration) WithMode(value operatorv1.NetworkMigrationMode) *NetworkMigrationApplyConfiguration { + b.Mode = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkspec.go new file mode 100644 index 0000000000..66803aa952 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkspec.go @@ -0,0 +1,180 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// NetworkSpecApplyConfiguration represents a declarative configuration of the NetworkSpec type for use +// with apply. +type NetworkSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` + ClusterNetwork []ClusterNetworkEntryApplyConfiguration `json:"clusterNetwork,omitempty"` + ServiceNetwork []string `json:"serviceNetwork,omitempty"` + DefaultNetwork *DefaultNetworkDefinitionApplyConfiguration `json:"defaultNetwork,omitempty"` + AdditionalNetworks []AdditionalNetworkDefinitionApplyConfiguration `json:"additionalNetworks,omitempty"` + DisableMultiNetwork *bool `json:"disableMultiNetwork,omitempty"` + UseMultiNetworkPolicy *bool `json:"useMultiNetworkPolicy,omitempty"` + DeployKubeProxy *bool `json:"deployKubeProxy,omitempty"` + DisableNetworkDiagnostics *bool `json:"disableNetworkDiagnostics,omitempty"` + KubeProxyConfig *ProxyConfigApplyConfiguration `json:"kubeProxyConfig,omitempty"` + ExportNetworkFlows *ExportNetworkFlowsApplyConfiguration `json:"exportNetworkFlows,omitempty"` + Migration *NetworkMigrationApplyConfiguration `json:"migration,omitempty"` + AdditionalRoutingCapabilities *AdditionalRoutingCapabilitiesApplyConfiguration `json:"additionalRoutingCapabilities,omitempty"` +} + +// NetworkSpecApplyConfiguration constructs a declarative configuration of the NetworkSpec type for use with +// apply. +func NetworkSpec() *NetworkSpecApplyConfiguration { + return &NetworkSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *NetworkSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *NetworkSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. 
+func (b *NetworkSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *NetworkSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *NetworkSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *NetworkSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} + +// WithClusterNetwork adds the given value to the ClusterNetwork field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ClusterNetwork field. +func (b *NetworkSpecApplyConfiguration) WithClusterNetwork(values ...*ClusterNetworkEntryApplyConfiguration) *NetworkSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithClusterNetwork") + } + b.ClusterNetwork = append(b.ClusterNetwork, *values[i]) + } + return b +} + +// WithServiceNetwork adds the given value to the ServiceNetwork field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ServiceNetwork field. +func (b *NetworkSpecApplyConfiguration) WithServiceNetwork(values ...string) *NetworkSpecApplyConfiguration { + for i := range values { + b.ServiceNetwork = append(b.ServiceNetwork, values[i]) + } + return b +} + +// WithDefaultNetwork sets the DefaultNetwork field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DefaultNetwork field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithDefaultNetwork(value *DefaultNetworkDefinitionApplyConfiguration) *NetworkSpecApplyConfiguration { + b.DefaultNetwork = value + return b +} + +// WithAdditionalNetworks adds the given value to the AdditionalNetworks field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalNetworks field. 
+func (b *NetworkSpecApplyConfiguration) WithAdditionalNetworks(values ...*AdditionalNetworkDefinitionApplyConfiguration) *NetworkSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAdditionalNetworks") + } + b.AdditionalNetworks = append(b.AdditionalNetworks, *values[i]) + } + return b +} + +// WithDisableMultiNetwork sets the DisableMultiNetwork field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DisableMultiNetwork field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithDisableMultiNetwork(value bool) *NetworkSpecApplyConfiguration { + b.DisableMultiNetwork = &value + return b +} + +// WithUseMultiNetworkPolicy sets the UseMultiNetworkPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UseMultiNetworkPolicy field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithUseMultiNetworkPolicy(value bool) *NetworkSpecApplyConfiguration { + b.UseMultiNetworkPolicy = &value + return b +} + +// WithDeployKubeProxy sets the DeployKubeProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeployKubeProxy field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithDeployKubeProxy(value bool) *NetworkSpecApplyConfiguration { + b.DeployKubeProxy = &value + return b +} + +// WithDisableNetworkDiagnostics sets the DisableNetworkDiagnostics field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DisableNetworkDiagnostics field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithDisableNetworkDiagnostics(value bool) *NetworkSpecApplyConfiguration { + b.DisableNetworkDiagnostics = &value + return b +} + +// WithKubeProxyConfig sets the KubeProxyConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KubeProxyConfig field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithKubeProxyConfig(value *ProxyConfigApplyConfiguration) *NetworkSpecApplyConfiguration { + b.KubeProxyConfig = value + return b +} + +// WithExportNetworkFlows sets the ExportNetworkFlows field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExportNetworkFlows field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithExportNetworkFlows(value *ExportNetworkFlowsApplyConfiguration) *NetworkSpecApplyConfiguration { + b.ExportNetworkFlows = value + return b +} + +// WithMigration sets the Migration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Migration field is set to the value of the last call. 
+func (b *NetworkSpecApplyConfiguration) WithMigration(value *NetworkMigrationApplyConfiguration) *NetworkSpecApplyConfiguration { + b.Migration = value + return b +} + +// WithAdditionalRoutingCapabilities sets the AdditionalRoutingCapabilities field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdditionalRoutingCapabilities field is set to the value of the last call. +func (b *NetworkSpecApplyConfiguration) WithAdditionalRoutingCapabilities(value *AdditionalRoutingCapabilitiesApplyConfiguration) *NetworkSpecApplyConfiguration { + b.AdditionalRoutingCapabilities = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkstatus.go new file mode 100644 index 0000000000..9753b2161b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/networkstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NetworkStatusApplyConfiguration represents a declarative configuration of the NetworkStatus type for use +// with apply. +type NetworkStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// NetworkStatusApplyConfiguration constructs a declarative configuration of the NetworkStatus type for use with +// apply. +func NetworkStatus() *NetworkStatusApplyConfiguration { + return &NetworkStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *NetworkStatusApplyConfiguration) WithObservedGeneration(value int64) *NetworkStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *NetworkStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *NetworkStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *NetworkStatusApplyConfiguration) WithVersion(value string) *NetworkStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *NetworkStatusApplyConfiguration) WithReadyReplicas(value int32) *NetworkStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *NetworkStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *NetworkStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *NetworkStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *NetworkStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyclusterstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyclusterstatus.go new file mode 100644 index 0000000000..1f0d765465 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyclusterstatus.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeDisruptionPolicyClusterStatusApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicyClusterStatus type for use +// with apply. +type NodeDisruptionPolicyClusterStatusApplyConfiguration struct { + Files []NodeDisruptionPolicyStatusFileApplyConfiguration `json:"files,omitempty"` + Units []NodeDisruptionPolicyStatusUnitApplyConfiguration `json:"units,omitempty"` + SSHKey *NodeDisruptionPolicyStatusSSHKeyApplyConfiguration `json:"sshkey,omitempty"` +} + +// NodeDisruptionPolicyClusterStatusApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicyClusterStatus type for use with +// apply. +func NodeDisruptionPolicyClusterStatus() *NodeDisruptionPolicyClusterStatusApplyConfiguration { + return &NodeDisruptionPolicyClusterStatusApplyConfiguration{} +} + +// WithFiles adds the given value to the Files field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Files field. 
+func (b *NodeDisruptionPolicyClusterStatusApplyConfiguration) WithFiles(values ...*NodeDisruptionPolicyStatusFileApplyConfiguration) *NodeDisruptionPolicyClusterStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithFiles") + } + b.Files = append(b.Files, *values[i]) + } + return b +} + +// WithUnits adds the given value to the Units field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Units field. +func (b *NodeDisruptionPolicyClusterStatusApplyConfiguration) WithUnits(values ...*NodeDisruptionPolicyStatusUnitApplyConfiguration) *NodeDisruptionPolicyClusterStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithUnits") + } + b.Units = append(b.Units, *values[i]) + } + return b +} + +// WithSSHKey sets the SSHKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SSHKey field is set to the value of the last call. +func (b *NodeDisruptionPolicyClusterStatusApplyConfiguration) WithSSHKey(value *NodeDisruptionPolicyStatusSSHKeyApplyConfiguration) *NodeDisruptionPolicyClusterStatusApplyConfiguration { + b.SSHKey = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyconfig.go new file mode 100644 index 0000000000..92db16f834 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyconfig.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeDisruptionPolicyConfigApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicyConfig type for use +// with apply. +type NodeDisruptionPolicyConfigApplyConfiguration struct { + Files []NodeDisruptionPolicySpecFileApplyConfiguration `json:"files,omitempty"` + Units []NodeDisruptionPolicySpecUnitApplyConfiguration `json:"units,omitempty"` + SSHKey *NodeDisruptionPolicySpecSSHKeyApplyConfiguration `json:"sshkey,omitempty"` +} + +// NodeDisruptionPolicyConfigApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicyConfig type for use with +// apply. +func NodeDisruptionPolicyConfig() *NodeDisruptionPolicyConfigApplyConfiguration { + return &NodeDisruptionPolicyConfigApplyConfiguration{} +} + +// WithFiles adds the given value to the Files field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Files field. +func (b *NodeDisruptionPolicyConfigApplyConfiguration) WithFiles(values ...*NodeDisruptionPolicySpecFileApplyConfiguration) *NodeDisruptionPolicyConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithFiles") + } + b.Files = append(b.Files, *values[i]) + } + return b +} + +// WithUnits adds the given value to the Units field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Units field. +func (b *NodeDisruptionPolicyConfigApplyConfiguration) WithUnits(values ...*NodeDisruptionPolicySpecUnitApplyConfiguration) *NodeDisruptionPolicyConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithUnits") + } + b.Units = append(b.Units, *values[i]) + } + return b +} + +// WithSSHKey sets the SSHKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SSHKey field is set to the value of the last call. +func (b *NodeDisruptionPolicyConfigApplyConfiguration) WithSSHKey(value *NodeDisruptionPolicySpecSSHKeyApplyConfiguration) *NodeDisruptionPolicyConfigApplyConfiguration { + b.SSHKey = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecaction.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecaction.go new file mode 100644 index 0000000000..2421469d84 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecaction.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NodeDisruptionPolicySpecActionApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicySpecAction type for use +// with apply. +type NodeDisruptionPolicySpecActionApplyConfiguration struct { + Type *operatorv1.NodeDisruptionPolicySpecActionType `json:"type,omitempty"` + Reload *ReloadServiceApplyConfiguration `json:"reload,omitempty"` + Restart *RestartServiceApplyConfiguration `json:"restart,omitempty"` +} + +// NodeDisruptionPolicySpecActionApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicySpecAction type for use with +// apply. +func NodeDisruptionPolicySpecAction() *NodeDisruptionPolicySpecActionApplyConfiguration { + return &NodeDisruptionPolicySpecActionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *NodeDisruptionPolicySpecActionApplyConfiguration) WithType(value operatorv1.NodeDisruptionPolicySpecActionType) *NodeDisruptionPolicySpecActionApplyConfiguration { + b.Type = &value + return b +} + +// WithReload sets the Reload field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reload field is set to the value of the last call. +func (b *NodeDisruptionPolicySpecActionApplyConfiguration) WithReload(value *ReloadServiceApplyConfiguration) *NodeDisruptionPolicySpecActionApplyConfiguration { + b.Reload = value + return b +} + +// WithRestart sets the Restart field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Restart field is set to the value of the last call. 
+func (b *NodeDisruptionPolicySpecActionApplyConfiguration) WithRestart(value *RestartServiceApplyConfiguration) *NodeDisruptionPolicySpecActionApplyConfiguration { + b.Restart = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecfile.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecfile.go new file mode 100644 index 0000000000..85884dc070 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecfile.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeDisruptionPolicySpecFileApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicySpecFile type for use +// with apply. +type NodeDisruptionPolicySpecFileApplyConfiguration struct { + Path *string `json:"path,omitempty"` + Actions []NodeDisruptionPolicySpecActionApplyConfiguration `json:"actions,omitempty"` +} + +// NodeDisruptionPolicySpecFileApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicySpecFile type for use with +// apply. +func NodeDisruptionPolicySpecFile() *NodeDisruptionPolicySpecFileApplyConfiguration { + return &NodeDisruptionPolicySpecFileApplyConfiguration{} +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Path field is set to the value of the last call. +func (b *NodeDisruptionPolicySpecFileApplyConfiguration) WithPath(value string) *NodeDisruptionPolicySpecFileApplyConfiguration { + b.Path = &value + return b +} + +// WithActions adds the given value to the Actions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Actions field. +func (b *NodeDisruptionPolicySpecFileApplyConfiguration) WithActions(values ...*NodeDisruptionPolicySpecActionApplyConfiguration) *NodeDisruptionPolicySpecFileApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithActions") + } + b.Actions = append(b.Actions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecsshkey.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecsshkey.go new file mode 100644 index 0000000000..b7ae1c75ee --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecsshkey.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeDisruptionPolicySpecSSHKeyApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicySpecSSHKey type for use +// with apply. +type NodeDisruptionPolicySpecSSHKeyApplyConfiguration struct { + Actions []NodeDisruptionPolicySpecActionApplyConfiguration `json:"actions,omitempty"` +} + +// NodeDisruptionPolicySpecSSHKeyApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicySpecSSHKey type for use with +// apply. 
+func NodeDisruptionPolicySpecSSHKey() *NodeDisruptionPolicySpecSSHKeyApplyConfiguration { + return &NodeDisruptionPolicySpecSSHKeyApplyConfiguration{} +} + +// WithActions adds the given value to the Actions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Actions field. +func (b *NodeDisruptionPolicySpecSSHKeyApplyConfiguration) WithActions(values ...*NodeDisruptionPolicySpecActionApplyConfiguration) *NodeDisruptionPolicySpecSSHKeyApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithActions") + } + b.Actions = append(b.Actions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecunit.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecunit.go new file mode 100644 index 0000000000..d368f1c0c1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicyspecunit.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NodeDisruptionPolicySpecUnitApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicySpecUnit type for use +// with apply. +type NodeDisruptionPolicySpecUnitApplyConfiguration struct { + Name *operatorv1.NodeDisruptionPolicyServiceName `json:"name,omitempty"` + Actions []NodeDisruptionPolicySpecActionApplyConfiguration `json:"actions,omitempty"` +} + +// NodeDisruptionPolicySpecUnitApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicySpecUnit type for use with +// apply. +func NodeDisruptionPolicySpecUnit() *NodeDisruptionPolicySpecUnitApplyConfiguration { + return &NodeDisruptionPolicySpecUnitApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *NodeDisruptionPolicySpecUnitApplyConfiguration) WithName(value operatorv1.NodeDisruptionPolicyServiceName) *NodeDisruptionPolicySpecUnitApplyConfiguration { + b.Name = &value + return b +} + +// WithActions adds the given value to the Actions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Actions field. 
+func (b *NodeDisruptionPolicySpecUnitApplyConfiguration) WithActions(values ...*NodeDisruptionPolicySpecActionApplyConfiguration) *NodeDisruptionPolicySpecUnitApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithActions") + } + b.Actions = append(b.Actions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatus.go new file mode 100644 index 0000000000..cf424c3fc6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatus.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeDisruptionPolicyStatusApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicyStatus type for use +// with apply. +type NodeDisruptionPolicyStatusApplyConfiguration struct { + ClusterPolicies *NodeDisruptionPolicyClusterStatusApplyConfiguration `json:"clusterPolicies,omitempty"` +} + +// NodeDisruptionPolicyStatusApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicyStatus type for use with +// apply. +func NodeDisruptionPolicyStatus() *NodeDisruptionPolicyStatusApplyConfiguration { + return &NodeDisruptionPolicyStatusApplyConfiguration{} +} + +// WithClusterPolicies sets the ClusterPolicies field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterPolicies field is set to the value of the last call. +func (b *NodeDisruptionPolicyStatusApplyConfiguration) WithClusterPolicies(value *NodeDisruptionPolicyClusterStatusApplyConfiguration) *NodeDisruptionPolicyStatusApplyConfiguration { + b.ClusterPolicies = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusaction.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusaction.go new file mode 100644 index 0000000000..05afe97a6c --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusaction.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NodeDisruptionPolicyStatusActionApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicyStatusAction type for use +// with apply. +type NodeDisruptionPolicyStatusActionApplyConfiguration struct { + Type *operatorv1.NodeDisruptionPolicyStatusActionType `json:"type,omitempty"` + Reload *ReloadServiceApplyConfiguration `json:"reload,omitempty"` + Restart *RestartServiceApplyConfiguration `json:"restart,omitempty"` +} + +// NodeDisruptionPolicyStatusActionApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicyStatusAction type for use with +// apply. +func NodeDisruptionPolicyStatusAction() *NodeDisruptionPolicyStatusActionApplyConfiguration { + return &NodeDisruptionPolicyStatusActionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Type field is set to the value of the last call. +func (b *NodeDisruptionPolicyStatusActionApplyConfiguration) WithType(value operatorv1.NodeDisruptionPolicyStatusActionType) *NodeDisruptionPolicyStatusActionApplyConfiguration { + b.Type = &value + return b +} + +// WithReload sets the Reload field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reload field is set to the value of the last call. +func (b *NodeDisruptionPolicyStatusActionApplyConfiguration) WithReload(value *ReloadServiceApplyConfiguration) *NodeDisruptionPolicyStatusActionApplyConfiguration { + b.Reload = value + return b +} + +// WithRestart sets the Restart field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Restart field is set to the value of the last call. +func (b *NodeDisruptionPolicyStatusActionApplyConfiguration) WithRestart(value *RestartServiceApplyConfiguration) *NodeDisruptionPolicyStatusActionApplyConfiguration { + b.Restart = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusfile.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusfile.go new file mode 100644 index 0000000000..e1a0436f27 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusfile.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeDisruptionPolicyStatusFileApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicyStatusFile type for use +// with apply. +type NodeDisruptionPolicyStatusFileApplyConfiguration struct { + Path *string `json:"path,omitempty"` + Actions []NodeDisruptionPolicyStatusActionApplyConfiguration `json:"actions,omitempty"` +} + +// NodeDisruptionPolicyStatusFileApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicyStatusFile type for use with +// apply. +func NodeDisruptionPolicyStatusFile() *NodeDisruptionPolicyStatusFileApplyConfiguration { + return &NodeDisruptionPolicyStatusFileApplyConfiguration{} +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Path field is set to the value of the last call. +func (b *NodeDisruptionPolicyStatusFileApplyConfiguration) WithPath(value string) *NodeDisruptionPolicyStatusFileApplyConfiguration { + b.Path = &value + return b +} + +// WithActions adds the given value to the Actions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Actions field. 
+func (b *NodeDisruptionPolicyStatusFileApplyConfiguration) WithActions(values ...*NodeDisruptionPolicyStatusActionApplyConfiguration) *NodeDisruptionPolicyStatusFileApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithActions") + } + b.Actions = append(b.Actions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatussshkey.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatussshkey.go new file mode 100644 index 0000000000..4ad78a79d2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatussshkey.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeDisruptionPolicyStatusSSHKeyApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicyStatusSSHKey type for use +// with apply. +type NodeDisruptionPolicyStatusSSHKeyApplyConfiguration struct { + Actions []NodeDisruptionPolicyStatusActionApplyConfiguration `json:"actions,omitempty"` +} + +// NodeDisruptionPolicyStatusSSHKeyApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicyStatusSSHKey type for use with +// apply. +func NodeDisruptionPolicyStatusSSHKey() *NodeDisruptionPolicyStatusSSHKeyApplyConfiguration { + return &NodeDisruptionPolicyStatusSSHKeyApplyConfiguration{} +} + +// WithActions adds the given value to the Actions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Actions field. +func (b *NodeDisruptionPolicyStatusSSHKeyApplyConfiguration) WithActions(values ...*NodeDisruptionPolicyStatusActionApplyConfiguration) *NodeDisruptionPolicyStatusSSHKeyApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithActions") + } + b.Actions = append(b.Actions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusunit.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusunit.go new file mode 100644 index 0000000000..5d97a26615 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodedisruptionpolicystatusunit.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NodeDisruptionPolicyStatusUnitApplyConfiguration represents a declarative configuration of the NodeDisruptionPolicyStatusUnit type for use +// with apply. +type NodeDisruptionPolicyStatusUnitApplyConfiguration struct { + Name *operatorv1.NodeDisruptionPolicyServiceName `json:"name,omitempty"` + Actions []NodeDisruptionPolicyStatusActionApplyConfiguration `json:"actions,omitempty"` +} + +// NodeDisruptionPolicyStatusUnitApplyConfiguration constructs a declarative configuration of the NodeDisruptionPolicyStatusUnit type for use with +// apply. 
+func NodeDisruptionPolicyStatusUnit() *NodeDisruptionPolicyStatusUnitApplyConfiguration { + return &NodeDisruptionPolicyStatusUnitApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *NodeDisruptionPolicyStatusUnitApplyConfiguration) WithName(value operatorv1.NodeDisruptionPolicyServiceName) *NodeDisruptionPolicyStatusUnitApplyConfiguration { + b.Name = &value + return b +} + +// WithActions adds the given value to the Actions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Actions field. +func (b *NodeDisruptionPolicyStatusUnitApplyConfiguration) WithActions(values ...*NodeDisruptionPolicyStatusActionApplyConfiguration) *NodeDisruptionPolicyStatusUnitApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithActions") + } + b.Actions = append(b.Actions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeplacement.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeplacement.go new file mode 100644 index 0000000000..a9fca29635 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeplacement.go @@ -0,0 +1,39 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// NodePlacementApplyConfiguration represents a declarative configuration of the NodePlacement type for use +// with apply. +type NodePlacementApplyConfiguration struct { + NodeSelector *metav1.LabelSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// NodePlacementApplyConfiguration constructs a declarative configuration of the NodePlacement type for use with +// apply. +func NodePlacement() *NodePlacementApplyConfiguration { + return &NodePlacementApplyConfiguration{} +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *NodePlacementApplyConfiguration) WithNodeSelector(value *metav1.LabelSelectorApplyConfiguration) *NodePlacementApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. 
+func (b *NodePlacementApplyConfiguration) WithTolerations(values ...corev1.Toleration) *NodePlacementApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeportstrategy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeportstrategy.go new file mode 100644 index 0000000000..cb3f65687e --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodeportstrategy.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NodePortStrategyApplyConfiguration represents a declarative configuration of the NodePortStrategy type for use +// with apply. +type NodePortStrategyApplyConfiguration struct { + Protocol *operatorv1.IngressControllerProtocol `json:"protocol,omitempty"` +} + +// NodePortStrategyApplyConfiguration constructs a declarative configuration of the NodePortStrategy type for use with +// apply. +func NodePortStrategy() *NodePortStrategyApplyConfiguration { + return &NodePortStrategyApplyConfiguration{} +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. +func (b *NodePortStrategyApplyConfiguration) WithProtocol(value operatorv1.IngressControllerProtocol) *NodePortStrategyApplyConfiguration { + b.Protocol = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodestatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodestatus.go new file mode 100644 index 0000000000..3c53a88f0e --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nodestatus.go @@ -0,0 +1,101 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use +// with apply. +type NodeStatusApplyConfiguration struct { + NodeName *string `json:"nodeName,omitempty"` + CurrentRevision *int32 `json:"currentRevision,omitempty"` + TargetRevision *int32 `json:"targetRevision,omitempty"` + LastFailedRevision *int32 `json:"lastFailedRevision,omitempty"` + LastFailedTime *metav1.Time `json:"lastFailedTime,omitempty"` + LastFailedReason *string `json:"lastFailedReason,omitempty"` + LastFailedCount *int `json:"lastFailedCount,omitempty"` + LastFallbackCount *int `json:"lastFallbackCount,omitempty"` + LastFailedRevisionErrors []string `json:"lastFailedRevisionErrors,omitempty"` +} + +// NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with +// apply. +func NodeStatus() *NodeStatusApplyConfiguration { + return &NodeStatusApplyConfiguration{} +} + +// WithNodeName sets the NodeName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeName field is set to the value of the last call. 
+func (b *NodeStatusApplyConfiguration) WithNodeName(value string) *NodeStatusApplyConfiguration { + b.NodeName = &value + return b +} + +// WithCurrentRevision sets the CurrentRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CurrentRevision field is set to the value of the last call. +func (b *NodeStatusApplyConfiguration) WithCurrentRevision(value int32) *NodeStatusApplyConfiguration { + b.CurrentRevision = &value + return b +} + +// WithTargetRevision sets the TargetRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetRevision field is set to the value of the last call. +func (b *NodeStatusApplyConfiguration) WithTargetRevision(value int32) *NodeStatusApplyConfiguration { + b.TargetRevision = &value + return b +} + +// WithLastFailedRevision sets the LastFailedRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastFailedRevision field is set to the value of the last call. +func (b *NodeStatusApplyConfiguration) WithLastFailedRevision(value int32) *NodeStatusApplyConfiguration { + b.LastFailedRevision = &value + return b +} + +// WithLastFailedTime sets the LastFailedTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastFailedTime field is set to the value of the last call. +func (b *NodeStatusApplyConfiguration) WithLastFailedTime(value metav1.Time) *NodeStatusApplyConfiguration { + b.LastFailedTime = &value + return b +} + +// WithLastFailedReason sets the LastFailedReason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastFailedReason field is set to the value of the last call. +func (b *NodeStatusApplyConfiguration) WithLastFailedReason(value string) *NodeStatusApplyConfiguration { + b.LastFailedReason = &value + return b +} + +// WithLastFailedCount sets the LastFailedCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastFailedCount field is set to the value of the last call. +func (b *NodeStatusApplyConfiguration) WithLastFailedCount(value int) *NodeStatusApplyConfiguration { + b.LastFailedCount = &value + return b +} + +// WithLastFallbackCount sets the LastFallbackCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastFallbackCount field is set to the value of the last call. +func (b *NodeStatusApplyConfiguration) WithLastFallbackCount(value int) *NodeStatusApplyConfiguration { + b.LastFallbackCount = &value + return b +} + +// WithLastFailedRevisionErrors adds the given value to the LastFailedRevisionErrors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the LastFailedRevisionErrors field. +func (b *NodeStatusApplyConfiguration) WithLastFailedRevisionErrors(values ...string) *NodeStatusApplyConfiguration { + for i := range values { + b.LastFailedRevisionErrors = append(b.LastFailedRevisionErrors, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/oauthapiserverstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/oauthapiserverstatus.go new file mode 100644 index 0000000000..68f43886aa --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/oauthapiserverstatus.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OAuthAPIServerStatusApplyConfiguration represents a declarative configuration of the OAuthAPIServerStatus type for use +// with apply. +type OAuthAPIServerStatusApplyConfiguration struct { + LatestAvailableRevision *int32 `json:"latestAvailableRevision,omitempty"` +} + +// OAuthAPIServerStatusApplyConfiguration constructs a declarative configuration of the OAuthAPIServerStatus type for use with +// apply. +func OAuthAPIServerStatus() *OAuthAPIServerStatusApplyConfiguration { + return &OAuthAPIServerStatusApplyConfiguration{} +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *OAuthAPIServerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *OAuthAPIServerStatusApplyConfiguration { + b.LatestAvailableRevision = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go new file mode 100644 index 0000000000..93b3c4e79a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// OLMApplyConfiguration represents a declarative configuration of the OLM type for use +// with apply. +type OLMApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OLMSpecApplyConfiguration `json:"spec,omitempty"` + Status *OLMStatusApplyConfiguration `json:"status,omitempty"` +} + +// OLM constructs a declarative configuration of the OLM type for use with +// apply. +func OLM(name string) *OLMApplyConfiguration { + b := &OLMApplyConfiguration{} + b.WithName(name) + b.WithKind("OLM") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractOLM extracts the applied configuration owned by fieldManager from +// oLM. 
If no managedFields are found in oLM for fieldManager, a +// OLMApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// oLM must be a unmodified OLM API object that was retrieved from the Kubernetes API. +// ExtractOLM provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractOLM(oLM *operatorv1.OLM, fieldManager string) (*OLMApplyConfiguration, error) { + return extractOLM(oLM, fieldManager, "") +} + +// ExtractOLMStatus is the same as ExtractOLM except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractOLMStatus(oLM *operatorv1.OLM, fieldManager string) (*OLMApplyConfiguration, error) { + return extractOLM(oLM, fieldManager, "status") +} + +func extractOLM(oLM *operatorv1.OLM, fieldManager string, subresource string) (*OLMApplyConfiguration, error) { + b := &OLMApplyConfiguration{} + err := managedfields.ExtractInto(oLM, internal.Parser().Type("com.github.openshift.api.operator.v1.OLM"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(oLM.Name) + + b.WithKind("OLM") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithKind(value string) *OLMApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithAPIVersion(value string) *OLMApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithName(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *OLMApplyConfiguration) WithGenerateName(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithNamespace(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithUID(value types.UID) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithResourceVersion(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithGeneration(value int64) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *OLMApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *OLMApplyConfiguration) WithLabels(entries map[string]string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *OLMApplyConfiguration) WithAnnotations(entries map[string]string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *OLMApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *OLMApplyConfiguration) WithFinalizers(values ...string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *OLMApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Spec field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithSpec(value *OLMSpecApplyConfiguration) *OLMApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithStatus(value *OLMStatusApplyConfiguration) *OLMApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *OLMApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go new file mode 100644 index 0000000000..69309aa491 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OLMSpecApplyConfiguration represents a declarative configuration of the OLMSpec type for use +// with apply. +type OLMSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// OLMSpecApplyConfiguration constructs a declarative configuration of the OLMSpec type for use with +// apply. +func OLMSpec() *OLMSpecApplyConfiguration { + return &OLMSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *OLMSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *OLMSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *OLMSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *OLMSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *OLMSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go new file mode 100644 index 0000000000..3c2d4a184c --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OLMStatusApplyConfiguration represents a declarative configuration of the OLMStatus type for use +// with apply. +type OLMStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// OLMStatusApplyConfiguration constructs a declarative configuration of the OLMStatus type for use with +// apply. +func OLMStatus() *OLMStatusApplyConfiguration { + return &OLMStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithObservedGeneration(value int64) *OLMStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *OLMStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *OLMStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithVersion(value string) *OLMStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithReadyReplicas(value int32) *OLMStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *OLMStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *OLMStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *OLMStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go new file mode 100644 index 0000000000..f78286043a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// OpenShiftAPIServerApplyConfiguration represents a declarative configuration of the OpenShiftAPIServer type for use +// with apply. +type OpenShiftAPIServerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OpenShiftAPIServerSpecApplyConfiguration `json:"spec,omitempty"` + Status *OpenShiftAPIServerStatusApplyConfiguration `json:"status,omitempty"` +} + +// OpenShiftAPIServer constructs a declarative configuration of the OpenShiftAPIServer type for use with +// apply. +func OpenShiftAPIServer(name string) *OpenShiftAPIServerApplyConfiguration { + b := &OpenShiftAPIServerApplyConfiguration{} + b.WithName(name) + b.WithKind("OpenShiftAPIServer") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractOpenShiftAPIServer extracts the applied configuration owned by fieldManager from +// openShiftAPIServer. If no managedFields are found in openShiftAPIServer for fieldManager, a +// OpenShiftAPIServerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. 
It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// openShiftAPIServer must be a unmodified OpenShiftAPIServer API object that was retrieved from the Kubernetes API. +// ExtractOpenShiftAPIServer provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractOpenShiftAPIServer(openShiftAPIServer *operatorv1.OpenShiftAPIServer, fieldManager string) (*OpenShiftAPIServerApplyConfiguration, error) { + return extractOpenShiftAPIServer(openShiftAPIServer, fieldManager, "") +} + +// ExtractOpenShiftAPIServerStatus is the same as ExtractOpenShiftAPIServer except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractOpenShiftAPIServerStatus(openShiftAPIServer *operatorv1.OpenShiftAPIServer, fieldManager string) (*OpenShiftAPIServerApplyConfiguration, error) { + return extractOpenShiftAPIServer(openShiftAPIServer, fieldManager, "status") +} + +func extractOpenShiftAPIServer(openShiftAPIServer *operatorv1.OpenShiftAPIServer, fieldManager string, subresource string) (*OpenShiftAPIServerApplyConfiguration, error) { + b := &OpenShiftAPIServerApplyConfiguration{} + err := managedfields.ExtractInto(openShiftAPIServer, internal.Parser().Type("com.github.openshift.api.operator.v1.OpenShiftAPIServer"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(openShiftAPIServer.Name) + + b.WithKind("OpenShiftAPIServer") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithKind(value string) *OpenShiftAPIServerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithAPIVersion(value string) *OpenShiftAPIServerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithName(value string) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithGenerateName(value string) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithNamespace(value string) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithUID(value types.UID) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithResourceVersion(value string) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithGeneration(value int64) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *OpenShiftAPIServerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *OpenShiftAPIServerApplyConfiguration) WithLabels(entries map[string]string) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *OpenShiftAPIServerApplyConfiguration) WithAnnotations(entries map[string]string) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *OpenShiftAPIServerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *OpenShiftAPIServerApplyConfiguration) WithFinalizers(values ...string) *OpenShiftAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *OpenShiftAPIServerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithSpec(value *OpenShiftAPIServerSpecApplyConfiguration) *OpenShiftAPIServerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *OpenShiftAPIServerApplyConfiguration) WithStatus(value *OpenShiftAPIServerStatusApplyConfiguration) *OpenShiftAPIServerApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *OpenShiftAPIServerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverspec.go new file mode 100644 index 0000000000..562b43032d --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OpenShiftAPIServerSpecApplyConfiguration represents a declarative configuration of the OpenShiftAPIServerSpec type for use +// with apply. +type OpenShiftAPIServerSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// OpenShiftAPIServerSpecApplyConfiguration constructs a declarative configuration of the OpenShiftAPIServerSpec type for use with +// apply. +func OpenShiftAPIServerSpec() *OpenShiftAPIServerSpecApplyConfiguration { + return &OpenShiftAPIServerSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *OpenShiftAPIServerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *OpenShiftAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *OpenShiftAPIServerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *OpenShiftAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *OpenShiftAPIServerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *OpenShiftAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *OpenShiftAPIServerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *OpenShiftAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *OpenShiftAPIServerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *OpenShiftAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverstatus.go new file mode 100644 index 0000000000..776701d546 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserverstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OpenShiftAPIServerStatusApplyConfiguration represents a declarative configuration of the OpenShiftAPIServerStatus type for use +// with apply. +type OpenShiftAPIServerStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// OpenShiftAPIServerStatusApplyConfiguration constructs a declarative configuration of the OpenShiftAPIServerStatus type for use with +// apply. +func OpenShiftAPIServerStatus() *OpenShiftAPIServerStatusApplyConfiguration { + return &OpenShiftAPIServerStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. 
+func (b *OpenShiftAPIServerStatusApplyConfiguration) WithObservedGeneration(value int64) *OpenShiftAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *OpenShiftAPIServerStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *OpenShiftAPIServerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *OpenShiftAPIServerStatusApplyConfiguration) WithVersion(value string) *OpenShiftAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *OpenShiftAPIServerStatusApplyConfiguration) WithReadyReplicas(value int32) *OpenShiftAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *OpenShiftAPIServerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *OpenShiftAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. 
+func (b *OpenShiftAPIServerStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *OpenShiftAPIServerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go new file mode 100644 index 0000000000..d8dbb4848f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// OpenShiftControllerManagerApplyConfiguration represents a declarative configuration of the OpenShiftControllerManager type for use +// with apply. +type OpenShiftControllerManagerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OpenShiftControllerManagerSpecApplyConfiguration `json:"spec,omitempty"` + Status *OpenShiftControllerManagerStatusApplyConfiguration `json:"status,omitempty"` +} + +// OpenShiftControllerManager constructs a declarative configuration of the OpenShiftControllerManager type for use with +// apply. +func OpenShiftControllerManager(name string) *OpenShiftControllerManagerApplyConfiguration { + b := &OpenShiftControllerManagerApplyConfiguration{} + b.WithName(name) + b.WithKind("OpenShiftControllerManager") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractOpenShiftControllerManager extracts the applied configuration owned by fieldManager from +// openShiftControllerManager. If no managedFields are found in openShiftControllerManager for fieldManager, a +// OpenShiftControllerManagerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// openShiftControllerManager must be a unmodified OpenShiftControllerManager API object that was retrieved from the Kubernetes API. +// ExtractOpenShiftControllerManager provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
+func ExtractOpenShiftControllerManager(openShiftControllerManager *operatorv1.OpenShiftControllerManager, fieldManager string) (*OpenShiftControllerManagerApplyConfiguration, error) { + return extractOpenShiftControllerManager(openShiftControllerManager, fieldManager, "") +} + +// ExtractOpenShiftControllerManagerStatus is the same as ExtractOpenShiftControllerManager except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractOpenShiftControllerManagerStatus(openShiftControllerManager *operatorv1.OpenShiftControllerManager, fieldManager string) (*OpenShiftControllerManagerApplyConfiguration, error) { + return extractOpenShiftControllerManager(openShiftControllerManager, fieldManager, "status") +} + +func extractOpenShiftControllerManager(openShiftControllerManager *operatorv1.OpenShiftControllerManager, fieldManager string, subresource string) (*OpenShiftControllerManagerApplyConfiguration, error) { + b := &OpenShiftControllerManagerApplyConfiguration{} + err := managedfields.ExtractInto(openShiftControllerManager, internal.Parser().Type("com.github.openshift.api.operator.v1.OpenShiftControllerManager"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(openShiftControllerManager.Name) + + b.WithKind("OpenShiftControllerManager") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OpenShiftControllerManagerApplyConfiguration) WithKind(value string) *OpenShiftControllerManagerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OpenShiftControllerManagerApplyConfiguration) WithAPIVersion(value string) *OpenShiftControllerManagerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OpenShiftControllerManagerApplyConfiguration) WithName(value string) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *OpenShiftControllerManagerApplyConfiguration) WithGenerateName(value string) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *OpenShiftControllerManagerApplyConfiguration) WithNamespace(value string) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *OpenShiftControllerManagerApplyConfiguration) WithUID(value types.UID) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OpenShiftControllerManagerApplyConfiguration) WithResourceVersion(value string) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OpenShiftControllerManagerApplyConfiguration) WithGeneration(value int64) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *OpenShiftControllerManagerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *OpenShiftControllerManagerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *OpenShiftControllerManagerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *OpenShiftControllerManagerApplyConfiguration) WithLabels(entries map[string]string) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *OpenShiftControllerManagerApplyConfiguration) WithAnnotations(entries map[string]string) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *OpenShiftControllerManagerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *OpenShiftControllerManagerApplyConfiguration) WithFinalizers(values ...string) *OpenShiftControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *OpenShiftControllerManagerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *OpenShiftControllerManagerApplyConfiguration) WithSpec(value *OpenShiftControllerManagerSpecApplyConfiguration) *OpenShiftControllerManagerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *OpenShiftControllerManagerApplyConfiguration) WithStatus(value *OpenShiftControllerManagerStatusApplyConfiguration) *OpenShiftControllerManagerApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *OpenShiftControllerManagerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerspec.go new file mode 100644 index 0000000000..5c6a0ff506 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OpenShiftControllerManagerSpecApplyConfiguration represents a declarative configuration of the OpenShiftControllerManagerSpec type for use +// with apply. +type OpenShiftControllerManagerSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// OpenShiftControllerManagerSpecApplyConfiguration constructs a declarative configuration of the OpenShiftControllerManagerSpec type for use with +// apply. +func OpenShiftControllerManagerSpec() *OpenShiftControllerManagerSpecApplyConfiguration { + return &OpenShiftControllerManagerSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. 
+func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *OpenShiftControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *OpenShiftControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *OpenShiftControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *OpenShiftControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *OpenShiftControllerManagerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *OpenShiftControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerstatus.go new file mode 100644 index 0000000000..c5b960e234 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanagerstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OpenShiftControllerManagerStatusApplyConfiguration represents a declarative configuration of the OpenShiftControllerManagerStatus type for use +// with apply. +type OpenShiftControllerManagerStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// OpenShiftControllerManagerStatusApplyConfiguration constructs a declarative configuration of the OpenShiftControllerManagerStatus type for use with +// apply. 
+func OpenShiftControllerManagerStatus() *OpenShiftControllerManagerStatusApplyConfiguration { + return &OpenShiftControllerManagerStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithObservedGeneration(value int64) *OpenShiftControllerManagerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *OpenShiftControllerManagerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithVersion(value string) *OpenShiftControllerManagerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithReadyReplicas(value int32) *OpenShiftControllerManagerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *OpenShiftControllerManagerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. 
+func (b *OpenShiftControllerManagerStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *OpenShiftControllerManagerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftsdnconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftsdnconfig.go new file mode 100644 index 0000000000..b1dd640f69 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftsdnconfig.go @@ -0,0 +1,63 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// OpenShiftSDNConfigApplyConfiguration represents a declarative configuration of the OpenShiftSDNConfig type for use +// with apply. +type OpenShiftSDNConfigApplyConfiguration struct { + Mode *operatorv1.SDNMode `json:"mode,omitempty"` + VXLANPort *uint32 `json:"vxlanPort,omitempty"` + MTU *uint32 `json:"mtu,omitempty"` + UseExternalOpenvswitch *bool `json:"useExternalOpenvswitch,omitempty"` + EnableUnidling *bool `json:"enableUnidling,omitempty"` +} + +// OpenShiftSDNConfigApplyConfiguration constructs a declarative configuration of the OpenShiftSDNConfig type for use with +// apply. +func OpenShiftSDNConfig() *OpenShiftSDNConfigApplyConfiguration { + return &OpenShiftSDNConfigApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *OpenShiftSDNConfigApplyConfiguration) WithMode(value operatorv1.SDNMode) *OpenShiftSDNConfigApplyConfiguration { + b.Mode = &value + return b +} + +// WithVXLANPort sets the VXLANPort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VXLANPort field is set to the value of the last call. +func (b *OpenShiftSDNConfigApplyConfiguration) WithVXLANPort(value uint32) *OpenShiftSDNConfigApplyConfiguration { + b.VXLANPort = &value + return b +} + +// WithMTU sets the MTU field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MTU field is set to the value of the last call. +func (b *OpenShiftSDNConfigApplyConfiguration) WithMTU(value uint32) *OpenShiftSDNConfigApplyConfiguration { + b.MTU = &value + return b +} + +// WithUseExternalOpenvswitch sets the UseExternalOpenvswitch field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UseExternalOpenvswitch field is set to the value of the last call. 
+func (b *OpenShiftSDNConfigApplyConfiguration) WithUseExternalOpenvswitch(value bool) *OpenShiftSDNConfigApplyConfiguration { + b.UseExternalOpenvswitch = &value + return b +} + +// WithEnableUnidling sets the EnableUnidling field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EnableUnidling field is set to the value of the last call. +func (b *OpenShiftSDNConfigApplyConfiguration) WithEnableUnidling(value bool) *OpenShiftSDNConfigApplyConfiguration { + b.EnableUnidling = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go new file mode 100644 index 0000000000..811b2330b7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OpenStackLoadBalancerParametersApplyConfiguration represents a declarative configuration of the OpenStackLoadBalancerParameters type for use +// with apply. +type OpenStackLoadBalancerParametersApplyConfiguration struct { + FloatingIP *string `json:"floatingIP,omitempty"` +} + +// OpenStackLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the OpenStackLoadBalancerParameters type for use with +// apply. +func OpenStackLoadBalancerParameters() *OpenStackLoadBalancerParametersApplyConfiguration { + return &OpenStackLoadBalancerParametersApplyConfiguration{} +} + +// WithFloatingIP sets the FloatingIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FloatingIP field is set to the value of the last call. +func (b *OpenStackLoadBalancerParametersApplyConfiguration) WithFloatingIP(value string) *OpenStackLoadBalancerParametersApplyConfiguration { + b.FloatingIP = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorcondition.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorcondition.go new file mode 100644 index 0000000000..57bffabd26 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorcondition.go @@ -0,0 +1,64 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// OperatorConditionApplyConfiguration represents a declarative configuration of the OperatorCondition type for use +// with apply. +type OperatorConditionApplyConfiguration struct { + Type *string `json:"type,omitempty"` + Status *operatorv1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// OperatorConditionApplyConfiguration constructs a declarative configuration of the OperatorCondition type for use with +// apply. 
+func OperatorCondition() *OperatorConditionApplyConfiguration { + return &OperatorConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *OperatorConditionApplyConfiguration) WithType(value string) *OperatorConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *OperatorConditionApplyConfiguration) WithStatus(value operatorv1.ConditionStatus) *OperatorConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *OperatorConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *OperatorConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *OperatorConditionApplyConfiguration) WithReason(value string) *OperatorConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *OperatorConditionApplyConfiguration) WithMessage(value string) *OperatorConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorspec.go new file mode 100644 index 0000000000..6be07d4173 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorspec.go @@ -0,0 +1,64 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OperatorSpecApplyConfiguration represents a declarative configuration of the OperatorSpec type for use +// with apply. 
+type OperatorSpecApplyConfiguration struct { + ManagementState *operatorv1.ManagementState `json:"managementState,omitempty"` + LogLevel *operatorv1.LogLevel `json:"logLevel,omitempty"` + OperatorLogLevel *operatorv1.LogLevel `json:"operatorLogLevel,omitempty"` + UnsupportedConfigOverrides *runtime.RawExtension `json:"unsupportedConfigOverrides,omitempty"` + ObservedConfig *runtime.RawExtension `json:"observedConfig,omitempty"` +} + +// OperatorSpecApplyConfiguration constructs a declarative configuration of the OperatorSpec type for use with +// apply. +func OperatorSpec() *OperatorSpecApplyConfiguration { + return &OperatorSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *OperatorSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *OperatorSpecApplyConfiguration { + b.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *OperatorSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *OperatorSpecApplyConfiguration { + b.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *OperatorSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *OperatorSpecApplyConfiguration { + b.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *OperatorSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *OperatorSpecApplyConfiguration { + b.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *OperatorSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *OperatorSpecApplyConfiguration { + b.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorstatus.go new file mode 100644 index 0000000000..45b43e453a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/operatorstatus.go @@ -0,0 +1,78 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// OperatorStatusApplyConfiguration represents a declarative configuration of the OperatorStatus type for use +// with apply. +type OperatorStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + Conditions []OperatorConditionApplyConfiguration `json:"conditions,omitempty"` + Version *string `json:"version,omitempty"` + ReadyReplicas *int32 `json:"readyReplicas,omitempty"` + LatestAvailableRevision *int32 `json:"latestAvailableRevision,omitempty"` + Generations []GenerationStatusApplyConfiguration `json:"generations,omitempty"` +} + +// OperatorStatusApplyConfiguration constructs a declarative configuration of the OperatorStatus type for use with +// apply. +func OperatorStatus() *OperatorStatusApplyConfiguration { + return &OperatorStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *OperatorStatusApplyConfiguration) WithObservedGeneration(value int64) *OperatorStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *OperatorStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *OperatorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *OperatorStatusApplyConfiguration) WithVersion(value string) *OperatorStatusApplyConfiguration { + b.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *OperatorStatusApplyConfiguration) WithReadyReplicas(value int32) *OperatorStatusApplyConfiguration { + b.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *OperatorStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *OperatorStatusApplyConfiguration { + b.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *OperatorStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *OperatorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.Generations = append(b.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ovnkubernetesconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ovnkubernetesconfig.go new file mode 100644 index 0000000000..9d878cea91 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ovnkubernetesconfig.go @@ -0,0 +1,126 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// OVNKubernetesConfigApplyConfiguration represents a declarative configuration of the OVNKubernetesConfig type for use +// with apply. +type OVNKubernetesConfigApplyConfiguration struct { + MTU *uint32 `json:"mtu,omitempty"` + GenevePort *uint32 `json:"genevePort,omitempty"` + HybridOverlayConfig *HybridOverlayConfigApplyConfiguration `json:"hybridOverlayConfig,omitempty"` + IPsecConfig *IPsecConfigApplyConfiguration `json:"ipsecConfig,omitempty"` + PolicyAuditConfig *PolicyAuditConfigApplyConfiguration `json:"policyAuditConfig,omitempty"` + GatewayConfig *GatewayConfigApplyConfiguration `json:"gatewayConfig,omitempty"` + V4InternalSubnet *string `json:"v4InternalSubnet,omitempty"` + V6InternalSubnet *string `json:"v6InternalSubnet,omitempty"` + EgressIPConfig *EgressIPConfigApplyConfiguration `json:"egressIPConfig,omitempty"` + IPv4 *IPv4OVNKubernetesConfigApplyConfiguration `json:"ipv4,omitempty"` + IPv6 *IPv6OVNKubernetesConfigApplyConfiguration `json:"ipv6,omitempty"` + RouteAdvertisements *operatorv1.RouteAdvertisementsEnablement `json:"routeAdvertisements,omitempty"` +} + +// OVNKubernetesConfigApplyConfiguration constructs a declarative configuration of the OVNKubernetesConfig type for use with +// apply. +func OVNKubernetesConfig() *OVNKubernetesConfigApplyConfiguration { + return &OVNKubernetesConfigApplyConfiguration{} +} + +// WithMTU sets the MTU field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MTU field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithMTU(value uint32) *OVNKubernetesConfigApplyConfiguration { + b.MTU = &value + return b +} + +// WithGenevePort sets the GenevePort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenevePort field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithGenevePort(value uint32) *OVNKubernetesConfigApplyConfiguration { + b.GenevePort = &value + return b +} + +// WithHybridOverlayConfig sets the HybridOverlayConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HybridOverlayConfig field is set to the value of the last call. 
+func (b *OVNKubernetesConfigApplyConfiguration) WithHybridOverlayConfig(value *HybridOverlayConfigApplyConfiguration) *OVNKubernetesConfigApplyConfiguration { + b.HybridOverlayConfig = value + return b +} + +// WithIPsecConfig sets the IPsecConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPsecConfig field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithIPsecConfig(value *IPsecConfigApplyConfiguration) *OVNKubernetesConfigApplyConfiguration { + b.IPsecConfig = value + return b +} + +// WithPolicyAuditConfig sets the PolicyAuditConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyAuditConfig field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithPolicyAuditConfig(value *PolicyAuditConfigApplyConfiguration) *OVNKubernetesConfigApplyConfiguration { + b.PolicyAuditConfig = value + return b +} + +// WithGatewayConfig sets the GatewayConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GatewayConfig field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithGatewayConfig(value *GatewayConfigApplyConfiguration) *OVNKubernetesConfigApplyConfiguration { + b.GatewayConfig = value + return b +} + +// WithV4InternalSubnet sets the V4InternalSubnet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the V4InternalSubnet field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithV4InternalSubnet(value string) *OVNKubernetesConfigApplyConfiguration { + b.V4InternalSubnet = &value + return b +} + +// WithV6InternalSubnet sets the V6InternalSubnet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the V6InternalSubnet field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithV6InternalSubnet(value string) *OVNKubernetesConfigApplyConfiguration { + b.V6InternalSubnet = &value + return b +} + +// WithEgressIPConfig sets the EgressIPConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EgressIPConfig field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithEgressIPConfig(value *EgressIPConfigApplyConfiguration) *OVNKubernetesConfigApplyConfiguration { + b.EgressIPConfig = value + return b +} + +// WithIPv4 sets the IPv4 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv4 field is set to the value of the last call. 
+func (b *OVNKubernetesConfigApplyConfiguration) WithIPv4(value *IPv4OVNKubernetesConfigApplyConfiguration) *OVNKubernetesConfigApplyConfiguration { + b.IPv4 = value + return b +} + +// WithIPv6 sets the IPv6 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv6 field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithIPv6(value *IPv6OVNKubernetesConfigApplyConfiguration) *OVNKubernetesConfigApplyConfiguration { + b.IPv6 = value + return b +} + +// WithRouteAdvertisements sets the RouteAdvertisements field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RouteAdvertisements field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithRouteAdvertisements(value operatorv1.RouteAdvertisementsEnablement) *OVNKubernetesConfigApplyConfiguration { + b.RouteAdvertisements = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/partialselector.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/partialselector.go new file mode 100644 index 0000000000..885c402797 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/partialselector.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PartialSelectorApplyConfiguration represents a declarative configuration of the PartialSelector type for use +// with apply. +type PartialSelectorApplyConfiguration struct { + MachineResourceSelector *metav1.LabelSelectorApplyConfiguration `json:"machineResourceSelector,omitempty"` +} + +// PartialSelectorApplyConfiguration constructs a declarative configuration of the PartialSelector type for use with +// apply. +func PartialSelector() *PartialSelectorApplyConfiguration { + return &PartialSelectorApplyConfiguration{} +} + +// WithMachineResourceSelector sets the MachineResourceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MachineResourceSelector field is set to the value of the last call. +func (b *PartialSelectorApplyConfiguration) WithMachineResourceSelector(value *metav1.LabelSelectorApplyConfiguration) *PartialSelectorApplyConfiguration { + b.MachineResourceSelector = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspective.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspective.go new file mode 100644 index 0000000000..1e59477b39 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspective.go @@ -0,0 +1,53 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PerspectiveApplyConfiguration represents a declarative configuration of the Perspective type for use +// with apply. 
+type PerspectiveApplyConfiguration struct { + ID *string `json:"id,omitempty"` + Visibility *PerspectiveVisibilityApplyConfiguration `json:"visibility,omitempty"` + PinnedResources *[]PinnedResourceReferenceApplyConfiguration `json:"pinnedResources,omitempty"` +} + +// PerspectiveApplyConfiguration constructs a declarative configuration of the Perspective type for use with +// apply. +func Perspective() *PerspectiveApplyConfiguration { + return &PerspectiveApplyConfiguration{} +} + +// WithID sets the ID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ID field is set to the value of the last call. +func (b *PerspectiveApplyConfiguration) WithID(value string) *PerspectiveApplyConfiguration { + b.ID = &value + return b +} + +// WithVisibility sets the Visibility field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Visibility field is set to the value of the last call. +func (b *PerspectiveApplyConfiguration) WithVisibility(value *PerspectiveVisibilityApplyConfiguration) *PerspectiveApplyConfiguration { + b.Visibility = value + return b +} + +func (b *PerspectiveApplyConfiguration) ensurePinnedResourceReferenceApplyConfigurationExists() { + if b.PinnedResources == nil { + b.PinnedResources = &[]PinnedResourceReferenceApplyConfiguration{} + } +} + +// WithPinnedResources adds the given value to the PinnedResources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PinnedResources field. +func (b *PerspectiveApplyConfiguration) WithPinnedResources(values ...*PinnedResourceReferenceApplyConfiguration) *PerspectiveApplyConfiguration { + b.ensurePinnedResourceReferenceApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPinnedResources") + } + *b.PinnedResources = append(*b.PinnedResources, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go new file mode 100644 index 0000000000..2225574191 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// PerspectiveVisibilityApplyConfiguration represents a declarative configuration of the PerspectiveVisibility type for use +// with apply. +type PerspectiveVisibilityApplyConfiguration struct { + State *operatorv1.PerspectiveState `json:"state,omitempty"` + AccessReview *ResourceAttributesAccessReviewApplyConfiguration `json:"accessReview,omitempty"` +} + +// PerspectiveVisibilityApplyConfiguration constructs a declarative configuration of the PerspectiveVisibility type for use with +// apply. 
+func PerspectiveVisibility() *PerspectiveVisibilityApplyConfiguration { + return &PerspectiveVisibilityApplyConfiguration{} +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. +func (b *PerspectiveVisibilityApplyConfiguration) WithState(value operatorv1.PerspectiveState) *PerspectiveVisibilityApplyConfiguration { + b.State = &value + return b +} + +// WithAccessReview sets the AccessReview field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AccessReview field is set to the value of the last call. +func (b *PerspectiveVisibilityApplyConfiguration) WithAccessReview(value *ResourceAttributesAccessReviewApplyConfiguration) *PerspectiveVisibilityApplyConfiguration { + b.AccessReview = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/pinnedresourcereference.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/pinnedresourcereference.go new file mode 100644 index 0000000000..7a041847c1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/pinnedresourcereference.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PinnedResourceReferenceApplyConfiguration represents a declarative configuration of the PinnedResourceReference type for use +// with apply. +type PinnedResourceReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Version *string `json:"version,omitempty"` + Resource *string `json:"resource,omitempty"` +} + +// PinnedResourceReferenceApplyConfiguration constructs a declarative configuration of the PinnedResourceReference type for use with +// apply. +func PinnedResourceReference() *PinnedResourceReferenceApplyConfiguration { + return &PinnedResourceReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *PinnedResourceReferenceApplyConfiguration) WithGroup(value string) *PinnedResourceReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *PinnedResourceReferenceApplyConfiguration) WithVersion(value string) *PinnedResourceReferenceApplyConfiguration { + b.Version = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. 
+func (b *PinnedResourceReferenceApplyConfiguration) WithResource(value string) *PinnedResourceReferenceApplyConfiguration { + b.Resource = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/policyauditconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/policyauditconfig.go new file mode 100644 index 0000000000..2886a4e413 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/policyauditconfig.go @@ -0,0 +1,59 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PolicyAuditConfigApplyConfiguration represents a declarative configuration of the PolicyAuditConfig type for use +// with apply. +type PolicyAuditConfigApplyConfiguration struct { + RateLimit *uint32 `json:"rateLimit,omitempty"` + MaxFileSize *uint32 `json:"maxFileSize,omitempty"` + MaxLogFiles *int32 `json:"maxLogFiles,omitempty"` + Destination *string `json:"destination,omitempty"` + SyslogFacility *string `json:"syslogFacility,omitempty"` +} + +// PolicyAuditConfigApplyConfiguration constructs a declarative configuration of the PolicyAuditConfig type for use with +// apply. +func PolicyAuditConfig() *PolicyAuditConfigApplyConfiguration { + return &PolicyAuditConfigApplyConfiguration{} +} + +// WithRateLimit sets the RateLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RateLimit field is set to the value of the last call. +func (b *PolicyAuditConfigApplyConfiguration) WithRateLimit(value uint32) *PolicyAuditConfigApplyConfiguration { + b.RateLimit = &value + return b +} + +// WithMaxFileSize sets the MaxFileSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxFileSize field is set to the value of the last call. +func (b *PolicyAuditConfigApplyConfiguration) WithMaxFileSize(value uint32) *PolicyAuditConfigApplyConfiguration { + b.MaxFileSize = &value + return b +} + +// WithMaxLogFiles sets the MaxLogFiles field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxLogFiles field is set to the value of the last call. +func (b *PolicyAuditConfigApplyConfiguration) WithMaxLogFiles(value int32) *PolicyAuditConfigApplyConfiguration { + b.MaxLogFiles = &value + return b +} + +// WithDestination sets the Destination field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Destination field is set to the value of the last call. +func (b *PolicyAuditConfigApplyConfiguration) WithDestination(value string) *PolicyAuditConfigApplyConfiguration { + b.Destination = &value + return b +} + +// WithSyslogFacility sets the SyslogFacility field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SyslogFacility field is set to the value of the last call. 
+func (b *PolicyAuditConfigApplyConfiguration) WithSyslogFacility(value string) *PolicyAuditConfigApplyConfiguration { + b.SyslogFacility = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/privatestrategy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/privatestrategy.go new file mode 100644 index 0000000000..4115713802 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/privatestrategy.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// PrivateStrategyApplyConfiguration represents a declarative configuration of the PrivateStrategy type for use +// with apply. +type PrivateStrategyApplyConfiguration struct { + Protocol *operatorv1.IngressControllerProtocol `json:"protocol,omitempty"` +} + +// PrivateStrategyApplyConfiguration constructs a declarative configuration of the PrivateStrategy type for use with +// apply. +func PrivateStrategy() *PrivateStrategyApplyConfiguration { + return &PrivateStrategyApplyConfiguration{} +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. +func (b *PrivateStrategyApplyConfiguration) WithProtocol(value operatorv1.IngressControllerProtocol) *PrivateStrategyApplyConfiguration { + b.Protocol = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/projectaccess.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/projectaccess.go new file mode 100644 index 0000000000..22d5dc4897 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/projectaccess.go @@ -0,0 +1,25 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ProjectAccessApplyConfiguration represents a declarative configuration of the ProjectAccess type for use +// with apply. +type ProjectAccessApplyConfiguration struct { + AvailableClusterRoles []string `json:"availableClusterRoles,omitempty"` +} + +// ProjectAccessApplyConfiguration constructs a declarative configuration of the ProjectAccess type for use with +// apply. +func ProjectAccess() *ProjectAccessApplyConfiguration { + return &ProjectAccessApplyConfiguration{} +} + +// WithAvailableClusterRoles adds the given value to the AvailableClusterRoles field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AvailableClusterRoles field. 
+func (b *ProjectAccessApplyConfiguration) WithAvailableClusterRoles(values ...string) *ProjectAccessApplyConfiguration { + for i := range values { + b.AvailableClusterRoles = append(b.AvailableClusterRoles, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go new file mode 100644 index 0000000000..0812e69747 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go @@ -0,0 +1,63 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// ProviderLoadBalancerParametersApplyConfiguration represents a declarative configuration of the ProviderLoadBalancerParameters type for use +// with apply. +type ProviderLoadBalancerParametersApplyConfiguration struct { + Type *operatorv1.LoadBalancerProviderType `json:"type,omitempty"` + AWS *AWSLoadBalancerParametersApplyConfiguration `json:"aws,omitempty"` + GCP *GCPLoadBalancerParametersApplyConfiguration `json:"gcp,omitempty"` + IBM *IBMLoadBalancerParametersApplyConfiguration `json:"ibm,omitempty"` + OpenStack *OpenStackLoadBalancerParametersApplyConfiguration `json:"openstack,omitempty"` +} + +// ProviderLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the ProviderLoadBalancerParameters type for use with +// apply. +func ProviderLoadBalancerParameters() *ProviderLoadBalancerParametersApplyConfiguration { + return &ProviderLoadBalancerParametersApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *ProviderLoadBalancerParametersApplyConfiguration) WithType(value operatorv1.LoadBalancerProviderType) *ProviderLoadBalancerParametersApplyConfiguration { + b.Type = &value + return b +} + +// WithAWS sets the AWS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AWS field is set to the value of the last call. +func (b *ProviderLoadBalancerParametersApplyConfiguration) WithAWS(value *AWSLoadBalancerParametersApplyConfiguration) *ProviderLoadBalancerParametersApplyConfiguration { + b.AWS = value + return b +} + +// WithGCP sets the GCP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GCP field is set to the value of the last call. +func (b *ProviderLoadBalancerParametersApplyConfiguration) WithGCP(value *GCPLoadBalancerParametersApplyConfiguration) *ProviderLoadBalancerParametersApplyConfiguration { + b.GCP = value + return b +} + +// WithIBM sets the IBM field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IBM field is set to the value of the last call. 
+func (b *ProviderLoadBalancerParametersApplyConfiguration) WithIBM(value *IBMLoadBalancerParametersApplyConfiguration) *ProviderLoadBalancerParametersApplyConfiguration { + b.IBM = value + return b +} + +// WithOpenStack sets the OpenStack field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpenStack field is set to the value of the last call. +func (b *ProviderLoadBalancerParametersApplyConfiguration) WithOpenStack(value *OpenStackLoadBalancerParametersApplyConfiguration) *ProviderLoadBalancerParametersApplyConfiguration { + b.OpenStack = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/proxyconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/proxyconfig.go new file mode 100644 index 0000000000..2565e38764 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/proxyconfig.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// ProxyConfigApplyConfiguration represents a declarative configuration of the ProxyConfig type for use +// with apply. +type ProxyConfigApplyConfiguration struct { + IptablesSyncPeriod *string `json:"iptablesSyncPeriod,omitempty"` + BindAddress *string `json:"bindAddress,omitempty"` + ProxyArguments map[string]operatorv1.ProxyArgumentList `json:"proxyArguments,omitempty"` +} + +// ProxyConfigApplyConfiguration constructs a declarative configuration of the ProxyConfig type for use with +// apply. +func ProxyConfig() *ProxyConfigApplyConfiguration { + return &ProxyConfigApplyConfiguration{} +} + +// WithIptablesSyncPeriod sets the IptablesSyncPeriod field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IptablesSyncPeriod field is set to the value of the last call. +func (b *ProxyConfigApplyConfiguration) WithIptablesSyncPeriod(value string) *ProxyConfigApplyConfiguration { + b.IptablesSyncPeriod = &value + return b +} + +// WithBindAddress sets the BindAddress field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BindAddress field is set to the value of the last call. +func (b *ProxyConfigApplyConfiguration) WithBindAddress(value string) *ProxyConfigApplyConfiguration { + b.BindAddress = &value + return b +} + +// WithProxyArguments puts the entries into the ProxyArguments field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the ProxyArguments field, +// overwriting an existing map entries in ProxyArguments field with the same key. 
+func (b *ProxyConfigApplyConfiguration) WithProxyArguments(entries map[string]operatorv1.ProxyArgumentList) *ProxyConfigApplyConfiguration { + if b.ProxyArguments == nil && len(entries) > 0 { + b.ProxyArguments = make(map[string]operatorv1.ProxyArgumentList, len(entries)) + } + for k, v := range entries { + b.ProxyArguments[k] = v + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/quickstarts.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/quickstarts.go new file mode 100644 index 0000000000..f32ee57080 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/quickstarts.go @@ -0,0 +1,25 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// QuickStartsApplyConfiguration represents a declarative configuration of the QuickStarts type for use +// with apply. +type QuickStartsApplyConfiguration struct { + Disabled []string `json:"disabled,omitempty"` +} + +// QuickStartsApplyConfiguration constructs a declarative configuration of the QuickStarts type for use with +// apply. +func QuickStarts() *QuickStartsApplyConfiguration { + return &QuickStartsApplyConfiguration{} +} + +// WithDisabled adds the given value to the Disabled field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Disabled field. +func (b *QuickStartsApplyConfiguration) WithDisabled(values ...string) *QuickStartsApplyConfiguration { + for i := range values { + b.Disabled = append(b.Disabled, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/reloadservice.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/reloadservice.go new file mode 100644 index 0000000000..aef55a1b18 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/reloadservice.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// ReloadServiceApplyConfiguration represents a declarative configuration of the ReloadService type for use +// with apply. +type ReloadServiceApplyConfiguration struct { + ServiceName *operatorv1.NodeDisruptionPolicyServiceName `json:"serviceName,omitempty"` +} + +// ReloadServiceApplyConfiguration constructs a declarative configuration of the ReloadService type for use with +// apply. +func ReloadService() *ReloadServiceApplyConfiguration { + return &ReloadServiceApplyConfiguration{} +} + +// WithServiceName sets the ServiceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceName field is set to the value of the last call. 
+func (b *ReloadServiceApplyConfiguration) WithServiceName(value operatorv1.NodeDisruptionPolicyServiceName) *ReloadServiceApplyConfiguration { + b.ServiceName = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go new file mode 100644 index 0000000000..96e749c5f8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go @@ -0,0 +1,40 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + authorizationv1 "k8s.io/api/authorization/v1" +) + +// ResourceAttributesAccessReviewApplyConfiguration represents a declarative configuration of the ResourceAttributesAccessReview type for use +// with apply. +type ResourceAttributesAccessReviewApplyConfiguration struct { + Required []authorizationv1.ResourceAttributes `json:"required,omitempty"` + Missing []authorizationv1.ResourceAttributes `json:"missing,omitempty"` +} + +// ResourceAttributesAccessReviewApplyConfiguration constructs a declarative configuration of the ResourceAttributesAccessReview type for use with +// apply. +func ResourceAttributesAccessReview() *ResourceAttributesAccessReviewApplyConfiguration { + return &ResourceAttributesAccessReviewApplyConfiguration{} +} + +// WithRequired adds the given value to the Required field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Required field. +func (b *ResourceAttributesAccessReviewApplyConfiguration) WithRequired(values ...authorizationv1.ResourceAttributes) *ResourceAttributesAccessReviewApplyConfiguration { + for i := range values { + b.Required = append(b.Required, values[i]) + } + return b +} + +// WithMissing adds the given value to the Missing field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Missing field. +func (b *ResourceAttributesAccessReviewApplyConfiguration) WithMissing(values ...authorizationv1.ResourceAttributes) *ResourceAttributesAccessReviewApplyConfiguration { + for i := range values { + b.Missing = append(b.Missing, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/restartservice.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/restartservice.go new file mode 100644 index 0000000000..36c43a116f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/restartservice.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// RestartServiceApplyConfiguration represents a declarative configuration of the RestartService type for use +// with apply. +type RestartServiceApplyConfiguration struct { + ServiceName *operatorv1.NodeDisruptionPolicyServiceName `json:"serviceName,omitempty"` +} + +// RestartServiceApplyConfiguration constructs a declarative configuration of the RestartService type for use with +// apply. 
+func RestartService() *RestartServiceApplyConfiguration { + return &RestartServiceApplyConfiguration{} +} + +// WithServiceName sets the ServiceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceName field is set to the value of the last call. +func (b *RestartServiceApplyConfiguration) WithServiceName(value operatorv1.NodeDisruptionPolicyServiceName) *RestartServiceApplyConfiguration { + b.ServiceName = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/routeadmissionpolicy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/routeadmissionpolicy.go new file mode 100644 index 0000000000..5faa3c56b4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/routeadmissionpolicy.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// RouteAdmissionPolicyApplyConfiguration represents a declarative configuration of the RouteAdmissionPolicy type for use +// with apply. +type RouteAdmissionPolicyApplyConfiguration struct { + NamespaceOwnership *operatorv1.NamespaceOwnershipCheck `json:"namespaceOwnership,omitempty"` + WildcardPolicy *operatorv1.WildcardPolicy `json:"wildcardPolicy,omitempty"` +} + +// RouteAdmissionPolicyApplyConfiguration constructs a declarative configuration of the RouteAdmissionPolicy type for use with +// apply. +func RouteAdmissionPolicy() *RouteAdmissionPolicyApplyConfiguration { + return &RouteAdmissionPolicyApplyConfiguration{} +} + +// WithNamespaceOwnership sets the NamespaceOwnership field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceOwnership field is set to the value of the last call. +func (b *RouteAdmissionPolicyApplyConfiguration) WithNamespaceOwnership(value operatorv1.NamespaceOwnershipCheck) *RouteAdmissionPolicyApplyConfiguration { + b.NamespaceOwnership = &value + return b +} + +// WithWildcardPolicy sets the WildcardPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WildcardPolicy field is set to the value of the last call. +func (b *RouteAdmissionPolicyApplyConfiguration) WithWildcardPolicy(value operatorv1.WildcardPolicy) *RouteAdmissionPolicyApplyConfiguration { + b.WildcardPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/server.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/server.go new file mode 100644 index 0000000000..e0eee5571b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/server.go @@ -0,0 +1,43 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ServerApplyConfiguration represents a declarative configuration of the Server type for use +// with apply. 
+type ServerApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Zones []string `json:"zones,omitempty"` + ForwardPlugin *ForwardPluginApplyConfiguration `json:"forwardPlugin,omitempty"` +} + +// ServerApplyConfiguration constructs a declarative configuration of the Server type for use with +// apply. +func Server() *ServerApplyConfiguration { + return &ServerApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServerApplyConfiguration) WithName(value string) *ServerApplyConfiguration { + b.Name = &value + return b +} + +// WithZones adds the given value to the Zones field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Zones field. +func (b *ServerApplyConfiguration) WithZones(values ...string) *ServerApplyConfiguration { + for i := range values { + b.Zones = append(b.Zones, values[i]) + } + return b +} + +// WithForwardPlugin sets the ForwardPlugin field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForwardPlugin field is set to the value of the last call. +func (b *ServerApplyConfiguration) WithForwardPlugin(value *ForwardPluginApplyConfiguration) *ServerApplyConfiguration { + b.ForwardPlugin = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go new file mode 100644 index 0000000000..f4a6de0b61 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ServiceAccountIssuerStatusApplyConfiguration represents a declarative configuration of the ServiceAccountIssuerStatus type for use +// with apply. +type ServiceAccountIssuerStatusApplyConfiguration struct { + Name *string `json:"name,omitempty"` + ExpirationTime *metav1.Time `json:"expirationTime,omitempty"` +} + +// ServiceAccountIssuerStatusApplyConfiguration constructs a declarative configuration of the ServiceAccountIssuerStatus type for use with +// apply. +func ServiceAccountIssuerStatus() *ServiceAccountIssuerStatusApplyConfiguration { + return &ServiceAccountIssuerStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceAccountIssuerStatusApplyConfiguration) WithName(value string) *ServiceAccountIssuerStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithExpirationTime sets the ExpirationTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ExpirationTime field is set to the value of the last call. +func (b *ServiceAccountIssuerStatusApplyConfiguration) WithExpirationTime(value metav1.Time) *ServiceAccountIssuerStatusApplyConfiguration { + b.ExpirationTime = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go new file mode 100644 index 0000000000..bb07573817 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCAApplyConfiguration represents a declarative configuration of the ServiceCA type for use +// with apply. +type ServiceCAApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceCASpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCAStatusApplyConfiguration `json:"status,omitempty"` +} + +// ServiceCA constructs a declarative configuration of the ServiceCA type for use with +// apply. +func ServiceCA(name string) *ServiceCAApplyConfiguration { + b := &ServiceCAApplyConfiguration{} + b.WithName(name) + b.WithKind("ServiceCA") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractServiceCA extracts the applied configuration owned by fieldManager from +// serviceCA. If no managedFields are found in serviceCA for fieldManager, a +// ServiceCAApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// serviceCA must be a unmodified ServiceCA API object that was retrieved from the Kubernetes API. +// ExtractServiceCA provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractServiceCA(serviceCA *operatorv1.ServiceCA, fieldManager string) (*ServiceCAApplyConfiguration, error) { + return extractServiceCA(serviceCA, fieldManager, "") +} + +// ExtractServiceCAStatus is the same as ExtractServiceCA except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractServiceCAStatus(serviceCA *operatorv1.ServiceCA, fieldManager string) (*ServiceCAApplyConfiguration, error) { + return extractServiceCA(serviceCA, fieldManager, "status") +} + +func extractServiceCA(serviceCA *operatorv1.ServiceCA, fieldManager string, subresource string) (*ServiceCAApplyConfiguration, error) { + b := &ServiceCAApplyConfiguration{} + err := managedfields.ExtractInto(serviceCA, internal.Parser().Type("com.github.openshift.api.operator.v1.ServiceCA"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(serviceCA.Name) + + b.WithKind("ServiceCA") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ServiceCAApplyConfiguration) WithKind(value string) *ServiceCAApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ServiceCAApplyConfiguration) WithAPIVersion(value string) *ServiceCAApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceCAApplyConfiguration) WithName(value string) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ServiceCAApplyConfiguration) WithGenerateName(value string) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceCAApplyConfiguration) WithNamespace(value string) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ServiceCAApplyConfiguration) WithUID(value types.UID) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ServiceCAApplyConfiguration) WithResourceVersion(value string) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ServiceCAApplyConfiguration) WithGeneration(value int64) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ServiceCAApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ServiceCAApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ServiceCAApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ServiceCAApplyConfiguration) WithLabels(entries map[string]string) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ServiceCAApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ServiceCAApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ServiceCAApplyConfiguration) WithFinalizers(values ...string) *ServiceCAApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ServiceCAApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ServiceCAApplyConfiguration) WithSpec(value *ServiceCASpecApplyConfiguration) *ServiceCAApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ServiceCAApplyConfiguration) WithStatus(value *ServiceCAStatusApplyConfiguration) *ServiceCAApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceCAApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecaspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecaspec.go new file mode 100644 index 0000000000..844041ef34 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecaspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// ServiceCASpecApplyConfiguration represents a declarative configuration of the ServiceCASpec type for use +// with apply. +type ServiceCASpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// ServiceCASpecApplyConfiguration constructs a declarative configuration of the ServiceCASpec type for use with +// apply. +func ServiceCASpec() *ServiceCASpecApplyConfiguration { + return &ServiceCASpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *ServiceCASpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ServiceCASpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *ServiceCASpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ServiceCASpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *ServiceCASpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ServiceCASpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. 
+func (b *ServiceCASpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ServiceCASpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *ServiceCASpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ServiceCASpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecastatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecastatus.go new file mode 100644 index 0000000000..957190e8bb --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecastatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ServiceCAStatusApplyConfiguration represents a declarative configuration of the ServiceCAStatus type for use +// with apply. +type ServiceCAStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// ServiceCAStatusApplyConfiguration constructs a declarative configuration of the ServiceCAStatus type for use with +// apply. +func ServiceCAStatus() *ServiceCAStatusApplyConfiguration { + return &ServiceCAStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *ServiceCAStatusApplyConfiguration) WithObservedGeneration(value int64) *ServiceCAStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ServiceCAStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *ServiceCAStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *ServiceCAStatusApplyConfiguration) WithVersion(value string) *ServiceCAStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *ServiceCAStatusApplyConfiguration) WithReadyReplicas(value int32) *ServiceCAStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *ServiceCAStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ServiceCAStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *ServiceCAStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *ServiceCAStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go new file mode 100644 index 0000000000..52981ca867 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCatalogAPIServerApplyConfiguration represents a declarative configuration of the ServiceCatalogAPIServer type for use +// with apply. +type ServiceCatalogAPIServerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceCatalogAPIServerSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCatalogAPIServerStatusApplyConfiguration `json:"status,omitempty"` +} + +// ServiceCatalogAPIServer constructs a declarative configuration of the ServiceCatalogAPIServer type for use with +// apply. +func ServiceCatalogAPIServer(name string) *ServiceCatalogAPIServerApplyConfiguration { + b := &ServiceCatalogAPIServerApplyConfiguration{} + b.WithName(name) + b.WithKind("ServiceCatalogAPIServer") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractServiceCatalogAPIServer extracts the applied configuration owned by fieldManager from +// serviceCatalogAPIServer. 
If no managedFields are found in serviceCatalogAPIServer for fieldManager, a +// ServiceCatalogAPIServerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// serviceCatalogAPIServer must be a unmodified ServiceCatalogAPIServer API object that was retrieved from the Kubernetes API. +// ExtractServiceCatalogAPIServer provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractServiceCatalogAPIServer(serviceCatalogAPIServer *operatorv1.ServiceCatalogAPIServer, fieldManager string) (*ServiceCatalogAPIServerApplyConfiguration, error) { + return extractServiceCatalogAPIServer(serviceCatalogAPIServer, fieldManager, "") +} + +// ExtractServiceCatalogAPIServerStatus is the same as ExtractServiceCatalogAPIServer except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractServiceCatalogAPIServerStatus(serviceCatalogAPIServer *operatorv1.ServiceCatalogAPIServer, fieldManager string) (*ServiceCatalogAPIServerApplyConfiguration, error) { + return extractServiceCatalogAPIServer(serviceCatalogAPIServer, fieldManager, "status") +} + +func extractServiceCatalogAPIServer(serviceCatalogAPIServer *operatorv1.ServiceCatalogAPIServer, fieldManager string, subresource string) (*ServiceCatalogAPIServerApplyConfiguration, error) { + b := &ServiceCatalogAPIServerApplyConfiguration{} + err := managedfields.ExtractInto(serviceCatalogAPIServer, internal.Parser().Type("com.github.openshift.api.operator.v1.ServiceCatalogAPIServer"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(serviceCatalogAPIServer.Name) + + b.WithKind("ServiceCatalogAPIServer") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithKind(value string) *ServiceCatalogAPIServerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithAPIVersion(value string) *ServiceCatalogAPIServerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *ServiceCatalogAPIServerApplyConfiguration) WithName(value string) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithGenerateName(value string) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithNamespace(value string) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithUID(value types.UID) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithResourceVersion(value string) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithGeneration(value int64) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ServiceCatalogAPIServerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithLabels(entries map[string]string) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ServiceCatalogAPIServerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithFinalizers(values ...string) *ServiceCatalogAPIServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ServiceCatalogAPIServerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithSpec(value *ServiceCatalogAPIServerSpecApplyConfiguration) *ServiceCatalogAPIServerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ServiceCatalogAPIServerApplyConfiguration) WithStatus(value *ServiceCatalogAPIServerStatusApplyConfiguration) *ServiceCatalogAPIServerApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceCatalogAPIServerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverspec.go new file mode 100644 index 0000000000..b5271a4094 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// ServiceCatalogAPIServerSpecApplyConfiguration represents a declarative configuration of the ServiceCatalogAPIServerSpec type for use +// with apply. +type ServiceCatalogAPIServerSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// ServiceCatalogAPIServerSpecApplyConfiguration constructs a declarative configuration of the ServiceCatalogAPIServerSpec type for use with +// apply. 
+func ServiceCatalogAPIServerSpec() *ServiceCatalogAPIServerSpecApplyConfiguration { + return &ServiceCatalogAPIServerSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ServiceCatalogAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ServiceCatalogAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ServiceCatalogAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ServiceCatalogAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *ServiceCatalogAPIServerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ServiceCatalogAPIServerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverstatus.go new file mode 100644 index 0000000000..a82e4e5f04 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserverstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ServiceCatalogAPIServerStatusApplyConfiguration represents a declarative configuration of the ServiceCatalogAPIServerStatus type for use +// with apply. 
+type ServiceCatalogAPIServerStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// ServiceCatalogAPIServerStatusApplyConfiguration constructs a declarative configuration of the ServiceCatalogAPIServerStatus type for use with +// apply. +func ServiceCatalogAPIServerStatus() *ServiceCatalogAPIServerStatusApplyConfiguration { + return &ServiceCatalogAPIServerStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithObservedGeneration(value int64) *ServiceCatalogAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *ServiceCatalogAPIServerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithVersion(value string) *ServiceCatalogAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithReadyReplicas(value int32) *ServiceCatalogAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ServiceCatalogAPIServerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. 
+func (b *ServiceCatalogAPIServerStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *ServiceCatalogAPIServerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go new file mode 100644 index 0000000000..f01957710a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCatalogControllerManagerApplyConfiguration represents a declarative configuration of the ServiceCatalogControllerManager type for use +// with apply. +type ServiceCatalogControllerManagerApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceCatalogControllerManagerSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCatalogControllerManagerStatusApplyConfiguration `json:"status,omitempty"` +} + +// ServiceCatalogControllerManager constructs a declarative configuration of the ServiceCatalogControllerManager type for use with +// apply. +func ServiceCatalogControllerManager(name string) *ServiceCatalogControllerManagerApplyConfiguration { + b := &ServiceCatalogControllerManagerApplyConfiguration{} + b.WithName(name) + b.WithKind("ServiceCatalogControllerManager") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractServiceCatalogControllerManager extracts the applied configuration owned by fieldManager from +// serviceCatalogControllerManager. If no managedFields are found in serviceCatalogControllerManager for fieldManager, a +// ServiceCatalogControllerManagerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// serviceCatalogControllerManager must be a unmodified ServiceCatalogControllerManager API object that was retrieved from the Kubernetes API. +// ExtractServiceCatalogControllerManager provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
+func ExtractServiceCatalogControllerManager(serviceCatalogControllerManager *operatorv1.ServiceCatalogControllerManager, fieldManager string) (*ServiceCatalogControllerManagerApplyConfiguration, error) { + return extractServiceCatalogControllerManager(serviceCatalogControllerManager, fieldManager, "") +} + +// ExtractServiceCatalogControllerManagerStatus is the same as ExtractServiceCatalogControllerManager except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractServiceCatalogControllerManagerStatus(serviceCatalogControllerManager *operatorv1.ServiceCatalogControllerManager, fieldManager string) (*ServiceCatalogControllerManagerApplyConfiguration, error) { + return extractServiceCatalogControllerManager(serviceCatalogControllerManager, fieldManager, "status") +} + +func extractServiceCatalogControllerManager(serviceCatalogControllerManager *operatorv1.ServiceCatalogControllerManager, fieldManager string, subresource string) (*ServiceCatalogControllerManagerApplyConfiguration, error) { + b := &ServiceCatalogControllerManagerApplyConfiguration{} + err := managedfields.ExtractInto(serviceCatalogControllerManager, internal.Parser().Type("com.github.openshift.api.operator.v1.ServiceCatalogControllerManager"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(serviceCatalogControllerManager.Name) + + b.WithKind("ServiceCatalogControllerManager") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithKind(value string) *ServiceCatalogControllerManagerApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithAPIVersion(value string) *ServiceCatalogControllerManagerApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithName(value string) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *ServiceCatalogControllerManagerApplyConfiguration) WithGenerateName(value string) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithNamespace(value string) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithUID(value types.UID) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithResourceVersion(value string) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithGeneration(value int64) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *ServiceCatalogControllerManagerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithLabels(entries map[string]string) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithFinalizers(values ...string) *ServiceCatalogControllerManagerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ServiceCatalogControllerManagerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithSpec(value *ServiceCatalogControllerManagerSpecApplyConfiguration) *ServiceCatalogControllerManagerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerApplyConfiguration) WithStatus(value *ServiceCatalogControllerManagerStatusApplyConfiguration) *ServiceCatalogControllerManagerApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceCatalogControllerManagerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerspec.go new file mode 100644 index 0000000000..83df00b3ba --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// ServiceCatalogControllerManagerSpecApplyConfiguration represents a declarative configuration of the ServiceCatalogControllerManagerSpec type for use +// with apply. +type ServiceCatalogControllerManagerSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// ServiceCatalogControllerManagerSpecApplyConfiguration constructs a declarative configuration of the ServiceCatalogControllerManagerSpec type for use with +// apply. +func ServiceCatalogControllerManagerSpec() *ServiceCatalogControllerManagerSpecApplyConfiguration { + return &ServiceCatalogControllerManagerSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. 
+func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *ServiceCatalogControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *ServiceCatalogControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *ServiceCatalogControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *ServiceCatalogControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *ServiceCatalogControllerManagerSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerstatus.go new file mode 100644 index 0000000000..d153702175 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanagerstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ServiceCatalogControllerManagerStatusApplyConfiguration represents a declarative configuration of the ServiceCatalogControllerManagerStatus type for use +// with apply. +type ServiceCatalogControllerManagerStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// ServiceCatalogControllerManagerStatusApplyConfiguration constructs a declarative configuration of the ServiceCatalogControllerManagerStatus type for use with +// apply. 
+func ServiceCatalogControllerManagerStatus() *ServiceCatalogControllerManagerStatusApplyConfiguration { + return &ServiceCatalogControllerManagerStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithObservedGeneration(value int64) *ServiceCatalogControllerManagerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *ServiceCatalogControllerManagerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithVersion(value string) *ServiceCatalogControllerManagerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithReadyReplicas(value int32) *ServiceCatalogControllerManagerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *ServiceCatalogControllerManagerStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. 
+func (b *ServiceCatalogControllerManagerStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *ServiceCatalogControllerManagerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/sflowconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/sflowconfig.go new file mode 100644 index 0000000000..350bfbd98a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/sflowconfig.go @@ -0,0 +1,29 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// SFlowConfigApplyConfiguration represents a declarative configuration of the SFlowConfig type for use +// with apply. +type SFlowConfigApplyConfiguration struct { + Collectors []operatorv1.IPPort `json:"collectors,omitempty"` +} + +// SFlowConfigApplyConfiguration constructs a declarative configuration of the SFlowConfig type for use with +// apply. +func SFlowConfig() *SFlowConfigApplyConfiguration { + return &SFlowConfigApplyConfiguration{} +} + +// WithCollectors adds the given value to the Collectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Collectors field. +func (b *SFlowConfigApplyConfiguration) WithCollectors(values ...operatorv1.IPPort) *SFlowConfigApplyConfiguration { + for i := range values { + b.Collectors = append(b.Collectors, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/simplemacvlanconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/simplemacvlanconfig.go new file mode 100644 index 0000000000..2594321af6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/simplemacvlanconfig.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// SimpleMacvlanConfigApplyConfiguration represents a declarative configuration of the SimpleMacvlanConfig type for use +// with apply. +type SimpleMacvlanConfigApplyConfiguration struct { + Master *string `json:"master,omitempty"` + IPAMConfig *IPAMConfigApplyConfiguration `json:"ipamConfig,omitempty"` + Mode *operatorv1.MacvlanMode `json:"mode,omitempty"` + MTU *uint32 `json:"mtu,omitempty"` +} + +// SimpleMacvlanConfigApplyConfiguration constructs a declarative configuration of the SimpleMacvlanConfig type for use with +// apply. +func SimpleMacvlanConfig() *SimpleMacvlanConfigApplyConfiguration { + return &SimpleMacvlanConfigApplyConfiguration{} +} + +// WithMaster sets the Master field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Master field is set to the value of the last call. 
+func (b *SimpleMacvlanConfigApplyConfiguration) WithMaster(value string) *SimpleMacvlanConfigApplyConfiguration { + b.Master = &value + return b +} + +// WithIPAMConfig sets the IPAMConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPAMConfig field is set to the value of the last call. +func (b *SimpleMacvlanConfigApplyConfiguration) WithIPAMConfig(value *IPAMConfigApplyConfiguration) *SimpleMacvlanConfigApplyConfiguration { + b.IPAMConfig = value + return b +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *SimpleMacvlanConfigApplyConfiguration) WithMode(value operatorv1.MacvlanMode) *SimpleMacvlanConfigApplyConfiguration { + b.Mode = &value + return b +} + +// WithMTU sets the MTU field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MTU field is set to the value of the last call. +func (b *SimpleMacvlanConfigApplyConfiguration) WithMTU(value uint32) *SimpleMacvlanConfigApplyConfiguration { + b.MTU = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamaddresses.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamaddresses.go new file mode 100644 index 0000000000..7b38e7a087 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamaddresses.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// StaticIPAMAddressesApplyConfiguration represents a declarative configuration of the StaticIPAMAddresses type for use +// with apply. +type StaticIPAMAddressesApplyConfiguration struct { + Address *string `json:"address,omitempty"` + Gateway *string `json:"gateway,omitempty"` +} + +// StaticIPAMAddressesApplyConfiguration constructs a declarative configuration of the StaticIPAMAddresses type for use with +// apply. +func StaticIPAMAddresses() *StaticIPAMAddressesApplyConfiguration { + return &StaticIPAMAddressesApplyConfiguration{} +} + +// WithAddress sets the Address field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Address field is set to the value of the last call. +func (b *StaticIPAMAddressesApplyConfiguration) WithAddress(value string) *StaticIPAMAddressesApplyConfiguration { + b.Address = &value + return b +} + +// WithGateway sets the Gateway field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Gateway field is set to the value of the last call. 
+func (b *StaticIPAMAddressesApplyConfiguration) WithGateway(value string) *StaticIPAMAddressesApplyConfiguration { + b.Gateway = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamconfig.go new file mode 100644 index 0000000000..acaf64799e --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamconfig.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// StaticIPAMConfigApplyConfiguration represents a declarative configuration of the StaticIPAMConfig type for use +// with apply. +type StaticIPAMConfigApplyConfiguration struct { + Addresses []StaticIPAMAddressesApplyConfiguration `json:"addresses,omitempty"` + Routes []StaticIPAMRoutesApplyConfiguration `json:"routes,omitempty"` + DNS *StaticIPAMDNSApplyConfiguration `json:"dns,omitempty"` +} + +// StaticIPAMConfigApplyConfiguration constructs a declarative configuration of the StaticIPAMConfig type for use with +// apply. +func StaticIPAMConfig() *StaticIPAMConfigApplyConfiguration { + return &StaticIPAMConfigApplyConfiguration{} +} + +// WithAddresses adds the given value to the Addresses field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Addresses field. +func (b *StaticIPAMConfigApplyConfiguration) WithAddresses(values ...*StaticIPAMAddressesApplyConfiguration) *StaticIPAMConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAddresses") + } + b.Addresses = append(b.Addresses, *values[i]) + } + return b +} + +// WithRoutes adds the given value to the Routes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Routes field. +func (b *StaticIPAMConfigApplyConfiguration) WithRoutes(values ...*StaticIPAMRoutesApplyConfiguration) *StaticIPAMConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRoutes") + } + b.Routes = append(b.Routes, *values[i]) + } + return b +} + +// WithDNS sets the DNS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNS field is set to the value of the last call. +func (b *StaticIPAMConfigApplyConfiguration) WithDNS(value *StaticIPAMDNSApplyConfiguration) *StaticIPAMConfigApplyConfiguration { + b.DNS = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamdns.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamdns.go new file mode 100644 index 0000000000..cf22aaabf9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamdns.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// StaticIPAMDNSApplyConfiguration represents a declarative configuration of the StaticIPAMDNS type for use +// with apply. 
+type StaticIPAMDNSApplyConfiguration struct { + Nameservers []string `json:"nameservers,omitempty"` + Domain *string `json:"domain,omitempty"` + Search []string `json:"search,omitempty"` +} + +// StaticIPAMDNSApplyConfiguration constructs a declarative configuration of the StaticIPAMDNS type for use with +// apply. +func StaticIPAMDNS() *StaticIPAMDNSApplyConfiguration { + return &StaticIPAMDNSApplyConfiguration{} +} + +// WithNameservers adds the given value to the Nameservers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Nameservers field. +func (b *StaticIPAMDNSApplyConfiguration) WithNameservers(values ...string) *StaticIPAMDNSApplyConfiguration { + for i := range values { + b.Nameservers = append(b.Nameservers, values[i]) + } + return b +} + +// WithDomain sets the Domain field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Domain field is set to the value of the last call. +func (b *StaticIPAMDNSApplyConfiguration) WithDomain(value string) *StaticIPAMDNSApplyConfiguration { + b.Domain = &value + return b +} + +// WithSearch adds the given value to the Search field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Search field. +func (b *StaticIPAMDNSApplyConfiguration) WithSearch(values ...string) *StaticIPAMDNSApplyConfiguration { + for i := range values { + b.Search = append(b.Search, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamroutes.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamroutes.go new file mode 100644 index 0000000000..d92b69b1be --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticipamroutes.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// StaticIPAMRoutesApplyConfiguration represents a declarative configuration of the StaticIPAMRoutes type for use +// with apply. +type StaticIPAMRoutesApplyConfiguration struct { + Destination *string `json:"destination,omitempty"` + Gateway *string `json:"gateway,omitempty"` +} + +// StaticIPAMRoutesApplyConfiguration constructs a declarative configuration of the StaticIPAMRoutes type for use with +// apply. +func StaticIPAMRoutes() *StaticIPAMRoutesApplyConfiguration { + return &StaticIPAMRoutesApplyConfiguration{} +} + +// WithDestination sets the Destination field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Destination field is set to the value of the last call. +func (b *StaticIPAMRoutesApplyConfiguration) WithDestination(value string) *StaticIPAMRoutesApplyConfiguration { + b.Destination = &value + return b +} + +// WithGateway sets the Gateway field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Gateway field is set to the value of the last call. 
+func (b *StaticIPAMRoutesApplyConfiguration) WithGateway(value string) *StaticIPAMRoutesApplyConfiguration { + b.Gateway = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorspec.go new file mode 100644 index 0000000000..b2434f8d72 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorspec.go @@ -0,0 +1,87 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// StaticPodOperatorSpecApplyConfiguration represents a declarative configuration of the StaticPodOperatorSpec type for use +// with apply. +type StaticPodOperatorSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` + ForceRedeploymentReason *string `json:"forceRedeploymentReason,omitempty"` + FailedRevisionLimit *int32 `json:"failedRevisionLimit,omitempty"` + SucceededRevisionLimit *int32 `json:"succeededRevisionLimit,omitempty"` +} + +// StaticPodOperatorSpecApplyConfiguration constructs a declarative configuration of the StaticPodOperatorSpec type for use with +// apply. +func StaticPodOperatorSpec() *StaticPodOperatorSpecApplyConfiguration { + return &StaticPodOperatorSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *StaticPodOperatorSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *StaticPodOperatorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *StaticPodOperatorSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *StaticPodOperatorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *StaticPodOperatorSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *StaticPodOperatorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. 
+func (b *StaticPodOperatorSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *StaticPodOperatorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *StaticPodOperatorSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *StaticPodOperatorSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} + +// WithForceRedeploymentReason sets the ForceRedeploymentReason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForceRedeploymentReason field is set to the value of the last call. +func (b *StaticPodOperatorSpecApplyConfiguration) WithForceRedeploymentReason(value string) *StaticPodOperatorSpecApplyConfiguration { + b.ForceRedeploymentReason = &value + return b +} + +// WithFailedRevisionLimit sets the FailedRevisionLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailedRevisionLimit field is set to the value of the last call. +func (b *StaticPodOperatorSpecApplyConfiguration) WithFailedRevisionLimit(value int32) *StaticPodOperatorSpecApplyConfiguration { + b.FailedRevisionLimit = &value + return b +} + +// WithSucceededRevisionLimit sets the SucceededRevisionLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SucceededRevisionLimit field is set to the value of the last call. +func (b *StaticPodOperatorSpecApplyConfiguration) WithSucceededRevisionLimit(value int32) *StaticPodOperatorSpecApplyConfiguration { + b.SucceededRevisionLimit = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorstatus.go new file mode 100644 index 0000000000..0067b78c73 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/staticpodoperatorstatus.go @@ -0,0 +1,96 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// StaticPodOperatorStatusApplyConfiguration represents a declarative configuration of the StaticPodOperatorStatus type for use +// with apply. +type StaticPodOperatorStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` + LatestAvailableRevisionReason *string `json:"latestAvailableRevisionReason,omitempty"` + NodeStatuses []NodeStatusApplyConfiguration `json:"nodeStatuses,omitempty"` +} + +// StaticPodOperatorStatusApplyConfiguration constructs a declarative configuration of the StaticPodOperatorStatus type for use with +// apply. 
+func StaticPodOperatorStatus() *StaticPodOperatorStatusApplyConfiguration { + return &StaticPodOperatorStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *StaticPodOperatorStatusApplyConfiguration) WithObservedGeneration(value int64) *StaticPodOperatorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *StaticPodOperatorStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *StaticPodOperatorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *StaticPodOperatorStatusApplyConfiguration) WithVersion(value string) *StaticPodOperatorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *StaticPodOperatorStatusApplyConfiguration) WithReadyReplicas(value int32) *StaticPodOperatorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *StaticPodOperatorStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *StaticPodOperatorStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. 
+func (b *StaticPodOperatorStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *StaticPodOperatorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} + +// WithLatestAvailableRevisionReason sets the LatestAvailableRevisionReason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevisionReason field is set to the value of the last call. +func (b *StaticPodOperatorStatusApplyConfiguration) WithLatestAvailableRevisionReason(value string) *StaticPodOperatorStatusApplyConfiguration { + b.LatestAvailableRevisionReason = &value + return b +} + +// WithNodeStatuses adds the given value to the NodeStatuses field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NodeStatuses field. +func (b *StaticPodOperatorStatusApplyConfiguration) WithNodeStatuses(values ...*NodeStatusApplyConfiguration) *StaticPodOperatorStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithNodeStatuses") + } + b.NodeStatuses = append(b.NodeStatuses, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/statuspageprovider.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/statuspageprovider.go new file mode 100644 index 0000000000..080a7ef36a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/statuspageprovider.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// StatuspageProviderApplyConfiguration represents a declarative configuration of the StatuspageProvider type for use +// with apply. +type StatuspageProviderApplyConfiguration struct { + PageID *string `json:"pageID,omitempty"` +} + +// StatuspageProviderApplyConfiguration constructs a declarative configuration of the StatuspageProvider type for use with +// apply. +func StatuspageProvider() *StatuspageProviderApplyConfiguration { + return &StatuspageProviderApplyConfiguration{} +} + +// WithPageID sets the PageID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PageID field is set to the value of the last call. +func (b *StatuspageProviderApplyConfiguration) WithPageID(value string) *StatuspageProviderApplyConfiguration { + b.PageID = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go new file mode 100644 index 0000000000..fe464c41e9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// StorageApplyConfiguration represents a declarative configuration of the Storage type for use +// with apply. +type StorageApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *StorageSpecApplyConfiguration `json:"spec,omitempty"` + Status *StorageStatusApplyConfiguration `json:"status,omitempty"` +} + +// Storage constructs a declarative configuration of the Storage type for use with +// apply. +func Storage(name string) *StorageApplyConfiguration { + b := &StorageApplyConfiguration{} + b.WithName(name) + b.WithKind("Storage") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractStorage extracts the applied configuration owned by fieldManager from +// storage. If no managedFields are found in storage for fieldManager, a +// StorageApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// storage must be a unmodified Storage API object that was retrieved from the Kubernetes API. +// ExtractStorage provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractStorage(storage *operatorv1.Storage, fieldManager string) (*StorageApplyConfiguration, error) { + return extractStorage(storage, fieldManager, "") +} + +// ExtractStorageStatus is the same as ExtractStorage except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractStorageStatus(storage *operatorv1.Storage, fieldManager string) (*StorageApplyConfiguration, error) { + return extractStorage(storage, fieldManager, "status") +} + +func extractStorage(storage *operatorv1.Storage, fieldManager string, subresource string) (*StorageApplyConfiguration, error) { + b := &StorageApplyConfiguration{} + err := managedfields.ExtractInto(storage, internal.Parser().Type("com.github.openshift.api.operator.v1.Storage"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(storage.Name) + + b.WithKind("Storage") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *StorageApplyConfiguration) WithKind(value string) *StorageApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *StorageApplyConfiguration) WithAPIVersion(value string) *StorageApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *StorageApplyConfiguration) WithName(value string) *StorageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *StorageApplyConfiguration) WithGenerateName(value string) *StorageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *StorageApplyConfiguration) WithNamespace(value string) *StorageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *StorageApplyConfiguration) WithUID(value types.UID) *StorageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *StorageApplyConfiguration) WithResourceVersion(value string) *StorageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *StorageApplyConfiguration) WithGeneration(value int64) *StorageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.Generation = &value
+	return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *StorageApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *StorageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+	return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *StorageApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *StorageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+	return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *StorageApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *StorageApplyConfiguration) WithLabels(entries map[string]string) *StorageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *StorageApplyConfiguration) WithAnnotations(entries map[string]string) *StorageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *StorageApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *StorageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *StorageApplyConfiguration) WithFinalizers(values ...string) *StorageApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+	}
+	return b
+}
+
+func (b *StorageApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+	if b.ObjectMetaApplyConfiguration == nil {
+		b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
+	}
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *StorageApplyConfiguration) WithSpec(value *StorageSpecApplyConfiguration) *StorageApplyConfiguration {
+	b.Spec = value
+	return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *StorageApplyConfiguration) WithStatus(value *StorageStatusApplyConfiguration) *StorageApplyConfiguration {
+	b.Status = value
+	return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *StorageApplyConfiguration) GetName() *string {
+	b.ensureObjectMetaApplyConfigurationExists()
+	return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go
new file mode 100644
index 0000000000..152ea2fe03
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go
@@ -0,0 +1,69 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+ +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// StorageSpecApplyConfiguration represents a declarative configuration of the StorageSpec type for use +// with apply. +type StorageSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` + VSphereStorageDriver *operatorv1.StorageDriverType `json:"vsphereStorageDriver,omitempty"` +} + +// StorageSpecApplyConfiguration constructs a declarative configuration of the StorageSpec type for use with +// apply. +func StorageSpec() *StorageSpecApplyConfiguration { + return &StorageSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *StorageSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *StorageSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *StorageSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *StorageSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *StorageSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *StorageSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *StorageSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *StorageSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *StorageSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *StorageSpecApplyConfiguration { + b.OperatorSpecApplyConfiguration.ObservedConfig = &value + return b +} + +// WithVSphereStorageDriver sets the VSphereStorageDriver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VSphereStorageDriver field is set to the value of the last call. 
+func (b *StorageSpecApplyConfiguration) WithVSphereStorageDriver(value operatorv1.StorageDriverType) *StorageSpecApplyConfiguration { + b.VSphereStorageDriver = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagestatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagestatus.go new file mode 100644 index 0000000000..f6a0349061 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagestatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// StorageStatusApplyConfiguration represents a declarative configuration of the StorageStatus type for use +// with apply. +type StorageStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// StorageStatusApplyConfiguration constructs a declarative configuration of the StorageStatus type for use with +// apply. +func StorageStatus() *StorageStatusApplyConfiguration { + return &StorageStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *StorageStatusApplyConfiguration) WithObservedGeneration(value int64) *StorageStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *StorageStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *StorageStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.OperatorStatusApplyConfiguration.Conditions = append(b.OperatorStatusApplyConfiguration.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *StorageStatusApplyConfiguration) WithVersion(value string) *StorageStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *StorageStatusApplyConfiguration) WithReadyReplicas(value int32) *StorageStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. 
+func (b *StorageStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *StorageStatusApplyConfiguration { + b.OperatorStatusApplyConfiguration.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *StorageStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *StorageStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.OperatorStatusApplyConfiguration.Generations = append(b.OperatorStatusApplyConfiguration.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/syslogloggingdestinationparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/syslogloggingdestinationparameters.go new file mode 100644 index 0000000000..9bd8ff1f84 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/syslogloggingdestinationparameters.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SyslogLoggingDestinationParametersApplyConfiguration represents a declarative configuration of the SyslogLoggingDestinationParameters type for use +// with apply. +type SyslogLoggingDestinationParametersApplyConfiguration struct { + Address *string `json:"address,omitempty"` + Port *uint32 `json:"port,omitempty"` + Facility *string `json:"facility,omitempty"` + MaxLength *uint32 `json:"maxLength,omitempty"` +} + +// SyslogLoggingDestinationParametersApplyConfiguration constructs a declarative configuration of the SyslogLoggingDestinationParameters type for use with +// apply. +func SyslogLoggingDestinationParameters() *SyslogLoggingDestinationParametersApplyConfiguration { + return &SyslogLoggingDestinationParametersApplyConfiguration{} +} + +// WithAddress sets the Address field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Address field is set to the value of the last call. +func (b *SyslogLoggingDestinationParametersApplyConfiguration) WithAddress(value string) *SyslogLoggingDestinationParametersApplyConfiguration { + b.Address = &value + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *SyslogLoggingDestinationParametersApplyConfiguration) WithPort(value uint32) *SyslogLoggingDestinationParametersApplyConfiguration { + b.Port = &value + return b +} + +// WithFacility sets the Facility field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Facility field is set to the value of the last call. 
+func (b *SyslogLoggingDestinationParametersApplyConfiguration) WithFacility(value string) *SyslogLoggingDestinationParametersApplyConfiguration { + b.Facility = &value + return b +} + +// WithMaxLength sets the MaxLength field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxLength field is set to the value of the last call. +func (b *SyslogLoggingDestinationParametersApplyConfiguration) WithMaxLength(value uint32) *SyslogLoggingDestinationParametersApplyConfiguration { + b.MaxLength = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/theme.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/theme.go new file mode 100644 index 0000000000..11d3223b49 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/theme.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// ThemeApplyConfiguration represents a declarative configuration of the Theme type for use +// with apply. +type ThemeApplyConfiguration struct { + Mode *operatorv1.ThemeMode `json:"mode,omitempty"` + Source *FileReferenceSourceApplyConfiguration `json:"source,omitempty"` +} + +// ThemeApplyConfiguration constructs a declarative configuration of the Theme type for use with +// apply. +func Theme() *ThemeApplyConfiguration { + return &ThemeApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *ThemeApplyConfiguration) WithMode(value operatorv1.ThemeMode) *ThemeApplyConfiguration { + b.Mode = &value + return b +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. +func (b *ThemeApplyConfiguration) WithSource(value *FileReferenceSourceApplyConfiguration) *ThemeApplyConfiguration { + b.Source = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstream.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstream.go new file mode 100644 index 0000000000..8f666cd18f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstream.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// UpstreamApplyConfiguration represents a declarative configuration of the Upstream type for use +// with apply. +type UpstreamApplyConfiguration struct { + Type *operatorv1.UpstreamType `json:"type,omitempty"` + Address *string `json:"address,omitempty"` + Port *uint32 `json:"port,omitempty"` +} + +// UpstreamApplyConfiguration constructs a declarative configuration of the Upstream type for use with +// apply. 
+func Upstream() *UpstreamApplyConfiguration { + return &UpstreamApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *UpstreamApplyConfiguration) WithType(value operatorv1.UpstreamType) *UpstreamApplyConfiguration { + b.Type = &value + return b +} + +// WithAddress sets the Address field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Address field is set to the value of the last call. +func (b *UpstreamApplyConfiguration) WithAddress(value string) *UpstreamApplyConfiguration { + b.Address = &value + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *UpstreamApplyConfiguration) WithPort(value uint32) *UpstreamApplyConfiguration { + b.Port = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstreamresolvers.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstreamresolvers.go new file mode 100644 index 0000000000..ff90a2347f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstreamresolvers.go @@ -0,0 +1,59 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// UpstreamResolversApplyConfiguration represents a declarative configuration of the UpstreamResolvers type for use +// with apply. +type UpstreamResolversApplyConfiguration struct { + Upstreams []UpstreamApplyConfiguration `json:"upstreams,omitempty"` + Policy *operatorv1.ForwardingPolicy `json:"policy,omitempty"` + TransportConfig *DNSTransportConfigApplyConfiguration `json:"transportConfig,omitempty"` + ProtocolStrategy *operatorv1.ProtocolStrategy `json:"protocolStrategy,omitempty"` +} + +// UpstreamResolversApplyConfiguration constructs a declarative configuration of the UpstreamResolvers type for use with +// apply. +func UpstreamResolvers() *UpstreamResolversApplyConfiguration { + return &UpstreamResolversApplyConfiguration{} +} + +// WithUpstreams adds the given value to the Upstreams field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Upstreams field. +func (b *UpstreamResolversApplyConfiguration) WithUpstreams(values ...*UpstreamApplyConfiguration) *UpstreamResolversApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithUpstreams") + } + b.Upstreams = append(b.Upstreams, *values[i]) + } + return b +} + +// WithPolicy sets the Policy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Policy field is set to the value of the last call. 
+func (b *UpstreamResolversApplyConfiguration) WithPolicy(value operatorv1.ForwardingPolicy) *UpstreamResolversApplyConfiguration { + b.Policy = &value + return b +} + +// WithTransportConfig sets the TransportConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TransportConfig field is set to the value of the last call. +func (b *UpstreamResolversApplyConfiguration) WithTransportConfig(value *DNSTransportConfigApplyConfiguration) *UpstreamResolversApplyConfiguration { + b.TransportConfig = value + return b +} + +// WithProtocolStrategy sets the ProtocolStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProtocolStrategy field is set to the value of the last call. +func (b *UpstreamResolversApplyConfiguration) WithProtocolStrategy(value operatorv1.ProtocolStrategy) *UpstreamResolversApplyConfiguration { + b.ProtocolStrategy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go new file mode 100644 index 0000000000..e75a767a31 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go @@ -0,0 +1,61 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSphereCSIDriverConfigSpecApplyConfiguration represents a declarative configuration of the VSphereCSIDriverConfigSpec type for use +// with apply. +type VSphereCSIDriverConfigSpecApplyConfiguration struct { + TopologyCategories []string `json:"topologyCategories,omitempty"` + GlobalMaxSnapshotsPerBlockVolume *uint32 `json:"globalMaxSnapshotsPerBlockVolume,omitempty"` + GranularMaxSnapshotsPerBlockVolumeInVSAN *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVSAN,omitempty"` + GranularMaxSnapshotsPerBlockVolumeInVVOL *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVVOL,omitempty"` + MaxAllowedBlockVolumesPerNode *int32 `json:"maxAllowedBlockVolumesPerNode,omitempty"` +} + +// VSphereCSIDriverConfigSpecApplyConfiguration constructs a declarative configuration of the VSphereCSIDriverConfigSpec type for use with +// apply. +func VSphereCSIDriverConfigSpec() *VSphereCSIDriverConfigSpecApplyConfiguration { + return &VSphereCSIDriverConfigSpecApplyConfiguration{} +} + +// WithTopologyCategories adds the given value to the TopologyCategories field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TopologyCategories field. +func (b *VSphereCSIDriverConfigSpecApplyConfiguration) WithTopologyCategories(values ...string) *VSphereCSIDriverConfigSpecApplyConfiguration { + for i := range values { + b.TopologyCategories = append(b.TopologyCategories, values[i]) + } + return b +} + +// WithGlobalMaxSnapshotsPerBlockVolume sets the GlobalMaxSnapshotsPerBlockVolume field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the GlobalMaxSnapshotsPerBlockVolume field is set to the value of the last call. +func (b *VSphereCSIDriverConfigSpecApplyConfiguration) WithGlobalMaxSnapshotsPerBlockVolume(value uint32) *VSphereCSIDriverConfigSpecApplyConfiguration { + b.GlobalMaxSnapshotsPerBlockVolume = &value + return b +} + +// WithGranularMaxSnapshotsPerBlockVolumeInVSAN sets the GranularMaxSnapshotsPerBlockVolumeInVSAN field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GranularMaxSnapshotsPerBlockVolumeInVSAN field is set to the value of the last call. +func (b *VSphereCSIDriverConfigSpecApplyConfiguration) WithGranularMaxSnapshotsPerBlockVolumeInVSAN(value uint32) *VSphereCSIDriverConfigSpecApplyConfiguration { + b.GranularMaxSnapshotsPerBlockVolumeInVSAN = &value + return b +} + +// WithGranularMaxSnapshotsPerBlockVolumeInVVOL sets the GranularMaxSnapshotsPerBlockVolumeInVVOL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GranularMaxSnapshotsPerBlockVolumeInVVOL field is set to the value of the last call. +func (b *VSphereCSIDriverConfigSpecApplyConfiguration) WithGranularMaxSnapshotsPerBlockVolumeInVVOL(value uint32) *VSphereCSIDriverConfigSpecApplyConfiguration { + b.GranularMaxSnapshotsPerBlockVolumeInVVOL = &value + return b +} + +// WithMaxAllowedBlockVolumesPerNode sets the MaxAllowedBlockVolumesPerNode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxAllowedBlockVolumesPerNode field is set to the value of the last call. +func (b *VSphereCSIDriverConfigSpecApplyConfiguration) WithMaxAllowedBlockVolumesPerNode(value int32) *VSphereCSIDriverConfigSpecApplyConfiguration { + b.MaxAllowedBlockVolumesPerNode = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/register.go b/vendor/github.com/openshift/hypershift/api/hypershift/register.go deleted file mode 100644 index 4a0a91d4bd..0000000000 --- a/vendor/github.com/openshift/hypershift/api/hypershift/register.go +++ /dev/null @@ -1,3 +0,0 @@ -package hypershift - -const GroupName = "hypershift.openshift.io" diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/agent.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/agent.go new file mode 100644 index 0000000000..9edd421fb1 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/agent.go @@ -0,0 +1,20 @@ +package v1beta1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// AgentNodePoolPlatform specifies the configuration of a NodePool when operating +// on the Agent platform. +type AgentNodePoolPlatform struct { + // agentLabelSelector contains labels that must be set on an Agent in order to + // be selected for a Machine. + // +optional + AgentLabelSelector *metav1.LabelSelector `json:"agentLabelSelector,omitempty"` +} + +// AgentPlatformSpec specifies configuration for agent-based installations. 
+type AgentPlatformSpec struct { + // agentNamespace is the namespace where to search for Agents for this cluster + // +kubebuilder:validation:MaxLength=63 + // +required + AgentNamespace string `json:"agentNamespace"` +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/aws.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/aws.go new file mode 100644 index 0000000000..533a31cb0d --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/aws.go @@ -0,0 +1,961 @@ +package v1beta1 + +// AWSNodePoolPlatform specifies the configuration of a NodePool when operating +// on AWS. +type AWSNodePoolPlatform struct { + // instanceType is an ec2 instance type for node instances (e.g. m5.large). + // +required + // +kubebuilder:validation:MaxLength=255 + InstanceType string `json:"instanceType"` + + // instanceProfile is the AWS EC2 instance profile, which is a container for an IAM role that the EC2 instance uses. + // +optional + // +kubebuilder:validation:MaxLength=255 + InstanceProfile string `json:"instanceProfile,omitempty"` + + // subnet is the subnet to use for node instances. + // +kubebuilder:validation:XValidation:rule="has(self.id) && self.id.startsWith('subnet-') ? !has(self.filters) : size(self.filters) > 0", message="subnet is invalid, a valid subnet id or filters must be set, but not both" + // +required + Subnet AWSResourceReference `json:"subnet"` + + // ami is the image id to use for node instances. If unspecified, the default + // is chosen based on the NodePool release payload image. + // + // +optional + // +kubebuilder:validation:MaxLength=255 + AMI string `json:"ami,omitempty"` + + // securityGroups is an optional set of security groups to associate with node + // instances. + // + // +optional + // +kubebuilder:validation:MaxItems=50 + SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"` + + // rootVolume specifies configuration for the root volume of node instances. + // + // +optional + RootVolume *Volume `json:"rootVolume,omitempty"` + + // resourceTags is an optional list of additional tags to apply to AWS node + // instances. Changes to this field will be propagated in-place to AWS EC2 instances and their initial EBS volumes. + // Volumes created by the storage operator and attached to instances after they are created do not get these tags applied. + // + // These will be merged with HostedCluster scoped tags, which take precedence in case of conflicts. + // These take precedence over tags defined out of band (i.e., tags added manually or by other tools outside of HyperShift) in AWS in case of conflicts. + // + // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for + // information on tagging AWS resources. AWS supports a maximum of 50 tags per + // resource. OpenShift reserves 25 tags for its use, leaving 25 tags available + // for the user. + // + // +kubebuilder:validation:MaxItems=25 + // +optional + ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` + + // placement specifies the placement options for the EC2 instances. + // + // +optional + Placement *PlacementOptions `json:"placement,omitempty"` +} + +// PlacementOptions specifies the placement options for the EC2 instances. +type PlacementOptions struct { + // tenancy indicates if instance should run on shared or single-tenant hardware. + // + // Possible values: + // default: NodePool instances run on shared hardware. + // dedicated: Each NodePool instance runs on single-tenant hardware. 
+ // host: NodePool instances run on user's pre-allocated dedicated hosts. + // + // +optional + // +kubebuilder:validation:Enum:=default;dedicated;host + Tenancy string `json:"tenancy,omitempty"` + + // capacityReservation specifies Capacity Reservation options for the NodePool instances. + // + // +optional + CapacityReservation *CapacityReservationOptions `json:"capacityReservation,omitempty"` +} + +// MarketType describes the market type of the CapacityReservationo for an Instance. +type MarketType string + +const ( + // MarketTypeOnDemand is a MarketType enum value + MarketTypeOnDemand MarketType = "OnDemand" + + // MarketTypeCapacityBlock is a MarketType enum value + MarketTypeCapacityBlock MarketType = "CapacityBlocks" +) + +// CapacityReservationOptions specifies Capacity Reservation options for the NodePool instances. +type CapacityReservationOptions struct { + // id specifies the target Capacity Reservation into which the EC2 instances should be launched. + // Must follow the format: cr- followed by 17 lowercase hexadecimal characters. For example: cr-0123456789abcdef0 + // + // +kubebuilder:validation:XValidation:rule="self.matches('^cr-[a-f0-9]{17}$')", message="AWS Capacity Reservation ID must start with 'cr-' followed by 17 lowercase hexadecimal characters (e.g., cr-0123456789abcdef0)" + // +required + // +kubebuilder:validation:MinLength=20 + // +kubebuilder:validation:MaxLength=20 + ID string `json:"id"` + + // marketType specifies the market type of the CapacityReservation for the EC2 instances. Valid values are OnDemand, CapacityBlocks and omitted: + // "OnDemand": EC2 instances run as standard On-Demand instances. + // "CapacityBlocks": scheduled pre-purchased compute capacity. Capacity Blocks is recommended when GPUs are needed to support ML workloads. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default value is CapacityBlocks. + // + // +kubebuilder:validation:Enum:=OnDemand;CapacityBlocks + // +optional + MarketType MarketType `json:"marketType,omitempty"` +} + +// AWSResourceReference is a reference to a specific AWS resource by ID or filters. +// Only one of ID or Filters may be specified. Specifying more than one will result in +// a validation error. +type AWSResourceReference struct { + // id of resource + // +optional + // +kubebuilder:validation:MaxLength=255 + ID *string `json:"id,omitempty"` + + // filters is a set of key/value pairs used to identify a resource + // They are applied according to the rules defined by the AWS API: + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html + // +optional + // +kubebuilder:validation:MaxItems=50 + Filters []Filter `json:"filters,omitempty"` +} + +// Filter is a filter used to identify an AWS resource +type Filter struct { + // name is the name of the filter. + // +required + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // values is a list of values for the filter. + // +required + // +kubebuilder:validation:MaxItems=50 + // +kubebuilder:validation:items:MaxLength=255 + Values []string `json:"values"` +} + +// Volume specifies the configuration options for node instance storage devices. +type Volume struct { + // size is the size of the volume in gibibytes (GiB). + // + // Must be greater than the image snapshot size or 8 (whichever is greater). 
+ // + // +kubebuilder:validation:Minimum=8 + // +required + Size int64 `json:"size"` + + // type is the type of volume to provision. + // +required + // +kubebuilder:validation:MaxLength=255 + Type string `json:"type"` + + // iops is the number of IOPS requested for the disk. This is only valid + // for type io1. + // + // +optional + IOPS int64 `json:"iops,omitempty"` + + // encrypted indicates whether the EBS volume should be encrypted or not. + // +optional + Encrypted *bool `json:"encrypted,omitempty"` + + // encryptionKey is the KMS key to use for volume encryption. + // +optional + // +kubebuilder:validation:MaxLength=2048 + EncryptionKey string `json:"encryptionKey,omitempty"` +} + +// AWSCloudProviderConfig specifies AWS networking configuration. +type AWSCloudProviderConfig struct { + // subnet is the subnet to use for control plane cloud resources. + // + // +optional + Subnet *AWSResourceReference `json:"subnet,omitempty"` + + // zone is the availability zone where control plane cloud resources are + // created. + // + // +optional + // +kubebuilder:validation:MaxLength=255 + Zone string `json:"zone,omitempty"` + + // vpc is the VPC to use for control plane cloud resources. + // +required + // +kubebuilder:validation:MaxLength=255 + VPC string `json:"vpc"` +} + +// AWSEndpointAccessType specifies the publishing scope of cluster endpoints. +type AWSEndpointAccessType string + +const ( + // Public endpoint access allows public API server access and public node + // communication with the control plane. + Public AWSEndpointAccessType = "Public" + + // PublicAndPrivate endpoint access allows public API server access and + // private node communication with the control plane. + PublicAndPrivate AWSEndpointAccessType = "PublicAndPrivate" + + // Private endpoint access allows only private API server access and private + // node communication with the control plane. + Private AWSEndpointAccessType = "Private" +) + +// AWSPlatformSpec specifies configuration for clusters running on Amazon Web Services. +type AWSPlatformSpec struct { + // region is the AWS region in which the cluster resides. This configures the + // OCP control plane cloud integrations, and is used by NodePool to resolve + // the correct boot AMI for a given release. + // + // +immutable + // +required + // +kubebuilder:validation:MaxLength=255 + Region string `json:"region"` + + // cloudProviderConfig specifies AWS networking configuration for the control + // plane. + // This is mainly used for cloud provider controller config: + // https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go#L1347-L1364 + // TODO(dan): should this be named AWSNetworkConfig? + // + // +optional + // +immutable + CloudProviderConfig *AWSCloudProviderConfig `json:"cloudProviderConfig,omitempty"` + + // serviceEndpoints specifies optional custom endpoints which will override + // the default service endpoint of specific AWS Services. + // + // There must be only one ServiceEndpoint for a given service name. + // + // +optional + // +immutable + // +kubebuilder:validation:MaxItems=50 + ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"` + + // rolesRef contains references to various AWS IAM roles required to enable + // integrations such as OIDC. + // + // +immutable + // +required + RolesRef AWSRolesRef `json:"rolesRef"` + + // resourceTags is a list of additional tags to apply to AWS resources created + // for the cluster. 
See + // https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for + // information on tagging AWS resources. AWS supports a maximum of 50 tags per + // resource. OpenShift reserves 25 tags for its use, leaving 25 tags available + // for the user. + // Changes to this field will be propagated in-place to AWS resources (VPC Endpoints, EC2 instances, initial EBS volumes and default/endpoint security groups). + // These tags will be propagated to the infrastructure CR in the guest cluster, where other OCP operators might choose to honor this input to reconcile AWS resources created by them. + // Please consult the official documentation for a list of all AWS resources that support in-place tag updates. + // These take precedence over tags defined out of band (i.e., tags added manually or by other tools outside of HyperShift) in AWS in case of conflicts. + // + // +kubebuilder:validation:MaxItems=25 + // +optional + ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` + + // endpointAccess specifies the publishing scope of cluster endpoints. The + // default is Public. + // + // +kubebuilder:validation:Enum=Public;PublicAndPrivate;Private + // +kubebuilder:default=Public + // +optional + EndpointAccess AWSEndpointAccessType `json:"endpointAccess,omitempty"` + + // additionalAllowedPrincipals specifies a list of additional allowed principal ARNs + // to be added to the hosted control plane's VPC Endpoint Service to enable additional + // VPC Endpoint connection requests to be automatically accepted. + // See https://docs.aws.amazon.com/vpc/latest/privatelink/configure-endpoint-service.html + // for more details around VPC Endpoint Service allowed principals. + // + // +optional + // +kubebuilder:validation:MaxItems=25 + // +kubebuilder:validation:items:MaxLength=255 + AdditionalAllowedPrincipals []string `json:"additionalAllowedPrincipals,omitempty"` + + // multiArch specifies whether the Hosted Cluster will be expected to support NodePools with different + // CPU architectures, i.e., supporting arm64 NodePools and supporting amd64 NodePools on the same Hosted Cluster. + // Deprecated: This field is no longer used. The HyperShift Operator now performs multi-arch validations + // automatically despite the platform type. The HyperShift Operator will set HostedCluster.Status.PayloadArch based + // on the HostedCluster release image. This field is used by the NodePool controller to validate the + // NodePool.Spec.Arch is supported. + // +kubebuilder:default=false + // +optional + MultiArch bool `json:"multiArch"` + + // sharedVPC contains fields that must be specified if the HostedCluster must use a VPC that is + // created in a different AWS account and is shared with the AWS account where the HostedCluster + // will be created. + // + // +optional + SharedVPC *AWSSharedVPC `json:"sharedVPC,omitempty"` +} + +// AWSSharedVPC contains fields needed to create a HostedCluster using a VPC that has been +// created and shared from a different AWS account than the AWS account where the cluster +// is getting created. +type AWSSharedVPC struct { + // rolesRef contains references to roles in the VPC owner account that enable a + // HostedCluster on a shared VPC. + // + // +required + RolesRef AWSSharedVPCRolesRef `json:"rolesRef"` + + // localZoneID is the ID of the route53 hosted zone for [cluster-name].hypershift.local that is + // associated with the HostedCluster's VPC and exists in the VPC owner account. 
+ // + // +required + // +kubebuilder:validation:MaxLength=32 + LocalZoneID string `json:"localZoneID"` +} + +type AWSRoleCredentials struct { + // arn is the ARN of the role. + // +required + // +kubebuilder:validation:MaxLength=2048 + ARN string `json:"arn"` + // namespace is the namespace of the role. + // +required + // +kubebuilder:validation:MaxLength=255 + Namespace string `json:"namespace"` + // name is the name of the role. + // +required + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` +} + +// AWSResourceTag is a tag to apply to AWS resources created for the cluster. +type AWSResourceTag struct { + // key is the key of the tag. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + Key string `json:"key"` + // value is the value of the tag. + // + // Some AWS service do not support empty values. Since tags are added to + // resources in many services, the length of the tag value must meet the + // requirements of all services. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + Value string `json:"value"` +} + +// AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API. +// The referenced role must have a trust relationship that allows it to be assumed via web identity. +// https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html. +// Example: +// +// { +// "Version": "2012-10-17", +// "Statement": [ +// { +// "Effect": "Allow", +// "Principal": { +// "Federated": "{{ .ProviderARN }}" +// }, +// "Action": "sts:AssumeRoleWithWebIdentity", +// "Condition": { +// "StringEquals": { +// "{{ .ProviderName }}:sub": {{ .ServiceAccounts }} +// } +// } +// } +// ] +// } +type AWSRolesRef struct { + // ingressARN is an ARN value referencing a role appropriate for the Ingress Operator. + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "elasticloadbalancing:DescribeLoadBalancers", + // "tag:GetResources", + // "route53:ListHostedZones" + // ], + // "Resource": "*" + // }, + // { + // "Effect": "Allow", + // "Action": [ + // "route53:ChangeResourceRecordSets" + // ], + // "Resource": [ + // "arn:aws:route53:::PUBLIC_ZONE_ID", + // "arn:aws:route53:::PRIVATE_ZONE_ID" + // ] + // } + // ] + // } + // +required + // +kubebuilder:validation:MaxLength=2048 + IngressARN string `json:"ingressARN"` + + // imageRegistryARN is an ARN value referencing a role appropriate for the Image Registry Operator. 
+ // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "s3:CreateBucket", + // "s3:DeleteBucket", + // "s3:PutBucketTagging", + // "s3:GetBucketTagging", + // "s3:PutBucketPublicAccessBlock", + // "s3:GetBucketPublicAccessBlock", + // "s3:PutEncryptionConfiguration", + // "s3:GetEncryptionConfiguration", + // "s3:PutLifecycleConfiguration", + // "s3:GetLifecycleConfiguration", + // "s3:GetBucketLocation", + // "s3:ListBucket", + // "s3:GetObject", + // "s3:PutObject", + // "s3:DeleteObject", + // "s3:ListBucketMultipartUploads", + // "s3:AbortMultipartUpload", + // "s3:ListMultipartUploadParts" + // ], + // "Resource": "*" + // } + // ] + // } + // +required + // +kubebuilder:validation:MaxLength=2048 + ImageRegistryARN string `json:"imageRegistryARN"` + + // storageARN is an ARN value referencing a role appropriate for the Storage Operator. + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "ec2:AttachVolume", + // "ec2:CreateSnapshot", + // "ec2:CreateTags", + // "ec2:CreateVolume", + // "ec2:DeleteSnapshot", + // "ec2:DeleteTags", + // "ec2:DeleteVolume", + // "ec2:DescribeInstances", + // "ec2:DescribeSnapshots", + // "ec2:DescribeTags", + // "ec2:DescribeVolumes", + // "ec2:DescribeVolumesModifications", + // "ec2:DetachVolume", + // "ec2:ModifyVolume" + // ], + // "Resource": "*" + // } + // ] + // } + // +required + // +kubebuilder:validation:MaxLength=2048 + StorageARN string `json:"storageARN"` + + // networkARN is an ARN value referencing a role appropriate for the Network Operator. + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "ec2:DescribeInstances", + // "ec2:DescribeInstanceStatus", + // "ec2:DescribeInstanceTypes", + // "ec2:UnassignPrivateIpAddresses", + // "ec2:AssignPrivateIpAddresses", + // "ec2:UnassignIpv6Addresses", + // "ec2:AssignIpv6Addresses", + // "ec2:DescribeSubnets", + // "ec2:DescribeNetworkInterfaces" + // ], + // "Resource": "*" + // } + // ] + // } + // +required + // +kubebuilder:validation:MaxLength=2048 + NetworkARN string `json:"networkARN"` + + // kubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC. 
+ // Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Action": [ + // "autoscaling:DescribeAutoScalingGroups", + // "autoscaling:DescribeLaunchConfigurations", + // "autoscaling:DescribeTags", + // "ec2:DescribeAvailabilityZones", + // "ec2:DescribeInstances", + // "ec2:DescribeImages", + // "ec2:DescribeRegions", + // "ec2:DescribeRouteTables", + // "ec2:DescribeSecurityGroups", + // "ec2:DescribeSubnets", + // "ec2:DescribeVolumes", + // "ec2:CreateSecurityGroup", + // "ec2:CreateTags", + // "ec2:CreateVolume", + // "ec2:ModifyInstanceAttribute", + // "ec2:ModifyVolume", + // "ec2:AttachVolume", + // "ec2:AuthorizeSecurityGroupIngress", + // "ec2:CreateRoute", + // "ec2:DeleteRoute", + // "ec2:DeleteSecurityGroup", + // "ec2:DeleteVolume", + // "ec2:DetachVolume", + // "ec2:RevokeSecurityGroupIngress", + // "ec2:DescribeVpcs", + // "elasticloadbalancing:AddTags", + // "elasticloadbalancing:AttachLoadBalancerToSubnets", + // "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + // "elasticloadbalancing:CreateLoadBalancer", + // "elasticloadbalancing:CreateLoadBalancerPolicy", + // "elasticloadbalancing:CreateLoadBalancerListeners", + // "elasticloadbalancing:ConfigureHealthCheck", + // "elasticloadbalancing:DeleteLoadBalancer", + // "elasticloadbalancing:DeleteLoadBalancerListeners", + // "elasticloadbalancing:DescribeLoadBalancers", + // "elasticloadbalancing:DescribeLoadBalancerAttributes", + // "elasticloadbalancing:DetachLoadBalancerFromSubnets", + // "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + // "elasticloadbalancing:ModifyLoadBalancerAttributes", + // "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + // "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + // "elasticloadbalancing:AddTags", + // "elasticloadbalancing:CreateListener", + // "elasticloadbalancing:CreateTargetGroup", + // "elasticloadbalancing:DeleteListener", + // "elasticloadbalancing:DeleteTargetGroup", + // "elasticloadbalancing:DeregisterTargets", + // "elasticloadbalancing:DescribeListeners", + // "elasticloadbalancing:DescribeLoadBalancerPolicies", + // "elasticloadbalancing:DescribeTargetGroups", + // "elasticloadbalancing:DescribeTargetHealth", + // "elasticloadbalancing:ModifyListener", + // "elasticloadbalancing:ModifyTargetGroup", + // "elasticloadbalancing:RegisterTargets", + // "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + // "iam:CreateServiceLinkedRole", + // "kms:DescribeKey" + // ], + // "Resource": [ + // "*" + // ], + // "Effect": "Allow" + // } + // ] + // } + // +required + // +kubebuilder:validation:MaxLength=2048 + KubeCloudControllerARN string `json:"kubeCloudControllerARN"` + + // nodePoolManagementARN is an ARN value referencing a role appropriate for the CAPI Controller. 
+ // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Action": [ + // "ec2:AssociateRouteTable", + // "ec2:AttachInternetGateway", + // "ec2:AuthorizeSecurityGroupIngress", + // "ec2:CreateInternetGateway", + // "ec2:CreateNatGateway", + // "ec2:CreateRoute", + // "ec2:CreateRouteTable", + // "ec2:CreateSecurityGroup", + // "ec2:CreateSubnet", + // "ec2:CreateTags", + // "ec2:DeleteInternetGateway", + // "ec2:DeleteNatGateway", + // "ec2:DeleteRouteTable", + // "ec2:DeleteSecurityGroup", + // "ec2:DeleteSubnet", + // "ec2:DeleteTags", + // "ec2:DescribeAccountAttributes", + // "ec2:DescribeAddresses", + // "ec2:DescribeAvailabilityZones", + // "ec2:DescribeImages", + // "ec2:DescribeInstances", + // "ec2:DescribeInternetGateways", + // "ec2:DescribeNatGateways", + // "ec2:DescribeNetworkInterfaces", + // "ec2:DescribeNetworkInterfaceAttribute", + // "ec2:DescribeRouteTables", + // "ec2:DescribeSecurityGroups", + // "ec2:DescribeSubnets", + // "ec2:DescribeVpcs", + // "ec2:DescribeVpcAttribute", + // "ec2:DescribeVolumes", + // "ec2:DetachInternetGateway", + // "ec2:DisassociateRouteTable", + // "ec2:DisassociateAddress", + // "ec2:ModifyInstanceAttribute", + // "ec2:ModifyNetworkInterfaceAttribute", + // "ec2:ModifySubnetAttribute", + // "ec2:RevokeSecurityGroupIngress", + // "ec2:RunInstances", + // "ec2:TerminateInstances", + // "tag:GetResources", + // "ec2:CreateLaunchTemplate", + // "ec2:CreateLaunchTemplateVersion", + // "ec2:DescribeLaunchTemplates", + // "ec2:DescribeLaunchTemplateVersions", + // "ec2:DeleteLaunchTemplate", + // "ec2:DeleteLaunchTemplateVersions" + // ], + // "Resource": [ + // "*" + // ], + // "Effect": "Allow" + // }, + // { + // "Condition": { + // "StringLike": { + // "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com" + // } + // }, + // "Action": [ + // "iam:CreateServiceLinkedRole" + // ], + // "Resource": [ + // "arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing" + // ], + // "Effect": "Allow" + // }, + // { + // "Action": [ + // "iam:PassRole" + // ], + // "Resource": [ + // "arn:*:iam::*:role/*-worker-role" + // ], + // "Effect": "Allow" + // }, + // { + // "Effect": "Allow", + // "Action": [ + // "kms:Decrypt", + // "kms:ReEncrypt", + // "kms:GenerateDataKeyWithoutPlainText", + // "kms:DescribeKey" + // ], + // "Resource": "*" + // }, + // { + // "Effect": "Allow", + // "Action": [ + // "kms:CreateGrant" + // ], + // "Resource": "*", + // "Condition": { + // "Bool": { + // "kms:GrantIsForAWSResource": true + // } + // } + // } + // ] + // } + // + // +required + // +kubebuilder:validation:MaxLength=2048 + NodePoolManagementARN string `json:"nodePoolManagementARN"` + + // controlPlaneOperatorARN is an ARN value referencing a role appropriate for the Control Plane Operator. 
+ // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "ec2:CreateVpcEndpoint", + // "ec2:DescribeVpcEndpoints", + // "ec2:ModifyVpcEndpoint", + // "ec2:DeleteVpcEndpoints", + // "ec2:CreateTags", + // "route53:ListHostedZones", + // "ec2:CreateSecurityGroup", + // "ec2:AuthorizeSecurityGroupIngress", + // "ec2:AuthorizeSecurityGroupEgress", + // "ec2:DeleteSecurityGroup", + // "ec2:RevokeSecurityGroupIngress", + // "ec2:RevokeSecurityGroupEgress", + // "ec2:DescribeSecurityGroups", + // "ec2:DescribeVpcs", + // ], + // "Resource": "*" + // }, + // { + // "Effect": "Allow", + // "Action": [ + // "route53:ChangeResourceRecordSets", + // "route53:ListResourceRecordSets" + // ], + // "Resource": "arn:aws:route53:::%s" + // } + // ] + // } + // +required + // +kubebuilder:validation:MaxLength=2048 + ControlPlaneOperatorARN string `json:"controlPlaneOperatorARN"` +} + +// AWSSharedVPCRolesRef contains references to AWS IAM roles required for a shared VPC hosted cluster. +// These roles must exist in the VPC owner's account. +type AWSSharedVPCRolesRef struct { + // ingressARN is an ARN value referencing the role in the VPC owner account that allows the + // ingress operator in the cluster account to create and manage records in the private DNS + // hosted zone. + // + // The referenced role must have a trust relationship that allows it to be assumed by the + // ingress operator role in the VPC creator account. + // Example: + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Sid": "Statement1", + // "Effect": "Allow", + // "Principal": { + // "AWS": "arn:aws:iam::[cluster-creator-account-id]:role/[infra-id]-openshift-ingress" + // }, + // "Action": "sts:AssumeRole" + // } + // ] + // } + // + // The following is an example of the policy document for this role. + // (Based on https://docs.openshift.com/rosa/rosa_install_access_delete_clusters/rosa-shared-vpc-config.html#rosa-sharing-vpc-dns-and-roles_rosa-shared-vpc-config) + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "route53:ListHostedZones", + // "route53:ListHostedZonesByName", + // "route53:ChangeTagsForResource", + // "route53:GetAccountLimit", + // "route53:GetChange", + // "route53:GetHostedZone", + // "route53:ListTagsForResource", + // "route53:UpdateHostedZoneComment", + // "tag:GetResources", + // "tag:UntagResources" + // "route53:ChangeResourceRecordSets", + // "route53:ListResourceRecordSets" + // ], + // "Resource": "*" + // }, + // ] + // } + // + // +required + // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$` + // +kubebuilder:validation:MaxLength=2048 + IngressARN string `json:"ingressARN"` + + // controlPlaneARN is an ARN value referencing the role in the VPC owner account that allows + // the control plane operator in the cluster account to create and manage a VPC endpoint, its + // corresponding Security Group, and DNS records in the hypershift local hosted zone. + // + // The referenced role must have a trust relationship that allows it to be assumed by the + // control plane operator role in the VPC creator account. 
+ // Example: + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Sid": "Statement1", + // "Effect": "Allow", + // "Principal": { + // "AWS": "arn:aws:iam::[cluster-creator-account-id]:role/[infra-id]-control-plane-operator" + // }, + // "Action": "sts:AssumeRole" + // } + // ] + // } + // + // The following is an example of the policy document for this role. + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "ec2:CreateVpcEndpoint", + // "ec2:DescribeVpcEndpoints", + // "ec2:ModifyVpcEndpoint", + // "ec2:DeleteVpcEndpoints", + // "ec2:CreateTags", + // "route53:ListHostedZones", + // "ec2:CreateSecurityGroup", + // "ec2:AuthorizeSecurityGroupIngress", + // "ec2:AuthorizeSecurityGroupEgress", + // "ec2:DeleteSecurityGroup", + // "ec2:RevokeSecurityGroupIngress", + // "ec2:RevokeSecurityGroupEgress", + // "ec2:DescribeSecurityGroups", + // "ec2:DescribeVpcs", + // "route53:ChangeResourceRecordSets", + // "route53:ListResourceRecordSets" + // ], + // "Resource": "*" + // } + // ] + // } + // + // +required + // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$` + // +kubebuilder:validation:MaxLength=2048 + ControlPlaneARN string `json:"controlPlaneARN"` +} + +// AWSServiceEndpoint stores the configuration for services to +// override existing defaults of AWS Services. +type AWSServiceEndpoint struct { + // name is the name of the AWS service. + // This must be provided and cannot be empty. + // +required + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // url is fully qualified URI with scheme https, that overrides the default generated + // endpoint for a client. + // This must be provided and cannot be empty. + // + // +required + // +kubebuilder:validation:Pattern=`^https://` + // +kubebuilder:validation:MaxLength=2048 + URL string `json:"url"` +} + +// AWSKMSSpec defines metadata about the configuration of the AWS KMS Secret Encryption provider +type AWSKMSSpec struct { + // region contains the AWS region + // +required + // +kubebuilder:validation:MaxLength=255 + Region string `json:"region"` + // activeKey defines the active key used to encrypt new secrets + // +required + ActiveKey AWSKMSKeyEntry `json:"activeKey"` + // backupKey defines the old key during the rotation process so previously created + // secrets can continue to be decrypted until they are all re-encrypted with the active key. + // +optional + BackupKey *AWSKMSKeyEntry `json:"backupKey,omitempty"` + // auth defines metadata about the management of credentials used to interact with AWS KMS + // +required + Auth AWSKMSAuthSpec `json:"auth"` +} + +// AWSKMSAuthSpec defines metadata about the management of credentials used to interact and encrypt data via AWS KMS key. +// The referenced role must have a trust relationship that allows it to be assumed via web identity. +// https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html. +// Example: +// +// { +// "Version": "2012-10-17", +// "Statement": [ +// { +// "Effect": "Allow", +// "Principal": { +// "Federated": "{{ .ProviderARN }}" +// }, +// "Action": "sts:AssumeRoleWithWebIdentity", +// "Condition": { +// "StringEquals": { +// "{{ .ProviderName }}:sub": {{ .ServiceAccounts }} +// } +// } +// } +// ] +// } +type AWSKMSAuthSpec struct { + // awsKms is an ARN value referencing a role appropriate for managing the auth via the AWS KMS key. 
+	//
+	// The following is an example of a valid policy document:
+	//
+	// {
+	//	"Version": "2012-10-17",
+	//	"Statement": [
+	//		{
+	//			"Effect": "Allow",
+	//			"Action": [
+	//				"kms:Encrypt",
+	//				"kms:Decrypt",
+	//				"kms:ReEncrypt*",
+	//				"kms:GenerateDataKey*",
+	//				"kms:DescribeKey"
+	//			],
+	//			"Resource": %q
+	//		}
+	//	]
+	// }
+	// +required
+	// +kubebuilder:validation:MaxLength=2048
+	AWSKMSRoleARN string `json:"awsKms"`
+}
+
+// AWSKMSKeyEntry defines metadata to locate the encryption key in AWS
+type AWSKMSKeyEntry struct {
+	// arn is the Amazon Resource Name for the encryption key
+	// +required
+	// +kubebuilder:validation:Pattern=`^arn:`
+	// +kubebuilder:validation:MaxLength=2048
+	ARN string `json:"arn"`
+}
+
+// AWSPlatformStatus contains status specific to the AWS platform
+type AWSPlatformStatus struct {
+	// defaultWorkerSecurityGroupID is the ID of a security group created by
+	// the control plane operator. It is always added to worker machines in
+	// addition to any security groups specified in the NodePool.
+	// +optional
+	// +kubebuilder:validation:MaxLength=255
+	DefaultWorkerSecurityGroupID string `json:"defaultWorkerSecurityGroupID,omitempty"`
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/azure.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/azure.go
new file mode 100644
index 0000000000..127f996adf
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/azure.go
@@ -0,0 +1,737 @@
+package v1beta1
+
+import (
+	"fmt"
+)
+
+// AzureVMImageType is used to specify the source of the Azure VM boot image.
+// Valid values are ImageID and AzureMarketplace.
+// +kubebuilder:validation:Enum:=ImageID;AzureMarketplace
+type AzureVMImageType string
+
+const (
+	// ImageID is used to specify that an Azure resource ID of a VHD image is used to boot the Azure VMs from.
+	ImageID AzureVMImageType = "ImageID"
+
+	// AzureMarketplace is used to specify the Azure Marketplace image info to use to boot the Azure VMs from.
+	AzureMarketplace AzureVMImageType = "AzureMarketplace"
+)
+
+// AzureNodePoolPlatform is the platform specific configuration for an Azure node pool.
+type AzureNodePoolPlatform struct {
+	// vmSize is the Azure VM instance type to use for the nodes being created in the nodepool.
+	// The size naming convention is documented here https://learn.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions.
+	// Size names should start with a Family name, which is represented by one or more capital letters, and then be followed by the CPU count.
+	// This is followed by 0 or more additional features, represented by a, b, d, i, l, m, p, t, s, C, and NP; refer to the Azure documentation for an explanation of these features.
+	// Optionally an accelerator such as a GPU can be added, prefixed by an underscore, for example A100, H100 or MI300X.
+	// The size may also be versioned, in which case it should be suffixed with _v where the version is a number.
+	// For example, "D32ads_v5" would be a suitable general purpose VM size, or "ND96_MI300X_v5" would represent a GPU accelerated VM.
+	//
+	// + Azure VM size format described in https://learn.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions
+	// + "[A-Z]+[0-9]+(-[0-9]+)?" - Series, size and constrained CPU size
+	// + "[abdilmptsCNP]*" - Additive features
+	// + "(_[A-Z]*[0-9]+[A-Z]*)?"
- Optional accelerator types + // +kubebuilder:validation:Pattern=`^(Standard_|Basic_)?[A-Z]+[0-9]+(-[0-9]+)?[abdilmptsCNP]*(_[A-Z]*[0-9]+[A-Z]*)?(_v[0-9]+)?$` + // +required + // +kubebuilder:validation:MaxLength=255 + VMSize string `json:"vmSize"` + + // image is used to configure the VM boot image. If unset, the default image at the location below will be used and + // is expected to exist: subscription//resourceGroups//providers/Microsoft.Compute/images/rhcos.x86_64.vhd. + // The and the are expected to be the same resource group documented in the + // Hosted Cluster specification respectively, HostedCluster.Spec.Platform.Azure.SubscriptionID and + // HostedCluster.Spec.Platform.Azure.ResourceGroupName. + // + // +required + Image AzureVMImage `json:"image"` + + // osDisk provides configuration for the OS disk for the nodepool. + // This can be used to configure the size, storage account type, encryption options and whether the disk is persistent or ephemeral. + // When not provided, the platform will choose reasonable defaults which are subject to change over time. + // Review the fields within the osDisk for more details. + // +required + OSDisk AzureNodePoolOSDisk `json:"osDisk"` + + // availabilityZone is the failure domain identifier where the VM should be attached to. This must not be specified + // for clusters in a location that does not support AvailabilityZone because it would cause a failure from Azure API. + // +kubebuilder:validation:XValidation:rule="self in ['1', '2', '3']" + // +optional + // +kubebuilder:validation:MaxLength=255 + AvailabilityZone string `json:"availabilityZone,omitempty"` + + // encryptionAtHost enables encryption at host on virtual machines. According to Microsoft documentation, this + // means data stored on the VM host is encrypted at rest and flows encrypted to the Storage service. See + // https://learn.microsoft.com/en-us/azure/virtual-machines/disks-enable-host-based-encryption-portal?tabs=azure-powershell + // for more information. + // + // +kubebuilder:default=Enabled + // +kubebuilder:validation:Enum=Enabled;Disabled + // +optional + EncryptionAtHost string `json:"encryptionAtHost,omitempty"` + + // subnetID is the subnet ID of an existing subnet where the nodes in the nodepool will be created. This can be a + // different subnet than the one listed in the HostedCluster, HostedCluster.Spec.Platform.Azure.SubnetID, but must + // exist in the same network, HostedCluster.Spec.Platform.Azure.VnetID, and must exist under the same subscription ID, + // HostedCluster.Spec.Platform.Azure.SubscriptionID. + // subnetID is immutable once set. + // The subnetID should be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`. + // The subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12. + // The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parenthesis and must not end with a period (.) character. + // The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods and must not end with either a period (.) or hyphen (-) character. 
+ // The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores and must start with an alphanumeric character and must not end with a period (.) or hyphen (-) character. + // + // +kubebuilder:validation:XValidation:rule="size(self.split('/')) == 11 && self.matches('^/subscriptions/.*/resourceGroups/.*/providers/Microsoft.Network/virtualNetworks/.*/subnets/.*$')",message="encryptionSetID must be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`" + // +kubebuilder:validation:XValidation:rule="self.split('/')[2].matches('^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$')",message="the subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12" + // +kubebuilder:validation:XValidation:rule=`self.split('/')[4].matches('[a-zA-Z0-9-_\\(\\)\\.]{1,90}')`,message="The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parenthesis" + // +kubebuilder:validation:XValidation:rule="!self.split('/')[4].endsWith('.')",message="the resourceGroupName in the subnetID must not end with a period (.) character" + // +kubebuilder:validation:XValidation:rule=`self.split('/')[8].matches('[a-zA-Z0-9-_\\.]{2,64}')`,message="The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods" + // +kubebuilder:validation:XValidation:rule="!self.split('/')[8].endsWith('.') && !self.split('/')[8].endsWith('-')",message="the vnetName in the subnetID must not end with either a period (.) or hyphen (-) character" + // +kubebuilder:validation:XValidation:rule=`self.split('/')[10].matches('[a-zA-Z0-9][a-zA-Z0-9-_\\.]{0,79}')`,message="The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores and must start with an alphanumeric character" + // +kubebuilder:validation:XValidation:rule="!self.split('/')[10].endsWith('.') && !self.split('/')[10].endsWith('-')",message="the subnetName in the subnetID must not end with a period (.) or hyphen (-) character" + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=355 + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="SubnetID is immutable" + // +required + SubnetID string `json:"subnetID"` + + // diagnostics specifies the diagnostics settings for a virtual machine. + // If not specified, then Boot diagnostics will be disabled. + // +optional + Diagnostics *Diagnostics `json:"diagnostics,omitempty"` +} + +// AzureVMImage represents the different types of boot image sources that can be provided for an Azure VM. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'ImageID' ? has(self.imageID) : !has(self.imageID)",message="imageID is required when type is ImageID, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AzureMarketplace' ? has(self.azureMarketplace) : !has(self.azureMarketplace)",message="azureMarketplace is required when type is RequiredMember, and forbidden otherwise" +// +union +type AzureVMImage struct { + // type is the type of image data that will be provided to the Azure VM. + // Valid values are "ImageID" and "AzureMarketplace". 
+ // ImageID means is used for legacy managed VM images. This is where the user uploads a VM image directly to their resource group. + // AzureMarketplace means the VM will boot from an Azure Marketplace image. + // Marketplace images are preconfigured and published by the OS vendors and may include preconfigured software for the VM. + // + // +required + // +unionDiscriminator + Type AzureVMImageType `json:"type"` + + // imageID is the Azure resource ID of a VHD image to use to boot the Azure VMs from. + // TODO: What is the valid character set for this field? What about minimum and maximum lengths? + // + // +optional + // +unionMember + // +kubebuilder:validation:MaxLength=255 + ImageID *string `json:"imageID,omitempty"` + + // azureMarketplace contains the Azure Marketplace image info to use to boot the Azure VMs from. + // + // +optional + // +unionMember + AzureMarketplace *AzureMarketplaceImage `json:"azureMarketplace,omitempty"` +} + +// AzureMarketplaceImage specifies the information needed to create an Azure VM from an Azure Marketplace image. +// + This struct replicates the same fields found in CAPZ - https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/main/api/v1beta1/types.go. +type AzureMarketplaceImage struct { + // publisher is the name of the organization that created the image. + // It must be between 3 and 50 characters in length, and consist of only lowercase letters, numbers, and hyphens (-) and underscores (_). + // It must start with a lowercase letter or a number. + // TODO: Can we explain where a user might find this value, or provide an example of one they might want to use + // + // +kubebuilder:validation:Pattern=`^[a-z0-9][a-z0-9-_]{2,49}$` + // +kubebuilder:validation:MinLength=3 + // +kubebuilder:validation:MaxLength=50 + // +required + Publisher string `json:"publisher"` + + // offer specifies the name of a group of related images created by the publisher. + // TODO: What is the valid character set for this field? What about minimum and maximum lengths? + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + // +required + Offer string `json:"offer"` + + // sku specifies an instance of an offer, such as a major release of a distribution. + // For example, 22_04-lts-gen2, 8-lvm-gen2. + // The value must consist only of lowercase letters, numbers, and hyphens (-) and underscores (_). + // TODO: What about length limits? + // + // +kubebuilder:validation:Pattern=`^[a-z0-9-_]+$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + // +required + SKU string `json:"sku"` + + // version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, + // Minor, and Build are decimal numbers, e.g. '1.2.0'. Specify 'latest' to use the latest version of an image available at + // deployment time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a + // new version becomes available. + // + // +kubebuilder:validation:Pattern=`^[0-9]+\.[0-9]+\.[0-9]+$|^latest$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=32 + // +required + Version string `json:"version"` +} + +// AzureDiagnosticsStorageAccountType specifies the type of storage account for storing Azure VM diagnostics data. 
+// +kubebuilder:validation:Enum=Managed;UserManaged;Disabled +type AzureDiagnosticsStorageAccountType string + +func (a *AzureDiagnosticsStorageAccountType) String() string { + return string(*a) +} + +func (a *AzureDiagnosticsStorageAccountType) Set(s string) error { + switch s { + case string(AzureDiagnosticsStorageAccountTypeDisabled), string(AzureDiagnosticsStorageAccountTypeManaged), string(AzureDiagnosticsStorageAccountTypeUserManaged): + *a = AzureDiagnosticsStorageAccountType(s) + return nil + default: + return fmt.Errorf("unknown Azure diagnostics storage account type: %s", s) + } +} + +func (a *AzureDiagnosticsStorageAccountType) Type() string { + return "AzureDiagnosticsStorageAccountType" +} + +const ( + AzureDiagnosticsStorageAccountTypeDisabled = AzureDiagnosticsStorageAccountType("Disabled") + AzureDiagnosticsStorageAccountTypeManaged = AzureDiagnosticsStorageAccountType("Managed") + AzureDiagnosticsStorageAccountTypeUserManaged = AzureDiagnosticsStorageAccountType("UserManaged") +) + +// Diagnostics specifies the diagnostics settings for a virtual machine. +// +kubebuilder:validation:XValidation:rule="self.storageAccountType == 'UserManaged' ? has(self.userManaged) : !has(self.userManaged)", message="userManaged is required when storageAccountType is UserManaged, and forbidden otherwise" +// +union +type Diagnostics struct { + // storageAccountType determines if the storage account for storing the diagnostics data + // should be disabled (Disabled), provisioned by Azure (Managed) or by the user (UserManaged). + // +kubebuilder:validation:Enum=Managed;UserManaged;Disabled + // +kubebuilder:default:=Disabled + // +unionDiscriminator + // +optional + StorageAccountType AzureDiagnosticsStorageAccountType `json:"storageAccountType,omitempty"` + + // userManaged specifies the diagnostics settings for a virtual machine when the storage account is managed by the user. + // +optional + // +unionMember + UserManaged *UserManagedDiagnostics `json:"userManaged,omitempty"` +} + +// UserManagedDiagnostics specifies the diagnostics settings for a virtual machine when the storage account is managed by the user. +type UserManagedDiagnostics struct { + // storageAccountURI is the URI of the user-managed storage account. + // The URI typically will be `https://.blob.core.windows.net/` + // but may differ if you are using Azure DNS zone endpoints. + // You can find the correct endpoint by looking for the Blob Primary Endpoint in the + // endpoints tab in the Azure console or with the CLI by issuing + // `az storage account list --query='[].{name: name, "resource group": resourceGroup, "blob endpoint": primaryEndpoints.blob}'`. + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getScheme() == 'https'", message="storageAccountURI must be a valid HTTPS URL" + // +kubebuilder:validation:MaxLength=1024 + // +required + StorageAccountURI string `json:"storageAccountURI"` +} + +// +kubebuilder:validation:Enum=Premium_LRS;PremiumV2_LRS;Standard_LRS;StandardSSD_LRS;UltraSSD_LRS +type AzureDiskStorageAccountType string + +// Values copied from https://github.com/openshift/cluster-api-provider-azure/blob/release-4.18/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go#L614 +// excluding zone redundant storage(ZRS) types as they are not available in all regions. +const ( + // DiskStorageAccountTypesPremiumLRS - Premium SSD locally redundant storage. Best for production and performance sensitive + // workloads. 
+ DiskStorageAccountTypesPremiumLRS AzureDiskStorageAccountType = "Premium_LRS" + // DiskStorageAccountTypesPremiumV2LRS - Premium SSD v2 locally redundant storage. Best for production and performance-sensitive + // workloads that consistently require low latency and high IOPS and throughput. + DiskStorageAccountTypesPremiumV2LRS AzureDiskStorageAccountType = "PremiumV2_LRS" + // DiskStorageAccountTypesStandardLRS - Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent + // access. + DiskStorageAccountTypesStandardLRS AzureDiskStorageAccountType = "Standard_LRS" + // DiskStorageAccountTypesStandardSSDLRS - Standard SSD locally redundant storage. Best for web servers, lightly used enterprise + // applications and dev/test. + DiskStorageAccountTypesStandardSSDLRS AzureDiskStorageAccountType = "StandardSSD_LRS" + // DiskStorageAccountTypesUltraSSDLRS - Ultra SSD locally redundant storage. Best for IO-intensive workloads such as SAP HANA, + // top tier databases (for example, SQL, Oracle), and other transaction-heavy workloads. + DiskStorageAccountTypesUltraSSDLRS AzureDiskStorageAccountType = "UltraSSD_LRS" +) + +// +kubebuilder:validation:Enum=Persistent;Ephemeral +type AzureDiskPersistence string + +const ( + // PersistentDiskPersistence is the persistent disk type. + PersistentDiskPersistence AzureDiskPersistence = "Persistent" + + // EphemeralDiskPersistence is the ephemeral disk type. + EphemeralDiskPersistence AzureDiskPersistence = "Ephemeral" +) + +// +kubebuilder:validation:XValidation:rule="!has(self.diskStorageAccountType) || self.diskStorageAccountType != 'UltraSSD_LRS' || self.sizeGiB <= 32767",message="When not using diskStorageAccountType UltraSSD_LRS, the SizeGB value must be less than or equal to 32,767" +type AzureNodePoolOSDisk struct { + // sizeGiB is the size in GiB (1024^3 bytes) to assign to the OS disk. + // This should be between 16 and 65,536 when using the UltraSSD_LRS storage account type and between 16 and 32,767 when using any other storage account type. + // When not set, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default is 30. + // + // +kubebuilder:validation:Minimum=16 + // +kubebuilder:validation:Maximum=65536 + // +optional + SizeGiB int32 `json:"sizeGiB,omitempty"` + + // diskStorageAccountType is the disk storage account type to use. + // Valid values are Premium_LRS, PremiumV2_LRS, Standard_LRS, StandardSSD_LRS, UltraSSD_LRS. + // Note that Standard means a HDD. + // The disk performance is tied to the disk type, please refer to the Azure documentation for further details + // https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#disk-type-comparison. + // When omitted this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default is Premium SSD LRS. + // + // +optional + DiskStorageAccountType AzureDiskStorageAccountType `json:"diskStorageAccountType,omitempty"` + + // encryptionSetID is the ID of the DiskEncryptionSet resource to use to encrypt the OS disks for the VMs. + // Configuring a DiskEncyptionSet allows greater control over the encryption of the VM OS disk at rest. + // Can be used with either platform (Azure) managed, or customer managed encryption keys. + // This needs to exist in the same subscription id listed in the Hosted Cluster, HostedCluster.Spec.Platform.Azure.SubscriptionID. 
+	// DiskEncryptionSetID should also exist in a resource group under the same subscription id and the same location
+	// listed in the Hosted Cluster, HostedCluster.Spec.Platform.Azure.Location.
+	// The encryptionSetID should be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{resourceName}`.
+	// The subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12.
+	// The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parenthesis and must not end with a period (.) character.
+	// The resourceName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores.
+	// TODO: Are there other encryption related options we may want to expose, should this be in a struct as well?
+	//
+	// +kubebuilder:validation:XValidation:rule="size(self.split('/')) == 9 && self.matches('^/subscriptions/.*/resourceGroups/.*/providers/Microsoft.Compute/diskEncryptionSets/.*$')",message="encryptionSetID must be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{resourceName}`"
+	// +kubebuilder:validation:XValidation:rule="self.split('/')[2].matches('^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$')",message="the subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12"
+	// +kubebuilder:validation:XValidation:rule=`self.split('/')[4].matches('[a-zA-Z0-9-_\\(\\)\\.]{1,90}')`,message="The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parenthesis"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/')[4].endsWith('.')",message="the resourceGroupName in the encryptionSetID must not end with a period (.) character"
+	// +kubebuilder:validation:XValidation:rule="self.split('/')[8].matches('[a-zA-Z0-9-_]{1,80}')",message="The resourceName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores"
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=285
+	// +optional
+	EncryptionSetID string `json:"encryptionSetID,omitempty"`
+
+	// persistence determines whether the OS disk should be persisted beyond the life of the VM.
+	// Valid values are Persistent and Ephemeral.
+	// When set to Ephemeral, the OS disk will not be persisted to Azure storage and implies restrictions to the VM size and caching type.
+	// Full details can be found in the Azure documentation https://learn.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks.
+	// Ephemeral disks are primarily used for stateless applications, provide lower latency than Persistent disks and also incur no storage costs.
+	// When not set, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+	//
+	// +optional
+	Persistence AzureDiskPersistence `json:"persistence,omitempty"`
+}
+
+// AzurePlatformSpec specifies configuration for clusters running on Azure. Generally, the HyperShift API assumes bring
+// your own (BYO) cloud infrastructure resources. For example, resources like a resource group, a subnet, or a vnet
+// would be pre-created and then their names would be used respectively in the ResourceGroupName, SubnetName, VnetName
+// fields of the Hosted Cluster CR. An existing cloud resource is expected to exist under the same SubscriptionID.
+type AzurePlatformSpec struct {
+	// cloud is the cloud environment identifier, valid values could be found here: https://github.com/Azure/go-autorest/blob/4c0e21ca2bbb3251fe7853e6f9df6397f53dd419/autorest/azure/environments.go#L33
+	//
+	// +kubebuilder:validation:Enum=AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud
+	// +kubebuilder:default="AzurePublicCloud"
+	// +optional
+	Cloud string `json:"cloud,omitempty"`
+
+	// location is the Azure region where all the cloud infrastructure resources will be created.
+	//
+	// Example: eastus
+	//
+	// +required
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Location is immutable"
+	// +kubebuilder:validation:MaxLength=255
+	// +immutable
+	Location string `json:"location"`
+
+	// resourceGroup is the name of an existing resource group where all cloud resources created by the Hosted
+	// Cluster are to be placed. The resource group is expected to exist under the same subscription as SubscriptionID.
+	//
+	// In ARO HCP, this will be the managed resource group where customer cloud resources will be created.
+	//
+	// Resource group naming requirements can be found here: https://azure.github.io/PSRule.Rules.Azure/en/rules/Azure.ResourceGroup.Name/.
+	//
+	// Example: if your resource group ID is /subscriptions//resourceGroups/, your
+	// ResourceGroupName is .
+	//
+	// +kubebuilder:default:=default
+	// +required
+	// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9_()\-\.]{1,89}[a-zA-Z0-9_()\-]$`
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ResourceGroupName is immutable"
+	// +kubebuilder:validation:MaxLength=90
+	// +immutable
+	ResourceGroupName string `json:"resourceGroup"`
+
+	// vnetID is the ID of an existing VNET to use in creating VMs. The VNET can exist in a different resource group
+	// than the one specified in ResourceGroupName, but it must exist under the same subscription as
+	// SubscriptionID.
+	//
+	// In ARO HCP, this will be the ID of the customer provided VNET.
+	//
+	// Example: /subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks/
+	//
+	// +required
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="VnetID is immutable"
+	// +kubebuilder:validation:MaxLength=255
+	// +immutable
+	VnetID string `json:"vnetID"`
+
+	// subnetID is the subnet ID of an existing subnet where the nodes in the nodepool will be created. This can be a
+	// different subnet than the one listed in the HostedCluster, HostedCluster.Spec.Platform.Azure.SubnetID, but must
+	// exist in the same network, HostedCluster.Spec.Platform.Azure.VnetID, and must exist under the same subscription ID,
+	// HostedCluster.Spec.Platform.Azure.SubscriptionID.
+	// subnetID is immutable once set.
+	// The subnetID should be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`.
+	// The subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12.
+	// The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parenthesis and must not end with a period (.) character.
+	// The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods and must not end with either a period (.) or hyphen (-) character.
+	// The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores and must start with an alphanumeric character and must not end with a period (.) or hyphen (-) character.
+	//
+	// +kubebuilder:validation:XValidation:rule="size(self.split('/')) == 11 && self.matches('^/subscriptions/.*/resourceGroups/.*/providers/Microsoft.Network/virtualNetworks/.*/subnets/.*$')",message="encryptionSetID must be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`"
+	// +kubebuilder:validation:XValidation:rule="self.split('/')[2].matches('^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$')",message="the subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12"
+	// +kubebuilder:validation:XValidation:rule=`self.split('/')[4].matches('[a-zA-Z0-9-_\\(\\)\\.]{1,90}')`,message="The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parenthesis"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/')[4].endsWith('.')",message="the resourceGroupName in the subnetID must not end with a period (.) character"
+	// +kubebuilder:validation:XValidation:rule=`self.split('/')[8].matches('[a-zA-Z0-9-_\\.]{2,64}')`,message="The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/')[8].endsWith('.') && !self.split('/')[8].endsWith('-')",message="the vnetName in the subnetID must not end with either a period (.) or hyphen (-) character"
+	// +kubebuilder:validation:XValidation:rule=`self.split('/')[10].matches('[a-zA-Z0-9][a-zA-Z0-9-_\\.]{0,79}')`,message="The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores and must start with an alphanumeric character"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/')[10].endsWith('.') && !self.split('/')[10].endsWith('-')",message="the subnetName in the subnetID must not end with a period (.) or hyphen (-) character"
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=355
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="SubnetID is immutable"
+	// +required
+	SubnetID string `json:"subnetID"`
+
+	// subscriptionID is a unique identifier for an Azure subscription used to manage resources.
+	//
+	// +required
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="SubscriptionID is immutable"
+	// +kubebuilder:validation:MaxLength=255
+	// +immutable
+	SubscriptionID string `json:"subscriptionID"`
+
+	// securityGroupID is the ID of an existing security group on the SubnetID. This field is provided as part of the
+	// configuration for the Azure cloud provider, aka Azure cloud controller manager (CCM).
This security group is + // expected to exist under the same subscription as SubscriptionID. + // + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="SecurityGroupID is immutable" + // +required + // +kubebuilder:validation:MaxLength=255 + // +immutable + SecurityGroupID string `json:"securityGroupID"` + + // azureAuthenticationConfig is the type of Azure authentication configuration to use to authenticate with Azure's + // Cloud API. + // + // +required + AzureAuthenticationConfig AzureAuthenticationConfiguration `json:"azureAuthenticationConfig"` + + // tenantID is a unique identifier for the tenant where Azure resources will be created and managed in. + // + // +required + // +kubebuilder:validation:MaxLength=255 + TenantID string `json:"tenantID"` +} + +// objectEncoding represents the encoding for the Azure Key Vault secret containing the certificate related to +// CertificateName. objectEncoding needs to match the encoding format used when the certificate was stored in the +// Azure Key Vault. If objectEncoding doesn't match the encoding format of the certificate, the certificate will +// unsuccessfully be read by the Secrets CSI driver and an error will occur. This error will only be visible on the +// SecretProviderClass custom resource related to the managed identity. +// +// The default value is utf-8. +// +// See this for more info - https://github.com/Azure/secrets-store-csi-driver-provider-azure/blob/master/website/content/en/getting-started/usage/_index.md +// +// +kubebuilder:validation:Enum:=utf-8;hex;base64 +// +kubebuilder:default:="utf-8" +type ObjectEncodingFormat string + +// ManagedAzureKeyVault is an Azure Key Vault on the management cluster. +type ManagedAzureKeyVault struct { + // name is the name of the Azure Key Vault on the management cluster. + // + // +required + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // tenantID is the tenant ID of the Azure Key Vault on the management cluster. + // + // +required + // +kubebuilder:validation:MaxLength=255 + TenantID string `json:"tenantID"` +} + +// AzureResourceManagedIdentities contains the managed identities needed for HCP control plane and data plane components +// that authenticate with Azure's API. +type AzureResourceManagedIdentities struct { + // controlPlane contains the client IDs of all the managed identities on the HCP control plane needing to + // authenticate with Azure's API. + // + // +required + ControlPlane ControlPlaneManagedIdentities `json:"controlPlane"` + + // dataPlane contains the client IDs of all the managed identities on the data plane needing to authenticate with + // Azure's API. + // + // +required + DataPlane DataPlaneManagedIdentities `json:"dataPlane"` +} + +// AzureClientID is a string that represents the client ID of a managed identity. +// +// +kubebuilder:validation:XValidation:rule="self.matches('^[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}$')",message="the client ID of a managed identity must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12." +// +kubebuilder:validation:MinLength=36 +// +kubebuilder:validation:MaxLength=36 +// +kubebuilder:validation:Pattern=`^[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}$` +type AzureClientID string + +// AzureWorkloadIdentities is a struct that contains the client IDs of all the managed identities in self-managed Azure +// needing to authenticate with Azure's API. 
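+//
+// As an illustration only (hypothetical client ID values; not an upstream-documented example), the
+// corresponding section of a HostedCluster's Azure platform spec might look like:
+//
+//	azureAuthenticationConfig:
+//	  azureAuthenticationConfigType: WorkloadIdentities
+//	  workloadIdentities:
+//	    imageRegistry:
+//	      clientID: "11111111-1111-1111-1111-111111111111"
+//	    ingress:
+//	      clientID: "22222222-2222-2222-2222-222222222222"
+//	    file:
+//	      clientID: "33333333-3333-3333-3333-333333333333"
+//	    disk:
+//	      clientID: "44444444-4444-4444-4444-444444444444"
+//	    nodePoolManagement:
+//	      clientID: "55555555-5555-5555-5555-555555555555"
+//	    cloudProvider:
+//	      clientID: "66666666-6666-6666-6666-666666666666"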
+type AzureWorkloadIdentities struct {
+	// imageRegistry is the client ID of a federated managed identity, associated with cluster-image-registry-operator, used in
+	// workload identity authentication.
+	// +required
+	ImageRegistry WorkloadIdentity `json:"imageRegistry"`
+
+	// ingress is the client ID of a federated managed identity, associated with cluster-ingress-operator, used in
+	// workload identity authentication.
+	// +required
+	Ingress WorkloadIdentity `json:"ingress"`
+
+	// file is the client ID of a federated managed identity, associated with cluster-storage-operator-file,
+	// used in workload identity authentication.
+	// +required
+	File WorkloadIdentity `json:"file"`
+
+	// disk is the client ID of a federated managed identity, associated with cluster-storage-operator-disk,
+	// used in workload identity authentication.
+	// +required
+	Disk WorkloadIdentity `json:"disk"`
+
+	// nodePoolManagement is the client ID of a federated managed identity, associated with cluster-api-provider-azure, used
+	// in workload identity authentication.
+	// +required
+	NodePoolManagement WorkloadIdentity `json:"nodePoolManagement"`
+
+	// cloudProvider is the client ID of a federated managed identity, associated with azure-cloud-provider, used in
+	// workload identity authentication.
+	// +required
+	CloudProvider WorkloadIdentity `json:"cloudProvider"`
+}
+
+// ManagedIdentity contains the client ID, and its certificate name, of a managed identity. This managed identity is
+// used by an HCP component to authenticate with the Azure API.
+type ManagedIdentity struct {
+	// clientID is the client ID of a managed identity associated with CredentialsSecretName. This field is optional and
+	// mainly used for CI purposes.
+	//
+	// +optional
+	ClientID AzureClientID `json:"clientID,omitempty"`
+
+	// objectEncoding represents the encoding for the Azure Key Vault secret containing the certificate related to
+	// the managed identity. objectEncoding needs to match the encoding format used when the certificate was stored in the
+	// Azure Key Vault. If objectEncoding doesn't match the encoding format of the certificate, the certificate will
+	// unsuccessfully be read by the Secrets CSI driver and an error will occur. This error will only be visible on the
+	// SecretProviderClass custom resource related to the managed identity.
+	//
+	// The default value is utf-8.
+	//
+	// See this for more info - https://github.com/Azure/secrets-store-csi-driver-provider-azure/blob/master/website/content/en/getting-started/usage/_index.md
+	//
+	// +kubebuilder:validation:Enum:=utf-8;hex;base64
+	// +kubebuilder:default:="utf-8"
+	// +required
+	ObjectEncoding ObjectEncodingFormat `json:"objectEncoding"`
+
+	// credentialsSecretName is the name of an Azure Key Vault secret. This field assumes the secret contains the JSON
+	// format of a UserAssignedIdentityCredentials struct. At a minimum, the secret needs to contain the ClientId,
+	// ClientSecret, AuthenticationEndpoint, NotBefore, NotAfter, and TenantId.
+	//
+	// More info on this struct can be found here - https://github.com/Azure/msi-dataplane/blob/63fb37d3a1aaac130120624674df795d2e088083/pkg/dataplane/internal/generated_client.go#L156.
+	//
+	// credentialsSecretName must be between 1 and 127 characters and use only alphanumeric characters and hyphens.
+	// credentialsSecretName must also be unique within the Azure Key Vault. See more details here - https://azure.github.io/PSRule.Rules.Azure/en/rules/Azure.KeyVault.SecretName/.
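+	// For illustration only, a hypothetical conforming value would be something like
+	// "cpo-managed-identity-credentials" (1 to 127 characters, alphanumeric characters and hyphens only).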
+	//
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=127
+	// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9-]+$`
+	// +required
+	CredentialsSecretName string `json:"credentialsSecretName"`
+}
+
+// WorkloadIdentity is a struct that contains the client ID of a federated managed identity used in workload identity
+// authentication.
+type WorkloadIdentity struct {
+	// clientID is the client ID of a federated managed identity used in workload identity authentication
+	//
+	// +required
+	ClientID AzureClientID `json:"clientID"`
+}
+
+// ControlPlaneManagedIdentities contains the managed identities on the HCP control plane needing to authenticate with
+// Azure's API.
+type ControlPlaneManagedIdentities struct {
+	// managedIdentitiesKeyVault contains information on the management cluster's managed identities Azure Key Vault.
+	// This Key Vault is where the managed identities certificates are stored. These certificates are pulled out of the
+	// Key Vault by the Secrets Store CSI driver and mounted into a volume on control plane pods requiring
+	// authentication with Azure API.
+	//
+	// More information on how the Secrets Store CSI driver works to do this can be found here:
+	// https://learn.microsoft.com/en-us/azure/aks/csi-secrets-store-driver.
+	//
+	// +required
+	ManagedIdentitiesKeyVault ManagedAzureKeyVault `json:"managedIdentitiesKeyVault"`
+
+	// cloudProvider is a pre-existing managed identity associated with the azure cloud provider, aka cloud controller
+	// manager.
+	//
+	// +required
+	CloudProvider ManagedIdentity `json:"cloudProvider"`
+
+	// nodePoolManagement is a pre-existing managed identity associated with the operator managing the NodePools.
+	//
+	// +required
+	NodePoolManagement ManagedIdentity `json:"nodePoolManagement"`
+
+	// controlPlaneOperator is a pre-existing managed identity associated with the control plane operator.
+	//
+	// +required
+	ControlPlaneOperator ManagedIdentity `json:"controlPlaneOperator"`
+
+	// imageRegistry is a pre-existing managed identity associated with the cluster-image-registry-operator.
+	//
+	// +optional
+	ImageRegistry ManagedIdentity `json:"imageRegistry"`
+
+	// ingress is a pre-existing managed identity associated with the cluster-ingress-operator.
+	//
+	// +required
+	Ingress ManagedIdentity `json:"ingress"`
+
+	// network is a pre-existing managed identity associated with the cluster-network-operator.
+	//
+	// +required
+	Network ManagedIdentity `json:"network"`
+
+	// disk is a pre-existing managed identity associated with the azure-disk-controller.
+	//
+	// +required
+	Disk ManagedIdentity `json:"disk"`
+
+	// file is a pre-existing managed identity associated with the azure-file-controller.
+	//
+	// +required
+	File ManagedIdentity `json:"file"`
+}
+
+// DataPlaneManagedIdentities contains the client IDs of all the managed identities on the data plane needing to
+// authenticate with Azure's API.
+type DataPlaneManagedIdentities struct {
+	// imageRegistryMSIClientID is the client ID of a pre-existing managed identity ID associated with the image
+	// registry controller.
+	//
+	// +required
+	// +kubebuilder:validation:MaxLength=255
+	ImageRegistryMSIClientID string `json:"imageRegistryMSIClientID"`
+
+	// diskMSIClientID is the client ID of a pre-existing managed identity ID associated with the CSI Disk driver.
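+	// For illustration only, a hypothetical value would be the identity's client ID UUID, for example
+	// "77777777-7777-7777-7777-777777777777".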
+ // + // +required + // +kubebuilder:validation:MaxLength=255 + DiskMSIClientID string `json:"diskMSIClientID"` + + // fileMSIClientID is the client ID of a pre-existing managed identity ID associated with the CSI File driver. + // + // +required + // +kubebuilder:validation:MaxLength=255 + FileMSIClientID string `json:"fileMSIClientID"` +} + +// AzureKMSSpec defines metadata about the configuration of the Azure KMS Secret Encryption provider using Azure key vault +type AzureKMSSpec struct { + // activeKey defines the active key used to encrypt new secrets + // + // +required + ActiveKey AzureKMSKey `json:"activeKey"` + // backupKey defines the old key during the rotation process so previously created + // secrets can continue to be decrypted until they are all re-encrypted with the active key. + // +optional + BackupKey *AzureKMSKey `json:"backupKey,omitempty"` + + // kms is a pre-existing managed identity used to authenticate with Azure KMS. + // + // +required + KMS ManagedIdentity `json:"kms"` +} + +type AzureKMSKey struct { + // keyVaultName is the name of the keyvault. Must match criteria specified at https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#vault-name-and-object-name + // Your Microsoft Entra application used to create the cluster must be authorized to access this keyvault, e.g using the AzureCLI: + // `az keyvault set-policy -n $KEYVAULT_NAME --key-permissions decrypt encrypt --spn ` + // +kubebuilder:validation:MaxLength=255 + // +required + KeyVaultName string `json:"keyVaultName"` + + // keyName is the name of the keyvault key used for encrypt/decrypt + // +kubebuilder:validation:MaxLength=255 + // +required + KeyName string `json:"keyName"` + + // keyVersion contains the version of the key to use + // +kubebuilder:validation:MaxLength=255 + // +required + KeyVersion string `json:"keyVersion"` +} + +// AzureAuthenticationType is a discriminated union type that contains the Azure authentication configuration for an +// Azure Hosted Cluster. This type is used to determine which authentication configuration is being used. Valid values +// are "ManagedIdentities" and "WorkloadIdentities". +// +// +kubebuilder:validation:Enum=ManagedIdentities;WorkloadIdentities +type AzureAuthenticationType string + +const ( + // "ManagedIdentities" means that the Hosted Cluster is using managed identities to authenticate with Azure's API. + // This is only valid for managed Azure, also known as ARO HCP. + AzureAuthenticationTypeManagedIdentities AzureAuthenticationType = "ManagedIdentities" + + // "WorkloadIdentities" means that the Hosted Cluster is using workload identities to authenticate with Azure's API. + // This is only valid for self-managed Azure. + AzureAuthenticationTypeWorkloadIdentities AzureAuthenticationType = "WorkloadIdentities" +) + +// azureAuthenticationConfiguration is a discriminated union type that contains the Azure authentication configuration +// for a Hosted Cluster. This configuration is used to determine how the Hosted Cluster authenticates with Azure's API, +// either with managed identities or workload identities. +// +// +kubebuilder:validation:XValidation:rule="self.azureAuthenticationConfigType == 'ManagedIdentities' ? has(self.managedIdentities) : !has(self.managedIdentities)", message="managedIdentities is required when azureAuthenticationConfigType is ManagedIdentities, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="self.azureAuthenticationConfigType == 'WorkloadIdentities' ? 
has(self.workloadIdentities) : !has(self.workloadIdentities)", message="workloadIdentities is required when azureAuthenticationConfigType is WorkloadIdentities, and forbidden otherwise" +// +union +type AzureAuthenticationConfiguration struct { + // azureAuthenticationConfigType is the type of identity configuration used in the Hosted Cluster. This field is + // used to determine which identity configuration is being used. Valid values are "ManagedIdentities" and + // "WorkloadIdentities". + // + // +unionDiscriminator + // +required + AzureAuthenticationConfigType AzureAuthenticationType `json:"azureAuthenticationConfigType"` + + // managedIdentities contains the managed identities needed for HCP control plane and data plane components that + // authenticate with Azure's API. + // + // These are required for managed Azure, also known as ARO HCP. + // + // +optional + ManagedIdentities *AzureResourceManagedIdentities `json:"managedIdentities,omitempty"` + + // workloadIdentities is a struct of client IDs for each component that needs to authenticate with Azure's API in + // self-managed Azure. These client IDs are used to authenticate with Azure cloud on both the control plane and data + // plane. + // + // This is required for self-managed Azure. + // +optional + WorkloadIdentities *AzureWorkloadIdentities `json:"workloadIdentities,omitempty"` +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go index 00d7ce48d7..12f8b872da 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go @@ -11,10 +11,17 @@ import ( // CertificateSigningRequestApproval defines the desired state of CertificateSigningRequestApproval type CertificateSigningRequestApproval struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // metadata is standard object metadata. + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - Spec CertificateSigningRequestApprovalSpec `json:"spec,omitempty"` + // spec is the specification of the desired behavior of the CertificateSigningRequestApproval. + // +optional + Spec CertificateSigningRequestApprovalSpec `json:"spec,omitempty"` + + // status is the most recently observed status of the CertificateSigningRequestApproval. + // +optional Status CertificateSigningRequestApprovalStatus `json:"status,omitempty"` } @@ -24,11 +31,16 @@ type CertificateSigningRequestApprovalSpec struct{} // CertificateSigningRequestApprovalStatus defines the observed state of CertificateSigningRequestApproval type CertificateSigningRequestApprovalStatus struct{} -// +kubebuilder:object:root=true - +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CertificateSigningRequestApprovalList contains a list of CertificateSigningRequestApprovals. type CertificateSigningRequestApprovalList struct { metav1.TypeMeta `json:",inline"` + // metadata is standard list metadata. + // +optional metav1.ListMeta `json:"metadata,omitempty"` - Items []CertificateSigningRequestApproval `json:"items"` + + // items is the list of CertificateSigningRequestApprovals. 
+ // +required + // +kubebuilder:validation:MaxItems=1000 + Items []CertificateSigningRequestApproval `json:"items"` } diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/clusterconfig.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/clusterconfig.go index 77f45afcbb..4a765689f9 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/clusterconfig.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/clusterconfig.go @@ -13,3 +13,38 @@ func (c *ClusterConfiguration) GetNetwork() *configv1.NetworkSpec { retu func (c *ClusterConfiguration) GetOAuth() *configv1.OAuthSpec { return c.OAuth } func (c *ClusterConfiguration) GetScheduler() *configv1.SchedulerSpec { return c.Scheduler } func (c *ClusterConfiguration) GetProxy() *configv1.ProxySpec { return c.Proxy } + +func (c *ClusterConfiguration) GetTLSSecurityProfile() *configv1.TLSSecurityProfile { + if c != nil && c.APIServer != nil { + return c.APIServer.TLSSecurityProfile + } + return nil +} + +func (c *ClusterConfiguration) GetAutoAssignCIDRs() []string { + if c != nil && c.Network != nil && c.Network.ExternalIP != nil { + return c.Network.ExternalIP.AutoAssignCIDRs + } + return nil +} + +func (c *ClusterConfiguration) GetAuditPolicyConfig() configv1.Audit { + if c != nil && c.APIServer != nil && c.APIServer.Audit.Profile != "" { + return c.APIServer.Audit + } + return configv1.Audit{Profile: configv1.DefaultAuditProfileType} +} + +func (c *ClusterConfiguration) GetFeatureGateSelection() configv1.FeatureGateSelection { + if c != nil && c.FeatureGate != nil { + return c.FeatureGate.FeatureGateSelection + } + return configv1.FeatureGateSelection{FeatureSet: configv1.Default} +} + +func (c *ClusterConfiguration) GetNamedCertificates() []configv1.APIServerNamedServingCert { + if c != nil && c.APIServer != nil { + return c.APIServer.ServingCerts.NamedCertificates + } + return []configv1.APIServerNamedServingCert{} +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/controlplanecomponent_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/controlplanecomponent_types.go new file mode 100644 index 0000000000..dd87132745 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/controlplanecomponent_types.go @@ -0,0 +1,111 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + SchemeBuilder.Register(func(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ControlPlaneComponent{}, + &ControlPlaneComponentList{}, + ) + return nil + }) +} + +const ( + // ControlPlaneComponentAvailable indicates whether the ControlPlaneComponent is available. + ControlPlaneComponentAvailable ConditionType = "Available" + // ControlPlaneComponentRolloutComplete indicates whether the ControlPlaneComponent has completed its rollout. + ControlPlaneComponentRolloutComplete ConditionType = "RolloutComplete" + + // WaitingForDependenciesReason indicates that there are unavailable dependencies blocking the ControlPlaneComponent reconciliation. + WaitingForDependenciesReason string = "WaitingForDependencies" + // ReconciliationErrorReason indicates that there was an error during the reconciliation of the ControlPlaneComponent. 
+ ReconciliationErrorReason string = "ReconciliationError" +) + +// ControlPlaneComponentSpec defines the desired state of ControlPlaneComponent +type ControlPlaneComponentSpec struct { +} + +// ComponentResource defines a resource reconciled by a ControlPlaneComponent. +type ComponentResource struct { + // kind is the name of the resource schema. + // +required + // +kubebuilder:validation:MaxLength=255 + Kind string `json:"kind"` + + // group is the API group for this resource type. + // +required + // +kubebuilder:validation:MaxLength=255 + Group string `json:"group"` + + // name is the name of this resource. + // +required + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` +} + +// ControlPlaneComponentStatus defines the observed state of ControlPlaneComponent +type ControlPlaneComponentStatus struct { + // conditions contains details for the current state of the ControlPlane Component. + // If there is an error, then the Available condition will be false. + // + // Current condition types are: "Available" + // +optional + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +kubebuilder:validation:MaxItems=10 + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // version reports the current version of this component. + // +optional + // +kubebuilder:validation:MaxLength=255 + Version string `json:"version,omitempty"` + + // resources is a list of the resources reconciled by this component. + // +optional + // +kubebuilder:validation:MaxItems=100 + Resources []ComponentResource `json:"resources,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=controlplanecomponents,shortName=cpc;cpcs,scope=Namespaced +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version",description="Version" +// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].status",description="Available" +// +kubebuilder:printcolumn:name="Progressing",type="string",JSONPath=".status.conditions[?(@.type==\"Progressing\")].status",description="Progressing" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].message",description="Message" +// +kubebuilder:printcolumn:name="ProgressingMessage",type="string",priority=1,JSONPath=".status.conditions[?(@.type==\"Progressing\")].message",description="ProgressingMessage" +// ControlPlaneComponent specifies the state of a ControlPlane Component +type ControlPlaneComponent struct { + metav1.TypeMeta `json:",inline"` + // metadata is the metadata for the ControlPlaneComponent. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec is the specification for the ControlPlaneComponent. + // +optional + Spec ControlPlaneComponentSpec `json:"spec,omitempty"` + // status is the status of the ControlPlaneComponent. + // +optional + Status ControlPlaneComponentStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// ControlPlaneComponentList contains a list of ControlPlaneComponent +type ControlPlaneComponentList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the metadata for the ControlPlaneComponentList. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // items is a list of ControlPlaneComponent. 
+ // +required + // +kubebuilder:validation:MaxItems=1000 + Items []ControlPlaneComponent `json:"items"` +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/endpointservice_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/endpointservice_types.go index 7eeaf3eb76..9e2ab9b7f7 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/endpointservice_types.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/endpointservice_types.go @@ -31,49 +31,67 @@ const ( // AWSEndpointServiceSpec defines the desired state of AWSEndpointService type AWSEndpointServiceSpec struct { - // The name of the NLB for which an Endpoint Service should be configured + // networkLoadBalancerName is the name of the NLB for which an Endpoint Service should be configured + // +kubebuilder:validation:MaxLength=255 + // NetworkLoadBalancerName is the name of the network load balancer. + // +required NetworkLoadBalancerName string `json:"networkLoadBalancerName"` - // SubnetIDs is the list of subnet IDs to which guest nodes can attach + // subnetIDs is the list of subnet IDs to which guest nodes can attach // +optional + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:items:MaxLength=63 SubnetIDs []string `json:"subnetIDs,omitempty"` - // Tags to apply to the EndpointService + // resourceTags is the list of tags to apply to the EndpointService // +optional + // +kubebuilder:validation:MaxItems=25 ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` } // AWSEndpointServiceStatus defines the observed state of AWSEndpointService type AWSEndpointServiceStatus struct { - // EndpointServiceName is the name of the Endpoint Service created in the + // conditions contains details for the current state of the Endpoint Service + // request If there is an error processing the request e.g. the NLB doesn't + // exist, then the Available condition will be false, reason AWSErrorReason, + // and the error reported in the message. + // + // Current condition types are: "Available" + // +optional + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +kubebuilder:validation:MaxItems=10 + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // endpointServiceName is the name of the Endpoint Service created in the // management VPC // +optional + // +kubebuilder:validation:MaxLength=255 EndpointServiceName string `json:"endpointServiceName,omitempty"` - // EndpointID is the ID of the Endpoint created in the guest VPC + // endpointID is the ID of the Endpoint created in the guest VPC // +optional + // +kubebuilder:validation:MaxLength=255 EndpointID string `json:"endpointID,omitempty"` - // DNSName are the names for the records created in the hypershift private zone + // dnsNames are the names for the records created in the hypershift private zone // +optional + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:items:MaxLength=253 DNSNames []string `json:"dnsNames,omitempty"` - // DNSZoneID is ID for the hypershift private zone + // dnsZoneID is ID for the hypershift private zone // +optional + // +kubebuilder:validation:MaxLength=255 DNSZoneID string `json:"dnsZoneID,omitempty"` - // Conditions contains details for the current state of the Endpoint Service - // request If there is an error processing the request e.g. the NLB doesn't - // exist, then the Available condition will be false, reason AWSErrorReason, - // and the error reported in the message. 
- // - // Current condition types are: "Available" + // securityGroupID is the ID for the VPC endpoint SecurityGroup + // +kubebuilder:validation:MaxLength=255 + // SecurityGroupID is the ID of the security group. // +optional - // +listType=map - // +listMapKey=type - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []metav1.Condition `json:"conditions,omitempty"` + SecurityGroupID string `json:"securityGroupID,omitempty"` } // +kubebuilder:object:root=true @@ -82,17 +100,28 @@ type AWSEndpointServiceStatus struct { // +kubebuilder:subresource:status // AWSEndpointService specifies a request for an Endpoint Service in AWS type AWSEndpointService struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // metadata is the metadata for the AWSEndpointService. + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec AWSEndpointServiceSpec `json:"spec,omitempty"` + // spec is the specification for the AWSEndpointService. + // +optional + Spec AWSEndpointServiceSpec `json:"spec,omitempty"` + // status is the status of the AWSEndpointService. + // +optional Status AWSEndpointServiceStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true // AWSEndpointServiceList contains a list of AWSEndpointService +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type AWSEndpointServiceList struct { metav1.TypeMeta `json:",inline"` + // metadata is standard list metadata. + // +optional metav1.ListMeta `json:"metadata,omitempty"` - Items []AWSEndpointService `json:"items"` + // items is a list of AWSEndpointService. + // +kubebuilder:validation:MaxItems=100 + // items is the list of AWSEndpointServices. + // +required + Items []AWSEndpointService `json:"items"` } diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/groupversion_info.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/groupversion_info.go index 4fc32d35c6..3241a27e44 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/groupversion_info.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/groupversion_info.go @@ -4,7 +4,6 @@ package v1beta1 import ( - "github.com/openshift/hypershift/api/hypershift" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -12,7 +11,7 @@ import ( var ( // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: hypershift.GroupName, Version: "v1beta1"} + GroupVersion = schema.GroupVersion{Group: "hypershift.openshift.io", Version: "v1beta1"} SchemeGroupVersion = GroupVersion diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hosted_controlplane.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hosted_controlplane.go index 754e650045..add6fad2b3 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hosted_controlplane.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hosted_controlplane.go @@ -1,11 +1,11 @@ package v1beta1 import ( + configv1 "github.com/openshift/api/config/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - - configv1 "github.com/openshift/api/config/v1" ) func init() { @@ -26,21 +26,31 @@ func init() { // +kubebuilder:subresource:status // +kubebuilder:object:root=true type HostedControlPlane struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // 
metadata is the metadata for the HostedControlPlane. + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec HostedControlPlaneSpec `json:"spec,omitempty"` + // spec is the specification for the HostedControlPlane. + // +optional + Spec HostedControlPlaneSpec `json:"spec,omitempty"` + // status is the status of the HostedControlPlane. + // +optional Status HostedControlPlaneStatus `json:"status,omitempty"` } // HostedControlPlaneSpec defines the desired state of HostedControlPlane +// +kubebuilder:validation:XValidation:rule="self.platform.type == 'IBMCloud' ? size(self.services) >= 3 : size(self.services) >= 4",message="spec.services in body should have at least 4 items or 3 for IBMCloud" type HostedControlPlaneSpec struct { - // ReleaseImage is the release image applied to the hosted control plane. + // releaseImage is the release image applied to the hosted control plane. + // +required + // +kubebuilder:validation:MaxLength=255 ReleaseImage string `json:"releaseImage"` - // ControlPlaneReleaseImage specifies the desired OCP release payload for + // controlPlaneReleaseImage specifies the desired OCP release payload for // control plane components running on the management cluster. // If not defined, ReleaseImage is used + // +optional + // +kubebuilder:validation:MaxLength=255 ControlPlaneReleaseImage *string `json:"controlPlaneReleaseImage,omitempty"` // updateService may be used to specify the preferred upstream update service. @@ -54,35 +64,52 @@ type HostedControlPlaneSpec struct { // contain stable updates that are appropriate for production clusters. // // +optional + // +kubebuilder:validation:MaxLength=255 Channel string `json:"channel,omitempty"` + // pullSecret is a reference to a secret containing the pull secret for the hosted control plane. + // +required PullSecret corev1.LocalObjectReference `json:"pullSecret"` - // IssuerURL is an OIDC issuer URL which is used as the issuer in all + // issuerURL is an OIDC issuer URL which is used as the issuer in all // ServiceAccount tokens generated by the control plane API server. The // default value is kubernetes.default.svc, which only works for in-cluster // validation. + // +required + // +kubebuilder:validation:MaxLength=255 IssuerURL string `json:"issuerURL"` - // Networking specifies network configuration for the cluster. + // networking specifies network configuration for the cluster. // Temporarily optional for backward compatibility, required in future releases. // +optional Networking ClusterNetworking `json:"networking,omitempty"` + // sshKey is a reference to a secret containing the SSH key for the hosted control plane. + // +required SSHKey corev1.LocalObjectReference `json:"sshKey"` - // ClusterID is the unique id that identifies the cluster externally. + // clusterID is the unique id that identifies the cluster externally. // Making it optional here allows us to keep compatibility with previous // versions of the control-plane-operator that have no knowledge of this // field. // +optional + // +kubebuilder:validation:MaxLength=255 ClusterID string `json:"clusterID,omitempty"` - InfraID string `json:"infraID"` + // infraID is the unique id that identifies the cluster internally. + // +required + // +kubebuilder:validation:MaxLength=255 + InfraID string `json:"infraID"` + + // platform is the platform configuration for the cluster. 
+ // +required Platform PlatformSpec `json:"platform"` - DNS DNSSpec `json:"dns"` - // ServiceAccountSigningKey is a reference to a secret containing the private key + // dns is the DNS configuration for the cluster. + // +required + DNS DNSSpec `json:"dns"` + + // serviceAccountSigningKey is a reference to a secret containing the private key // used by the service account token issuer. The secret is expected to contain // a single key named "key". If not specified, a service account signing key will // be generated automatically for the cluster. @@ -90,16 +117,16 @@ type HostedControlPlaneSpec struct { // +optional ServiceAccountSigningKey *corev1.LocalObjectReference `json:"serviceAccountSigningKey,omitempty"` - // ControllerAvailabilityPolicy specifies the availability policy applied to + // controllerAvailabilityPolicy specifies the availability policy applied to // critical control plane components. The default value is SingleReplica. // // +optional // +immutable // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ControllerAvailabilityPolicy is immutable" - // +kubebuilder:default:="SingleReplica" + // +kubebuilder:default:="HighlyAvailable" ControllerAvailabilityPolicy AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"` - // InfrastructureAvailabilityPolicy specifies the availability policy applied + // infrastructureAvailabilityPolicy specifies the availability policy applied // to infrastructure services which run on cluster nodes. The default value is // SingleReplica. // @@ -107,19 +134,35 @@ type HostedControlPlaneSpec struct { // +kubebuilder:default:="SingleReplica" InfrastructureAvailabilityPolicy AvailabilityPolicy `json:"infrastructureAvailabilityPolicy,omitempty"` - // FIPS specifies if the nodes for the cluster will be running in FIPS mode + // fips specifies if the nodes for the cluster will be running in FIPS mode // +optional FIPS bool `json:"fips"` - // KubeConfig specifies the name and key for the kubeconfig secret + // kubeconfig specifies the name and key for the kubeconfig secret // +optional KubeConfig *KubeconfigSecretRef `json:"kubeconfig,omitempty"` - // Services defines metadata about how control plane services are published + // kubeAPIServerDNSName specifies a desired DNS name to resolve to the KAS. + // When set, the controller will automatically generate a secret with kubeconfig and expose it in the hostedCluster Status.customKubeconfig field. + // If it's set or removed day 2, the kubeconfig generated secret will be created, recreated or deleted. + // The DNS entries should be resolvable from the cluster, so this should be manually configured in the DNS provider. + // This field works in conjunction with configuration.APIServer.ServingCerts.NamedCertificates to enable + // access to the API server via a custom domain name. The NamedCertificates provide the TLS certificates + // for the custom domain, while this field triggers the generation of a kubeconfig that uses those certificates. + // + // +kubebuilder:validation:XValidation:rule=`self == "" || self.matches('^(?:(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,}|[a-zA-Z0-9-]+)$')`,message="kubeAPIServerDNSName must be a valid URL name (e.g., api.example.com)" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:example: "api.example.com" + // +optional + KubeAPIServerDNSName string `json:"kubeAPIServerDNSName,omitempty"` + + // services defines metadata about how control plane services are published // in the management cluster. 
+ // +required + // +kubebuilder:validation:MaxItems=6 Services []ServicePublishingStrategyMapping `json:"services"` - // AuditWebhook contains metadata for configuring an audit webhook + // auditWebhook contains metadata for configuring an audit webhook // endpoint for a cluster to process cluster audit events. It references // a secret that contains the webhook information for the audit webhook endpoint. // It is a secret because if the endpoint has MTLS the kubeconfig will contain client @@ -128,36 +171,45 @@ type HostedControlPlaneSpec struct { // +optional AuditWebhook *corev1.LocalObjectReference `json:"auditWebhook,omitempty"` - // Etcd contains metadata about the etcd cluster the hypershift managed Openshift control plane components + // etcd contains metadata about the etcd cluster the hypershift managed Openshift control plane components // use to store data. + // +required Etcd EtcdSpec `json:"etcd"` - // Configuration embeds resources that correspond to the openshift configuration API: + // configuration embeds resources that correspond to the openshift configuration API: // https://docs.openshift.com/container-platform/4.7/rest_api/config_apis/config-apis-index.html - // +kubebuilder:validation:Optional + // +optional Configuration *ClusterConfiguration `json:"configuration,omitempty"` - // ImageContentSources lists sources/repositories for the release-image content. + // operatorConfiguration specifies configuration for individual OCP operators in the cluster. + // // +optional + // +openshift:enable:FeatureGate=ClusterVersionOperatorConfiguration + OperatorConfiguration *OperatorConfiguration `json:"operatorConfiguration,omitempty"` + + // imageContentSources lists sources/repositories for the release-image content. + // +optional + // +kubebuilder:validation:MaxItems=255 ImageContentSources []ImageContentSource `json:"imageContentSources,omitempty"` - // AdditionalTrustBundle references a ConfigMap containing a PEM-encoded X.509 certificate bundle + // additionalTrustBundle references a ConfigMap containing a PEM-encoded X.509 certificate bundle // +optional AdditionalTrustBundle *corev1.LocalObjectReference `json:"additionalTrustBundle,omitempty"` - // SecretEncryption contains metadata about the kubernetes secret encryption strategy being used for the + // secretEncryption contains metadata about the kubernetes secret encryption strategy being used for the // cluster when applicable. // +optional SecretEncryption *SecretEncryptionSpec `json:"secretEncryption,omitempty"` - // PausedUntil is a field that can be used to pause reconciliation on a resource. + // pausedUntil is a field that can be used to pause reconciliation on a resource. // Either a date can be provided in RFC3339 format or a boolean. If a date is // provided: reconciliation is paused on the resource until that date. If the boolean true is // provided: reconciliation is paused on the resource until the field is removed. // +optional + // +kubebuilder:validation:MaxLength=255 PausedUntil *string `json:"pausedUntil,omitempty"` - // OLMCatalogPlacement specifies the placement of OLM catalog components. By default, + // olmCatalogPlacement specifies the placement of OLM catalog components. By default, // this is set to management and OLM catalog components are deployed onto the management // cluster. If set to guest, the OLM catalog components will be deployed onto the guest // cluster. 
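As a rough illustration of the kubeAPIServerDNSName behavior described above (a sketch only, not part of the vendored code: the hostname "api.example.com" and the secret name "custom-api-cert" are made-up values, and required spec fields are elided), a HostedControlPlaneSpec that pairs the custom DNS name with a matching named serving certificate could look like this:

package example

import (
	configv1 "github.com/openshift/api/config/v1"
	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

// customKASSpec pairs kubeAPIServerDNSName with a named serving certificate
// covering the same hostname; together they let the controller generate the
// custom kubeconfig surfaced in the customKubeconfig status field.
func customKASSpec() hyperv1.HostedControlPlaneSpec {
	return hyperv1.HostedControlPlaneSpec{
		// Example DNS name; it must be resolvable through your own DNS provider.
		KubeAPIServerDNSName: "api.example.com",
		Configuration: &hyperv1.ClusterConfiguration{
			APIServer: &configv1.APIServerSpec{
				ServingCerts: configv1.APIServerServingCerts{
					NamedCertificates: []configv1.APIServerNamedServingCert{{
						Names: []string{"api.example.com"},
						// Hypothetical TLS secret holding the certificate for the custom domain.
						ServingCertificate: configv1.SecretNameReference{Name: "custom-api-cert"},
					}},
				},
			},
		},
	}
}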
@@ -167,19 +219,51 @@ type HostedControlPlaneSpec struct { // +immutable OLMCatalogPlacement OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"` - // Autoscaling specifies auto-scaling behavior that applies to all NodePools + // autoscaling specifies auto-scaling behavior that applies to all NodePools // associated with the control plane. // // +optional Autoscaling ClusterAutoscaling `json:"autoscaling,omitempty"` - // NodeSelector when specified, must be true for the pods managed by the HostedCluster to be scheduled. + // autoNode specifies the configuration for the autoNode feature. + // +openshift:enable:FeatureGate=AutoNodeKarpenter + // +optional + AutoNode *AutoNode `json:"autoNode,omitempty"` + + // nodeSelector when specified, must be true for the pods managed by the HostedCluster to be scheduled. // // +optional NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // tolerations when specified, define what custom tolerations are added to the hcp pods. + // + // +optional + // +kubebuilder:validation:MaxItems=25 + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // labels when specified, define what custom labels are added to the hcp pods. + // Changing this day 2 will cause a rollout of all hcp pods. + // Duplicate keys are not supported. If duplicate keys are defined, only the last key/value pair is preserved. + // Valid values are those in https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + // + // -kubebuilder:validation:XValidation:rule=`self.all(key, size(key) <= 317 && key.matches('^(([A-Za-z0-9]+(\\.[A-Za-z0-9]+)?)*[A-Za-z0-9]\\/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$'))`, message="label key must have two segments: an optional prefix and name, separated by a slash (/). The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots (.), not longer than 253 characters in total, followed by a slash (/)" + // -kubebuilder:validation:XValidation:rule=`self.all(key, size(self[key]) <= 63 && self[key].matches('^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$'))`, message="label value must be 63 characters or less (can be empty), consist of alphanumeric characters, dashes (-), underscores (_) or dots (.), and begin and end with an alphanumeric character" + // TODO: key/value validations break cost budget for <=4.17. We should figure why and enable it back. + // +kubebuilder:validation:MaxProperties=20 + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // capabilities allows for disabling optional components at cluster install time. + // This field is optional and once set cannot be changed. + // +immutable + // +optional + // +kubebuilder:default={} + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Capabilities is immutable. Changes might result in unpredictable and disruptive behavior." + Capabilities *Capabilities `json:"capabilities,omitempty"` } -// AvailabilityPolicy specifies a high level availability policy for components. +// availabilityPolicy specifies a high level availability policy for components. 
+// +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica type AvailabilityPolicy string const ( @@ -197,8 +281,14 @@ const ( ) type KubeconfigSecretRef struct { + // name is the name of the secret containing the kubeconfig. + // +required + // +kubebuilder:validation:MaxLength=255 Name string `json:"name"` - Key string `json:"key"` + // key is the key in the secret containing the kubeconfig. + // +required + // +kubebuilder:validation:MaxLength=255 + Key string `json:"key"` } type ConditionType string @@ -212,39 +302,51 @@ const ( // HostedControlPlaneStatus defines the observed state of HostedControlPlane type HostedControlPlaneStatus struct { - // Ready denotes that the HostedControlPlane API Server is ready to + // conditions contains details for one aspect of the current state of the HostedControlPlane. + // Current condition types are: "Available" + // +optional + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +kubebuilder:validation:MaxItems=100 + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // ready denotes that the HostedControlPlane API Server is ready to // receive requests // This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L226-L230 - // +kubebuilder:validation:Required + // +required // +kubebuilder:default=false Ready bool `json:"ready"` - // Initialized denotes whether or not the control plane has + // initialized denotes whether or not the control plane has // provided a kubeadm-config. // Once this condition is marked true, its value is never changed. See the Ready condition for an indication of // the current readiness of the cluster's control plane. // This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L238-L252 - // +kubebuilder:validation:Required + // +required // +kubebuilder:default=false Initialized bool `json:"initialized"` - // ExternalManagedControlPlane indicates to cluster-api that the control plane + // externalManagedControlPlane indicates to cluster-api that the control plane // is managed by an external service. // https://github.com/kubernetes-sigs/cluster-api/blob/65e5385bffd71bf4aad3cf34a537f11b217c7fab/controllers/machine_controller.go#L468 + // +optional // +kubebuilder:default=true ExternalManagedControlPlane *bool `json:"externalManagedControlPlane,omitempty"` - // ControlPlaneEndpoint contains the endpoint information by which + // controlPlaneEndpoint contains the endpoint information by which // external clients can access the control plane. This is populated // after the infrastructure is ready. - // +kubebuilder:validation:Optional + // +optional ControlPlaneEndpoint APIEndpoint `json:"controlPlaneEndpoint,omitempty"` - // OAuthCallbackURLTemplate contains a template for the URL to use as a callback + // oauthCallbackURLTemplate contains a template for the URL to use as a callback // for identity providers. The [identity-provider-name] placeholder must be replaced // with the name of an identity provider defined on the HostedCluster. // This is populated after the infrastructure is ready. 
- // +kubebuilder:validation:Optional + // +optional + // +kubebuilder:validation:MaxLength=255 OAuthCallbackURLTemplate string `json:"oauthCallbackURLTemplate,omitempty"` // versionStatus is the status of the release version applied by the @@ -252,66 +354,77 @@ type HostedControlPlaneStatus struct { // +optional VersionStatus *ClusterVersionStatus `json:"versionStatus,omitempty"` - // Version is the semantic version of the release applied by + // version is the semantic version of the release applied by // the hosted control plane operator // // Deprecated: Use versionStatus.desired.version instead. - // +kubebuilder:validation:Optional + // +optional + // +kubebuilder:validation:MaxLength=255 Version string `json:"version,omitempty"` - // ReleaseImage is the release image applied to the hosted control plane. + // releaseImage is the release image applied to the hosted control plane. // // Deprecated: Use versionStatus.desired.image instead. // +optional + // +kubebuilder:validation:MaxLength=255 ReleaseImage string `json:"releaseImage,omitempty"` // lastReleaseImageTransitionTime is the time of the last update to the current // releaseImage property. // // Deprecated: Use versionStatus.history[0].startedTime instead. - // +kubebuilder:validation:Optional + // +optional LastReleaseImageTransitionTime *metav1.Time `json:"lastReleaseImageTransitionTime,omitempty"` - // KubeConfig is a reference to the secret containing the default kubeconfig + // kubeConfig is a reference to the secret containing the default kubeconfig // for this control plane. + // +optional KubeConfig *KubeconfigSecretRef `json:"kubeConfig,omitempty"` - // KubeadminPassword is a reference to the secret containing the initial kubeadmin password - // for the guest cluster. + // customKubeconfig references an external custom kubeconfig secret. + // This field is populated in the status when a custom kubeconfig secret has been generated + // for the hosted cluster. It contains the name and key of the secret located in the + // hostedCluster namespace. This field is only populated when kubeApiExternalName is set. + // If this field is removed during a day 2 operation, the referenced secret will be deleted + // and this field will be removed from the hostedCluster status. // +optional - KubeadminPassword *corev1.LocalObjectReference `json:"kubeadminPassword,omitempty"` + CustomKubeconfig *KubeconfigSecretRef `json:"customKubeconfig,omitempty"` - // Condition contains details for one aspect of the current state of the HostedControlPlane. - // Current condition types are: "Available" + // kubeadminPassword is a reference to the secret containing the initial kubeadmin password + // for the guest cluster. // +optional - // +listType=map - // +listMapKey=type - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []metav1.Condition `json:"conditions,omitempty"` + KubeadminPassword *corev1.LocalObjectReference `json:"kubeadminPassword,omitempty"` - // Platform contains platform-specific status of the HostedCluster + // platform contains platform-specific status of the HostedCluster // +optional Platform *PlatformStatus `json:"platform,omitempty"` + // nodeCount tracks the number of nodes in the HostedControlPlane. // +optional - - // NodeCount tracks the number of nodes in the HostedControlPlane. NodeCount *int `json:"nodeCount,omitempty"` } +// APIEndpoint represents a reachable Kubernetes API endpoint. type APIEndpoint struct { - // Host is the hostname on which the API server is serving. 
+ // host is the hostname on which the API server is serving. + // +required + // +kubebuilder:validation:MaxLength=255 Host string `json:"host"` - // Port is the port on which the API server is serving. + // port is the port on which the API server is serving. + // +required Port int32 `json:"port"` } -// +kubebuilder:object:root=true // HostedControlPlaneList contains a list of HostedControlPlanes. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type HostedControlPlaneList struct { metav1.TypeMeta `json:",inline"` + // metadata is the metadata for the HostedControlPlaneList. + // +optional metav1.ListMeta `json:"metadata,omitempty"` - Items []HostedControlPlane `json:"items"` + // items is a list of HostedControlPlane. + // +required + // +kubebuilder:validation:MaxItems=100 + Items []HostedControlPlane `json:"items"` } diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_conditions.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_conditions.go index 5c810022a9..541f839108 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_conditions.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_conditions.go @@ -67,6 +67,8 @@ const ( ClusterVersionAvailable ConditionType = "ClusterVersionAvailable" // ClusterVersionReleaseAccepted bubbles up Failing ReleaseAccepted from the CVO. ClusterVersionReleaseAccepted ConditionType = "ClusterVersionReleaseAccepted" + // ClusterVersionRetrievedUpdates bubbles up RetrievedUpdates from the CVO. + ClusterVersionRetrievedUpdates ConditionType = "ClusterVersionRetrievedUpdates" // UnmanagedEtcdAvailable indicates whether a user-managed etcd cluster is // healthy. @@ -99,6 +101,11 @@ const ( // A failure here may require external user intervention to resolve. E.g. oidc was deleted out of band. ValidOIDCConfiguration ConditionType = "ValidOIDCConfiguration" + // ValidIDPConfiguration indicates if the Identity Provider configuration is valid. + // A failure here may require external user intervention to resolve + // e.g. the user-provided IDP configuration provided is invalid or the IDP is not reachable. + ValidIDPConfiguration ConditionType = "ValidIDPConfiguration" + // ValidReleaseImage indicates if the release image set in the spec is valid // for the HostedCluster. For example, this can be set false if the // HostedCluster itself attempts an unsupported version before 4.9 or an @@ -111,6 +118,10 @@ const ( // performance degradation due to fragmentation of the double encapsulation in ovn-kubernetes ValidKubeVirtInfraNetworkMTU ConditionType = "ValidKubeVirtInfraNetworkMTU" + // KubeVirtNodesLiveMigratable indicates if all nodes (VirtualMachines) of the kubevirt + // hosted cluster can be live migrated without experiencing a node restart + KubeVirtNodesLiveMigratable ConditionType = "KubeVirtNodesLiveMigratable" + // ValidAWSIdentityProvider indicates if the Identity Provider referenced // in the cloud credentials is healthy. E.g. for AWS the idp ARN is referenced in the iam roles. // "Version": "2012-10-17", @@ -165,6 +176,10 @@ const ( // A failure here often means a software bug or a non-stable cluster. ReconciliationSucceeded ConditionType = "ReconciliationSucceeded" + // EtcdRecoveryActive indicates that the Etcd cluster is failing and the + // recovery job was triggered. 
+ EtcdRecoveryActive ConditionType = "EtcdRecoveryActive" + // ClusterSizeComputed indicates that a t-shirt size was computed for this HostedCluster. // The last transition time for this condition is used to manage how quickly transitions occur. ClusterSizeComputed = "ClusterSizeComputed" @@ -174,6 +189,12 @@ const ( ClusterSizeTransitionPending = "ClusterSizeTransitionPending" // ClusterSizeTransitionRequired exposes the next t-shirt size that the cluster will transition to. ClusterSizeTransitionRequired = "ClusterSizeTransitionRequired" + + // HostedClusterRestoredFromBackup indicates that the HostedCluster was restored from backup. + // This condition is set to true when the HostedCluster is restored from backup and the recovery process is complete. + // This condition is used to track the status of the recovery process and to determine if the HostedCluster + // is ready to be used after restoration. + HostedClusterRestoredFromBackup ConditionType = "HostedClusterRestoredFromBackup" ) // Reasons. @@ -192,6 +213,7 @@ const ( EtcdQuorumAvailableReason = "QuorumAvailable" EtcdWaitingForQuorumReason = "EtcdWaitingForQuorum" EtcdStatefulSetNotFoundReason = "StatefulSetNotFound" + EtcdRecoveryJobFailedReason = "EtcdRecoveryJobFailed" UnmanagedEtcdMisconfiguredReason = "UnmanagedEtcdMisconfigured" UnmanagedEtcdAsExpected = "UnmanagedEtcdAsExpected" @@ -206,6 +228,7 @@ const ( PlatformCredentialsNotFoundReason = "PlatformCredentialsNotFound" InvalidImageReason = "InvalidImage" InvalidIdentityProvider = "InvalidIdentityProvider" + PayloadArchNotFoundReason = "PayloadArchNotFound" InvalidIAMRoleReason = "InvalidIAMRole" @@ -222,6 +245,10 @@ const ( ReconciliationInvalidPausedUntilConditionReason = "InvalidPausedUntilValue" KubeVirtSuboptimalMTUReason = "KubeVirtSuboptimalMTUDetected" + + KubeVirtNodesLiveMigratableReason = "KubeVirtNodesNotLiveMigratable" + + RecoveryFinishedReason = "RecoveryFinished" ) // Messages. diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_types.go index fe7d7afe9c..08536e2e15 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_types.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_types.go @@ -4,14 +4,14 @@ import ( "fmt" "strings" + "github.com/openshift/hypershift/api/util/ipnet" + + configv1 "github.com/openshift/api/config/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - - configv1 "github.com/openshift/api/config/v1" - - "github.com/openshift/hypershift/api/util/ipnet" ) func init() { @@ -21,6 +21,7 @@ func init() { &HostedClusterList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil }) } @@ -121,6 +122,13 @@ const ( // a HostedControlPlane. ClusterAPIPowerVSProviderImage = "hypershift.openshift.io/capi-provider-powervs-image" + // ClusterAPIOpenStackProviderImage overrides the CAPI OpenStack provider image to use for + // a HostedControlPlane. + ClusterAPIOpenStackProviderImage = "hypershift.openshift.io/capi-provider-openstack-image" + + // OpenStackResourceControllerImage overrides the ORC image to use for a HostedControlPlane. 
+ OpenStackResourceControllerImage = "hypershift.openshift.io/orc-image" + // AESCBCKeySecretKey defines the Kubernetes secret key name that contains the aescbc encryption key // in the AESCBC secret encryption strategy AESCBCKeySecretKey = "key" @@ -130,8 +138,8 @@ const ( // AWSCredentialsFileSecretKey defines the Kubernetes secret key name that contains // the customer AWS credentials in the unmanaged authentication strategy for AWS KMS secret encryption AWSCredentialsFileSecretKey = "credentials" - // ControlPlaneComponent identifies a resource as belonging to a hosted control plane. - ControlPlaneComponent = "hypershift.openshift.io/control-plane-component" + // ControlPlaneComponentLabel identifies a resource as belonging to a hosted control plane. + ControlPlaneComponentLabel = "hypershift.openshift.io/control-plane-component" // OperatorComponent identifies a component as belonging to the operator. OperatorComponent = "hypershift.openshift.io/operator-component" @@ -204,7 +212,7 @@ const ( AllowUnsupportedKubeVirtRHCOSVariantsAnnotation = "hypershift.openshift.io/allow-unsupported-kubevirt-rhcos-variants" // ImageOverridesAnnotation is passed as a flag to the CPO to allow overriding release images. - // The format of the annotation value is a commma-separated list of image=ref pairs like: + // The format of the annotation value is a comma-separated list of image=ref pairs like: // cluster-network-operator=example.com/cno:latest,ovn-kubernetes=example.com/ovnkube:latest ImageOverridesAnnotation = "hypershift.openshift.io/image-overrides" @@ -259,14 +267,26 @@ const ( KubeAPIServerGOGCAnnotation = "hypershift.openshift.io/kube-apiserver-gogc" // KubeAPIServerGOMemoryLimitAnnotation allows modifying the kube-apiserver GOMEMLIMIT environment variable to increase - // the frequency of memory collection when memory used rises above a particular threshhold. This can be used to reduce + // the frequency of memory collection when memory used rises above a particular threshold. This can be used to reduce // the memory footprint of the kube-apiserver during upgrades. KubeAPIServerGOMemoryLimitAnnotation = "hypershift.openshift.io/kube-apiserver-gomemlimit" + // KubeAPIServerMaximumRequestsInFlight allows overriding the default value for the kube-apiserver max-requests-inflight + // flag. This allows controlling how many concurrent requests can be handled by the Kube API server at any given time. + KubeAPIServerMaximumRequestsInFlight = "hypershift.openshift.io/kube-apiserver-max-requests-inflight" + + // KubeAPIServerMaximumMutatingRequestsInFlight allows overring the default value for the kube-apiserver max-mutating-requests-inflight + // flag. This allows controlling how many mutating concurrent requests can be handled by the Kube API server at any given time. + KubeAPIServerMaximumMutatingRequestsInFlight = "hypershift.openshift.io/kube-apiserver-max-mutating-requests-inflight" + // AWSLoadBalancerSubnetsAnnotation allows specifying the subnets to use for control plane load balancers - // in the AWS platform. + // in the AWS platform. These subnets only apply to private load balancers. AWSLoadBalancerSubnetsAnnotation = "hypershift.openshift.io/aws-load-balancer-subnets" + // AWSLoadBalancerTargetNodesAnnotation allows specifying label selectors to choose target nodes for + // control plane load balancers in the AWS platform. 
+ AWSLoadBalancerTargetNodesAnnotation = "hypershift.openshift.io/aws-load-balancer-target-node-labels" + // DisableClusterAutoscalerAnnotation allows disabling the cluster autoscaler for a hosted cluster. // This annotation is only set by the hypershift-operator on HosterControlPlanes. // It is not set by the end-user. @@ -275,9 +295,6 @@ const ( // AroHCP represents the ARO HCP managed service offering AroHCP = "ARO-HCP" - // RosaHCP represents the ROSA HCP managed service offering - RosaHCP = "ROSA-HCP" - // HostedClusterSizeLabel is a label on HostedClusters indicating a size based on the number of nodes. HostedClusterSizeLabel = "hypershift.openshift.io/hosted-cluster-size" @@ -292,6 +309,11 @@ const ( // one on the NodePool takes precedence. The value is a go duration string with a number and a unit (ie. 8m, 1h, etc) MachineHealthCheckTimeoutAnnotation = "hypershift.openshift.io/machine-health-check-timeout" + // MachineHealthCheckNodeStartupTimeoutAnnotation allows overriding the default machine health check timeout for + // node startup on nodepools. The annotation can be set in either the HostedCluster or the NodePool. If set on both, the + // one on the NodePool takes precedence. The value is a go duration string with a number and a unit (ie. 8m, 1h, etc) + MachineHealthCheckNodeStartupTimeoutAnnotation = "hypershift.openshift.io/machine-health-check-node-startup-timeout" + // MachineHealthCheckMaxUnhealthyAnnotation allows overriding the max unhealthy value of the machine // health check created for a NodePool. The annotation can be set in either the HostedCluster or the NodePool. // If set on both, the one on the NodePool takes precedence. The value can be a number or a percentage value. @@ -300,159 +322,323 @@ const ( // ClusterSizeOverrideAnnotation allows overriding the value of the size label regardless of the number // of workers associated with the HostedCluster. The value should be the desired size label. ClusterSizeOverrideAnnotation = "hypershift.openshift.io/cluster-size-override" + + // KubeAPIServerVerbosityLevelAnnotation allows specifying the log verbosity of kube-apiserver. + KubeAPIServerVerbosityLevelAnnotation = "hypershift.openshift.io/kube-apiserver-verbosity-level" + + // NodePoolSupportsKubevirtTopologySpreadConstraintsAnnotation indicates if the NodePool currently supports + // using TopologySpreadConstraints on the KubeVirt VMs. + // + // Newer versions of the NodePool controller transitioned to spreading VMs across the cluster + // using TopologySpreadConstraints instead of Pod Anti-Affinity. When the new controller interacts + // with a older NodePool that was previously using pod anti-affinity, we don't want to immediately + // start using TopologySpreadConstraints because it will cause the MachineSet controller to update + // and replace all existing VMs. For example, it would be unexpected for a user to update the + // NodePool controller and for that to trigger a rolling update of all KubeVirt VMs. + // + // This annotation signals to the NodePool controller that it is safe to use TopologySpreadConstraints on a NodePool + // without triggering an unexpected update of KubeVirt VMs. + NodePoolSupportsKubevirtTopologySpreadConstraintsAnnotation = "hypershift.openshift.io/nodepool-supports-kubevirt-topology-spread-constraints" + + // IsKubeVirtRHCOSVolumeLabelName labels rhcos DataVolumes and PVCs, to be able to filter them, e.g. 
for backup + IsKubeVirtRHCOSVolumeLabelName = "hypershift.openshift.io/is-kubevirt-rhcos" + + // SkipControlPlaneNamespaceDeletionAnnotation tells the the hosted cluster controller not to delete the hosted control plane + // namespace during hosted cluster deletion when this annotation is set to the value "true". + SkipControlPlaneNamespaceDeletionAnnotation = "hypershift.openshift.io/skip-delete-hosted-controlplane-namespace" + + // DisableIgnitionServerAnnotation controls skipping of the ignition server deployment. + DisableIgnitionServerAnnotation = "hypershift.openshift.io/disable-ignition-server" + + // KubeAPIServerGoAwayChance allows the --goaway-chance parameter of the kube-apiserver to be overridden from its default of 0 + KubeAPIServerGoAwayChance = "hypershift.openshift.io/kube-apiserver-goaway-chance" + + // AWSMachinePublicIPs, if set to "true", results in an AWS machine template that creates machines with public IPs + // WARNING: This option is for development and testing purposes only + AWSMachinePublicIPs = "hypershift.openshift.io/aws-machine-public-ips" + + // HostedClusterRestoredFromBackupAnnotation is set to true when the HostedCluster is restored from a backup using Hypershift + // OADP plugin. This annotation is set by the Hypershift OADP plugin during the Backup/Restore process. The annotation will trigger + // a process to check if the different components in the DataPlane are working as expected. Checks: + // - Validates the monitoring stack is properly working after restoration, if not HCCO will restart the prometheus-k8s pods. + HostedClusterRestoredFromBackupAnnotation = "hypershift.openshift.io/restored-from-backup" +) + +// RetentionPolicy defines the policy for handling resources associated with a cluster when the cluster is deleted. +// +// +kubebuilder:validation:Enum:=Orphan;Prune +type RetentionPolicy string + +const ( + // OrphanRetentionPolicy will keep the resources associated with the cluster + // when the cluster is deleted. + OrphanRetentionPolicy RetentionPolicy = "Orphan" + + // PruneRetentionPolicy will delete the resources associated with the cluster + // when the cluster is deleted. + PruneRetentionPolicy RetentionPolicy = "Prune" ) +// +kubebuilder:validation:Enum=ImageRegistry;openshift-samples;Insights;baremetal;Console;NodeTuning;Ingress +type OptionalCapability string + +const ImageRegistryCapability OptionalCapability = OptionalCapability(configv1.ClusterVersionCapabilityImageRegistry) +const OpenShiftSamplesCapability OptionalCapability = OptionalCapability(configv1.ClusterVersionCapabilityOpenShiftSamples) +const InsightsCapability OptionalCapability = OptionalCapability(configv1.ClusterVersionCapabilityInsights) +const BaremetalCapability OptionalCapability = OptionalCapability(configv1.ClusterVersionCapabilityBaremetal) +const ConsoleCapability OptionalCapability = OptionalCapability(configv1.ClusterVersionCapabilityConsole) +const NodeTuningCapability OptionalCapability = OptionalCapability(configv1.ClusterVersionCapabilityNodeTuning) +const IngressCapability OptionalCapability = OptionalCapability(configv1.ClusterVersionCapabilityIngress) + +// capabilities allows enabling or disabling optional components at install time. +// When this is not supplied, the cluster will use the DefaultCapabilitySet defined for the respective +// OpenShift version, minus the baremetal capability. +// Once set, it cannot be changed. +// +// +kubebuilder:validation:XValidation:rule="has(self.enabled) && has(self.disabled) ? 
self.enabled.all(e, !(e in self.disabled)) : true", message="Capabilities can not be both enabled and disabled at once." +type Capabilities struct { + // enabled when specified, explicitly enables the specified capabilitíes on the hosted cluster. + // Once set, this field cannot be changed. + // + // +listType=atomic + // +immutable + // +optional + // +kubebuilder:validation:MaxItems=25 + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Enabled is immutable. Changes might result in unpredictable and disruptive behavior." + Enabled []OptionalCapability `json:"enabled,omitempty"` + + // TODO: Remove the validation that requires the Ingress capability to be disabled only when Console is also disabled, once OCPBUGS-58422 is resolved by the console team + + // disabled when specified, explicitly disables the specified capabilitíes on the hosted cluster. + // Once set, this field cannot be changed. + // + // Note: Disabling 'openshift-samples','Insights', 'Console', 'NodeTuning', 'Ingress' are only supported in OpenShift versions 4.20 and above. + // + // +listType=atomic + // +immutable + // +optional + // +kubebuilder:validation:MaxItems=25 + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Disabled is immutable. Changes might result in unpredictable and disruptive behavior." + // +kubebuilder:validation:XValidation:rule="!self.exists(cap, cap == 'Ingress') || self.exists(cap, cap == 'Console')",message="Ingress capability can only be disabled if Console capability is also disabled" + Disabled []OptionalCapability `json:"disabled,omitempty"` +} + // HostedClusterSpec is the desired behavior of a HostedCluster. + +// +kubebuilder:validation:XValidation:rule="self.platform.type == 'IBMCloud' ? size(self.services) >= 3 : size(self.services) >= 4",message="spec.services in body should have at least 4 items or 3 for IBMCloud" +// +kubebuilder:validation:XValidation:rule=`self.platform.type != "IBMCloud" ? self.services == oldSelf.services : true`, message="Services is immutable. Changes might result in unpredictable and disruptive behavior." +// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? self.services.exists(s, s.service == "APIServer" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires APIServer Route service with a hostname to be defined" +// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? self.services.exists(s, s.service == "OAuthServer" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires OAuthServer Route service with a hostname to be defined" +// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? self.services.exists(s, s.service == "Konnectivity" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires Konnectivity Route service with a hostname to be defined" +// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? 
self.services.exists(s, s.service == "Ignition" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires Ignition Route service with a hostname to be defined" +// +kubebuilder:validation:XValidation:rule=`has(self.issuerURL) || !has(self.serviceAccountSigningKey)`,message="If serviceAccountSigningKey is set, issuerURL must be set" +// +kubebuilder:validation:XValidation:rule=`!self.services.exists(s, s.service == 'APIServer' && has(s.servicePublishingStrategy.loadBalancer) && s.servicePublishingStrategy.loadBalancer.hostname != "" && has(self.configuration) && has(self.configuration.apiServer) && self.configuration.apiServer.servingCerts.namedCertificates.exists(cert, cert.names.exists(n, n == s.servicePublishingStrategy.loadBalancer.hostname)))`, message="APIServer loadBalancer hostname cannot be in ClusterConfiguration.apiserver.servingCerts.namedCertificates[]" type HostedClusterSpec struct { - // Release specifies the desired OCP release payload for the hosted cluster. - // - // Updating this field will trigger a rollout of the control plane. The - // behavior of the rollout will be driven by the ControllerAvailabilityPolicy - // and InfrastructureAvailabilityPolicy. + // release specifies the desired OCP release payload for all the hosted cluster components. + // This includes those components running management side like the Kube API Server and the CVO but also the operands which land in the hosted cluster data plane like the ingress controller, ovn agents, etc. + // The maximum and minimum supported release versions are determined by the running HyperShift Operator. + // Attempting to use an unsupported version will result in the HostedCluster being degraded and the validateReleaseImage condition being false. + // Attempting to use a release with a skew against a NodePool release bigger than N-2 for the y-stream will result in leaving the NodePool in an unsupported state. + // Changing this field will trigger a rollout of the control plane components. + // The behavior of the rollout will be driven by the ControllerAvailabilityPolicy and InfrastructureAvailabilityPolicy for PDBs and maxUnavailable and surge policies. + // +required Release Release `json:"release"` - // ControlPlaneRelease specifies the desired OCP release payload for - // control plane components running on the management cluster. - // Updating this field will trigger a rollout of the control plane. The - // behavior of the rollout will be driven by the ControllerAvailabilityPolicy - // and InfrastructureAvailabilityPolicy. - // If not defined, Release is used + // controlPlaneRelease is like spec.release but only for the components running on the management cluster. + // This excludes any operand which will land in the hosted cluster data plane. + // It is useful when you need to apply patch management side like a CVE, transparently for the hosted cluster. + // Version input for this field is free, no validation is performed against spec.release or maximum and minimum is performed. + // If defined, it will dictate the version of the components running management side, while spec.release will dictate the version of the components landing in the hosted cluster data plane. + // If not defined, spec.release is used for both. + // Changing this field will trigger a rollout of the control plane.
+ // The behavior of the rollout will be driven by the ControllerAvailabilityPolicy and InfrastructureAvailabilityPolicy for PDBs and maxUnavailable and surge policies. // +optional ControlPlaneRelease *Release `json:"controlPlaneRelease,omitempty"` - // ClusterID uniquely identifies this cluster. This is expected to be - // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in - // hexadecimal values). - // As with a Kubernetes metadata.uid, this ID uniquely identifies this - // cluster in space and time. - // This value identifies the cluster in metrics pushed to telemetry and - // metrics produced by the control plane operators. If a value is not - // specified, an ID is generated. After initial creation, the value is - // immutable. - // +kubebuilder:validation:Pattern:="[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" + // clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal digits). + // As with a Kubernetes metadata.uid, this ID uniquely identifies this cluster in space and time. + // This value identifies the cluster in metrics pushed to telemetry and metrics produced by the control plane operators. + // If a value is not specified, a random clusterID will be generated and set by the controller. + // Once set, this value is immutable. + // +kubebuilder:validation:XValidation:rule="self.matches('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')",message="clusterID must be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal digits)" + // +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="clusterID is immutable" + // +kubebuilder:validation:MaxLength=36 + // +kubebuilder:validation:MinLength=36 // +optional ClusterID string `json:"clusterID,omitempty"` + // infraID is a globally unique identifier for the cluster. + // It must consist of lowercase alphanumeric characters and hyphens ('-') only, and start and end with an alphanumeric character. + // It must be no more than 253 characters in length. + // This identifier will be used to associate various cloud resources with the HostedCluster and its associated NodePools. + // infraID is used to compute and tag created resources with "kubernetes.io/cluster/"+hcluster.Spec.InfraID which has contractual meaning for the cloud provider implementations. + // If a value is not specified, a random infraID will be generated and set by the controller. + // Once set, this value is immutable. + // +kubebuilder:validation:XValidation:rule="self.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')",message="infraID must consist of lowercase alphanumeric characters or '-', start and end with an alphanumeric character, and be between 1 and 253 characters" + // +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="infraID is immutable" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 + // +optional + InfraID string `json:"infraID,omitempty"` + // updateService may be used to specify the preferred upstream update service. - // By default it will use the appropriate update service for the cluster and region. - // + // If omitted we will use the appropriate update service for the cluster and region. + // This is used by the control plane operator to determine and signal the appropriate available upgrades in the hostedCluster.status.
+ // +kubebuilder:validation:XValidation:rule="isURL(self)",message="updateService must be a valid absolute URL" // +optional UpdateService configv1.URL `json:"updateService,omitempty"` - // channel is an identifier for explicitly requesting that a non-default - // set of updates be applied to this cluster. The default channel will be - // contain stable updates that are appropriate for production clusters. - // + // channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. + // If omitted no particular upgrades are suggested. + // TODO(alberto): Consider the backend to use the default channel by default. Default channel will contain stable updates that are appropriate for production clusters. + // +kubebuilder:validation:MaxLength=100 + // +kubebuilder:validation:MinLength=1 // +optional Channel string `json:"channel,omitempty"` - // InfraID is a globally unique identifier for the cluster. This identifier - // will be used to associate various cloud resources with the HostedCluster - // and its associated NodePools. - // - // +optional - // +immutable - InfraID string `json:"infraID,omitempty"` - - // Platform specifies the underlying infrastructure provider for the cluster + // platform specifies the underlying infrastructure provider for the cluster // and is used to configure platform specific behavior. - // - // +immutable + // +required Platform PlatformSpec `json:"platform"` - // ControllerAvailabilityPolicy specifies the availability policy applied to - // critical control plane components. The default value is HighlyAvailable. - // - // +optional + // kubeAPIServerDNSName specifies a desired DNS name to resolve to the KAS. + // When set, the controller will automatically generate a secret with kubeconfig and expose it in the hostedCluster Status.customKubeconfig field. + // If it's set or removed day 2, the kubeconfig generated secret will be created, recreated or deleted. + // The DNS entries should be resolvable from the cluster, so this should be manually configured in the DNS provider. + // This field works in conjunction with configuration.APIServer.ServingCerts.NamedCertificates to enable + // access to the API server via a custom domain name. The NamedCertificates provide the TLS certificates + // for the custom domain, while this field triggers the generation of a kubeconfig that uses those certificates. + // This API endpoint only works in OCP version 4.19 or later. Older versions will result in a no-op. + // +kubebuilder:validation:XValidation:rule=`self == "" || self.matches('^(?:(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,}|[a-zA-Z0-9-]+)$')`,message="kubeAPIServerDNSName must be a valid URL name (e.g., api.example.com)" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:example: "api.example.com" + // +optional + KubeAPIServerDNSName string `json:"kubeAPIServerDNSName,omitempty"` + + // controllerAvailabilityPolicy specifies the availability policy applied to critical control plane components like the Kube API Server. + // Possible values are HighlyAvailable and SingleReplica. The default value is HighlyAvailable. + // This field is immutable. 
+ // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ControllerAvailabilityPolicy is immutable" // +kubebuilder:default:="HighlyAvailable" - // +immutable ControllerAvailabilityPolicy AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"` - // InfrastructureAvailabilityPolicy specifies the availability policy applied - // to infrastructure services which run on cluster nodes. The default value is - // SingleReplica. - // + // infrastructureAvailabilityPolicy specifies the availability policy applied to infrastructure services which run on the hosted cluster data plane like the ingress controller and image registry controller. + // Possible values are HighlyAvailable and SingleReplica. The default value is SingleReplica. // +optional // +kubebuilder:default:="SingleReplica" - // +immutable InfrastructureAvailabilityPolicy AvailabilityPolicy `json:"infrastructureAvailabilityPolicy,omitempty"` - // DNS specifies DNS configuration for the cluster. - // - // +immutable + // dns specifies the DNS configuration for the hosted cluster ingress. + // +optional DNS DNSSpec `json:"dns,omitempty"` - // Networking specifies network configuration for the cluster. - // - // +immutable + // networking specifies network configuration for the hosted cluster. + // Defaults to OVNKubernetes with a cluster network of cidr: "10.132.0.0/14" and a service network of cidr: "172.31.0.0/16". + // +required // +kubebuilder:default={networkType: "OVNKubernetes", clusterNetwork: {{cidr: "10.132.0.0/14"}}, serviceNetwork: {{cidr: "172.31.0.0/16"}}} Networking ClusterNetworking `json:"networking"` - // Autoscaling specifies auto-scaling behavior that applies to all NodePools - // associated with the control plane. + // autoscaling specifies auto-scaling behavior that applies to all NodePools + // associated with this HostedCluster. // // +optional Autoscaling ClusterAutoscaling `json:"autoscaling,omitempty"` - // Etcd specifies configuration for the control plane etcd cluster. The - // default ManagementType is Managed. Once set, the ManagementType cannot be + // autoNode specifies the configuration for the autoNode feature. + // +openshift:enable:FeatureGate=AutoNodeKarpenter + // +optional + AutoNode *AutoNode `json:"autoNode,omitempty"` + + // etcd specifies configuration for the control plane etcd cluster. The + // default managementType is Managed. Once set, the managementType cannot be // changed. // - // +kubebuilder:validation:Optional // +kubebuilder:default={managementType: "Managed", managed: {storage: {type: "PersistentVolume", persistentVolume: {size: "8Gi"}}}} + // +required // +immutable Etcd EtcdSpec `json:"etcd"` - // Services specifies how individual control plane services are published from - // the hosting cluster of the control plane. - // - // If a given service is not present in this list, it will be exposed publicly - // by default. + // services specifies how individual control plane services endpoints are published for consumption. + // This requires APIServer;OAuthServer;Konnectivity;Ignition. + // This field is immutable for all platforms but IBMCloud. + // Max is 6 to account for OIDC;OVNSbDb for backward compatibility though they are no-op. 
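Editorial aside, not part of the patch: the disabled (`-kubebuilder`) rules that follow require the services list to cover APIServer, OAuthServer, Konnectivity and Ignition, with an IBMCloud carve-out for Ignition. Below is a plain-Go sketch of the core membership check only, ignoring the IBMCloud special case and the 6-entry cap; all names are local to the example.

```go
// Standalone sketch of the "required service types must be present" check.
package main

import "fmt"

// missingServices returns the required service types absent from the list.
func missingServices(services []string) []string {
	required := []string{"APIServer", "OAuthServer", "Konnectivity", "Ignition"}
	have := map[string]bool{}
	for _, s := range services {
		have[s] = true
	}
	var missing []string
	for _, r := range required {
		if !have[r] {
			missing = append(missing, r)
		}
	}
	return missing
}

func main() {
	fmt.Println(missingServices([]string{"APIServer", "OAuthServer", "Konnectivity"})) // [Ignition]
}
```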
+ // + // +kubebuilder:validation:MaxItems=6 + // +kubebuilder:validation:ListType=atomic + // -kubebuilder:validation:XValidation:rule="self.all(s, !(s.service == 'APIServer' && s.servicePublishingStrategy.type == 'Route') || has(s.servicePublishingStrategy.route.hostname))",message="If serviceType is 'APIServer' and publishing strategy is 'Route', then hostname must be set" + // -kubebuilder:validation:XValidation:rule="self.platform.type == 'IBMCloud' ? ['APIServer', 'OAuthServer', 'Konnectivity'].all(requiredType, self.exists(s, s.service == requiredType))",message="Services list must contain at least 'APIServer', 'OAuthServer', and 'Konnectivity' service types" : ['APIServer', 'OAuthServer', 'Konnectivity', 'Ignition'].all(requiredType, self.exists(s, s.service == requiredType))",message="Services list must contain at least 'APIServer', 'OAuthServer', 'Konnectivity', and 'Ignition' service types" + // -kubebuilder:validation:XValidation:rule="self.filter(s, s.servicePublishingStrategy.type == 'Route' && has(s.servicePublishingStrategy.route) && has(s.servicePublishingStrategy.route.hostname)).all(x, self.filter(y, y.servicePublishingStrategy.type == 'Route' && (has(y.servicePublishingStrategy.route) && has(y.servicePublishingStrategy.route.hostname) && y.servicePublishingStrategy.route.hostname == x.servicePublishingStrategy.route.hostname)).size() <= 1)",message="Each route publishingStrategy 'hostname' must be unique within the Services list." + // -kubebuilder:validation:XValidation:rule="self.filter(s, s.servicePublishingStrategy.type == 'NodePort' && has(s.servicePublishingStrategy.nodePort) && has(s.servicePublishingStrategy.nodePort.address) && has(s.servicePublishingStrategy.nodePort.port)).all(x, self.filter(y, y.servicePublishingStrategy.type == 'NodePort' && (has(y.servicePublishingStrategy.nodePort) && has(y.servicePublishingStrategy.nodePort.address) && y.servicePublishingStrategy.nodePort.address == x.servicePublishingStrategy.nodePort.address && has(y.servicePublishingStrategy.nodePort.port) && y.servicePublishingStrategy.nodePort.port == x.servicePublishingStrategy.nodePort.port )).size() <= 1)",message="Each nodePort publishingStrategy 'nodePort' and 'hostname' must be unique within the Services list." + // TODO(alberto): this breaks the cost budget for < 4.17. We should figure why and enable it back. And If not fixable, consider imposing a minimum version on the management cluster. + // +required + // +immutable Services []ServicePublishingStrategyMapping `json:"services"` - // PullSecret references a pull secret to be injected into the container - // runtime of all cluster nodes. The secret must have a key named - // ".dockerconfigjson" whose value is the pull secret JSON. + // pullSecret is a local reference to a Secret that must have a ".dockerconfigjson" key whose content must be a valid Openshift pull secret JSON. + // If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state. + // TODO(alberto): Signal this in a condition. + // This pull secret will be part of every payload generated by the controllers for any NodePool of the HostedCluster + // and it will be injected into the container runtime of all NodePools. + // Changing this value will trigger a rollout for all existing NodePools in the cluster. + // Changing the content of the secret inplace will not trigger a rollout and might result in unpredictable behaviour. 
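Editorial aside, not part of the patch: pullSecret above requires a Secret whose ".dockerconfigjson" key holds valid OpenShift pull-secret JSON. The standalone sketch below approximates that requirement by checking only for a non-empty "auths" map, which is a simplification; the function name is illustrative.

```go
// Standalone sketch of the ".dockerconfigjson" shape check described above.
package main

import (
	"encoding/json"
	"fmt"
)

func checkPullSecret(data map[string][]byte) error {
	raw, ok := data[".dockerconfigjson"]
	if !ok {
		return fmt.Errorf("secret is missing the .dockerconfigjson key")
	}
	var cfg struct {
		Auths map[string]json.RawMessage `json:"auths"`
	}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return fmt.Errorf("pull secret is not valid JSON: %w", err)
	}
	if len(cfg.Auths) == 0 {
		return fmt.Errorf("pull secret has no auths entries")
	}
	return nil
}

func main() {
	ok := map[string][]byte{".dockerconfigjson": []byte(`{"auths":{"quay.io":{"auth":"dXNlcjpwYXNz"}}}`)}
	fmt.Println(checkPullSecret(ok)) // <nil>
}
```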
+ // +required + // +rollout + // TODO(alberto): have our own local reference type to include our opinions and avoid transparent changes. PullSecret corev1.LocalObjectReference `json:"pullSecret"` - // SSHKey references an SSH key to be injected into all cluster node sshd - // servers. The secret must have a single key "id_rsa.pub" whose value is the - // public part of an SSH key. - // - // +immutable + // sshKey is a local reference to a Secret that must have a "id_rsa.pub" key whose content must be the public part of 1..N SSH keys. + // If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state. + // TODO(alberto): Signal this in a condition. + // When sshKey is set, the controllers will generate a machineConfig with the sshAuthorizedKeys https://coreos.github.io/ignition/configuration-v3_2/ populated with this value. + // This MachineConfig will be part of every payload generated by the controllers for any NodePool of the HostedCluster. + // Changing this value will trigger a rollout for all existing NodePools in the cluster. + // +rollout + // +optional SSHKey corev1.LocalObjectReference `json:"sshKey"` - // IssuerURL is an OIDC issuer URL which is used as the issuer in all - // ServiceAccount tokens generated by the control plane API server. The - // default value is kubernetes.default.svc, which only works for in-cluster + // issuerURL is an OIDC issuer URL which will be used as the issuer in all + // ServiceAccount tokens generated by the control plane API server via --service-account-issuer kube api server flag. + // https://k8s-docs.netlify.app/en/docs/reference/command-line-tools-reference/kube-apiserver/ + // https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#serviceaccount-token-volume-projection + // The default value is kubernetes.default.svc, which only works for in-cluster // validation. - // + // If the platform is AWS and this value is set, the controller will update an s3 object with the appropriate OIDC documents (using the serviceAccountSigningKey info) into that issuerURL. + // The expectation is for this s3 url to be backed by an OIDC provider in the AWS IAM. // +kubebuilder:default:="https://kubernetes.default.svc" // +immutable // +optional - // +kubebuilder:validation:Format=uri + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="issuerURL is immutable" + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="issuerURL must be a valid absolute URL" IssuerURL string `json:"issuerURL,omitempty"` - // ServiceAccountSigningKey is a reference to a secret containing the private key - // used by the service account token issuer. The secret is expected to contain - // a single key named "key". If not specified, a service account signing key will - // be generated automatically for the cluster. When specifying a service account - // signing key, a IssuerURL must also be specified. + // serviceAccountSigningKey is a local reference to a secret that must have a "key" key whose content must be the private key + // used by the service account token issuer. + // If not specified, a service account signing key will + // be generated automatically for the cluster. + // When specifying a service account signing key, an IssuerURL must also be specified. + // If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state. + // TODO(alberto): Signal this in a condition. 
// // +immutable - // +kubebuilder:validation:Optional // +optional ServiceAccountSigningKey *corev1.LocalObjectReference `json:"serviceAccountSigningKey,omitempty"` - // Configuration specifies configuration for individual OCP components in the + // configuration specifies configuration for individual OCP components in the // cluster, represented as embedded resources that correspond to the openshift // configuration API. // - // +kubebuilder:validation:Optional // +optional Configuration *ClusterConfiguration `json:"configuration,omitempty"` - // AuditWebhook contains metadata for configuring an audit webhook endpoint + // operatorConfiguration specifies configuration for individual OCP operators in the cluster. + // + // +optional + // +openshift:enable:FeatureGate=ClusterVersionOperatorConfiguration + OperatorConfiguration *OperatorConfiguration `json:"operatorConfiguration,omitempty"` + + // auditWebhook contains metadata for configuring an audit webhook endpoint // for a cluster to process cluster audit events. It references a secret that // contains the webhook information for the audit webhook endpoint. It is a // secret because if the endpoint has mTLS the kubeconfig will contain client @@ -463,41 +649,49 @@ type HostedClusterSpec struct { // +immutable AuditWebhook *corev1.LocalObjectReference `json:"auditWebhook,omitempty"` - // ImageContentSources specifies image mirrors that can be used by cluster + // imageContentSources specifies image mirrors that can be used by cluster // nodes to pull content. - // + // When imageContentSources is set, the controllers will generate a machineConfig. + // This MachineConfig will be part of every payload generated by the controllers for any NodePool of the HostedCluster. + // Changing this value will trigger a rollout for all existing NodePools in the cluster. // +optional - // +immutable + // +kubebuilder:validation:MaxItems=255 ImageContentSources []ImageContentSource `json:"imageContentSources,omitempty"` - // AdditionalTrustBundle is a reference to a ConfigMap containing a - // PEM-encoded X.509 certificate bundle that will be added to the hosted controlplane and nodes - // + // additionalTrustBundle is a local reference to a ConfigMap that must have a "ca-bundle.crt" key + // whose content must be a PEM-encoded X.509 certificate bundle that will be added to the hosted controlplane and nodes + // If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state. + // TODO(alberto): Signal this in a condition. + // This will be part of every payload generated by the controllers for any NodePool of the HostedCluster. + // Changing this value will trigger a rollout for all existing NodePools in the cluster. // +optional AdditionalTrustBundle *corev1.LocalObjectReference `json:"additionalTrustBundle,omitempty"` - // SecretEncryption specifies a Kubernetes secret encryption strategy for the + // secretEncryption specifies a Kubernetes secret encryption strategy for the // control plane. // // +optional SecretEncryption *SecretEncryptionSpec `json:"secretEncryption,omitempty"` - // FIPS indicates whether this cluster's nodes will be running in FIPS mode. + // fips indicates whether this cluster's nodes will be running in FIPS mode. // If set to true, the control plane's ignition server will be configured to // expect that nodes joining the cluster will be FIPS-enabled. 
- // + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="fips is immutable" // +optional // +immutable FIPS bool `json:"fips"` - // PausedUntil is a field that can be used to pause reconciliation on a resource. - // Either a date can be provided in RFC3339 format or a boolean. If a date is + // pausedUntil is a field that can be used to pause reconciliation on the HostedCluster controller, resulting in any change to the HostedCluster being ignored. + // Either a date can be provided in RFC3339 format or a boolean as in 'true', 'false', 'True', 'False'. If a date is // provided: reconciliation is paused on the resource until that date. If the boolean true is // provided: reconciliation is paused on the resource until the field is removed. + // +kubebuilder:validation:MaxLength=35 + // +kubebuilder:validation:MinLength=4 + // +kubebuilder:validation:XValidation:rule=`self.matches('^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.*$') || self in ['true', 'false', 'True', 'False']`,message="PausedUntil must be a date in RFC3339 format or 'True', 'true', 'False' or 'false'" // +optional PausedUntil *string `json:"pausedUntil,omitempty"` - // OLMCatalogPlacement specifies the placement of OLM catalog components. By default, + // olmCatalogPlacement specifies the placement of OLM catalog components. By default, // this is set to management and OLM catalog components are deployed onto the management // cluster. If set to guest, the OLM catalog components will be deployed onto the guest // cluster. @@ -508,10 +702,39 @@ type HostedClusterSpec struct { // +immutable OLMCatalogPlacement OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"` - // NodeSelector when specified, must be true for the pods managed by the HostedCluster to be scheduled. - // + // nodeSelector when specified, is propagated to all control plane Deployments and Stateful sets running management side. + // It must be satisfied by the management Nodes for the pods to be scheduled. Otherwise the HostedCluster will enter a degraded state. + // Changes to this field will propagate to existing Deployments and StatefulSets. + // +kubebuilder:validation:XValidation:rule="size(self) <= 20",message="nodeSelector map can have at most 20 entries" + // TODO(alberto): add additional validation for the map key/values. // +optional NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // tolerations when specified, define what custom tolerations are added to the hcp pods. + // + // +optional + // +kubebuilder:validation:MaxItems=25 + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // labels when specified, define what custom labels are added to the hcp pods. + // Changing this day 2 will cause a rollout of all hcp pods. + // Duplicate keys are not supported. If duplicate keys are defined, only the last key/value pair is preserved. + // Valid values are those in https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + // + // -kubebuilder:validation:XValidation:rule=`self.all(key, size(key) <= 317 && key.matches('^(([A-Za-z0-9]+(\\.[A-Za-z0-9]+)?)*[A-Za-z0-9]\\/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$'))`, message="label key must have two segments: an optional prefix and name, separated by a slash (/). The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. The prefix is optional. 
If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots (.), not longer than 253 characters in total, followed by a slash (/)" + // -kubebuilder:validation:XValidation:rule=`self.all(key, size(self[key]) <= 63 && self[key].matches('^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$'))`, message="label value must be 63 characters or less (can be empty), consist of alphanumeric characters, dashes (-), underscores (_) or dots (.), and begin and end with an alphanumeric character" + // TODO: key/value validations break cost budget for <=4.17. We should figure why and enable it back. + // +kubebuilder:validation:MaxProperties=20 + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // capabilities allows for disabling optional components at cluster install time. + // This field is optional and once set cannot be changed. + // +immutable + // +optional + // +kubebuilder:default={} + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Capabilities is immutable. Changes might result in unpredictable and disruptive behavior." + Capabilities *Capabilities `json:"capabilities,omitempty"` } // OLMCatalogPlacement is an enum specifying the placement of OLM catalog components. @@ -553,47 +776,69 @@ func (olm *OLMCatalogPlacement) Type() string { // the pullspec matches Source then one of the Mirrors are substituted as hosts // in the pullspec and tried in order to fetch the image. type ImageContentSource struct { - // Source is the repository that users refer to, e.g. in image pull + // source is the repository that users refer to, e.g. in image pull // specifications. // // +immutable + // +kubebuilder:validation:MaxLength=255 + // +required Source string `json:"source"` - // Mirrors are one or more repositories that may also contain the same images. + // mirrors are one or more repositories that may also contain the same images. // // +optional // +immutable + // +kubebuilder:validation:MaxItems=255 + // +listType=set + // +kubebuilder:validation:items:MaxLength=255 Mirrors []string `json:"mirrors,omitempty"` } -// ServicePublishingStrategyMapping specifies how individual control plane -// services are published from the hosting cluster of a control plane. +// ServicePublishingStrategyMapping specifies how individual control plane services endpoints are published for consumption. +// This includes APIServer;OAuthServer;Konnectivity;Ignition. +// If a given service is not present in this list, it will be exposed publicly by default. type ServicePublishingStrategyMapping struct { - // Service identifies the type of service being published. + // service identifies the type of service being published. + // It can be APIServer;OAuthServer;Konnectivity;Ignition + // OVNSbDb;OIDC are no-op and kept for backward compatibility. + // This field is immutable. // // +kubebuilder:validation:Enum=APIServer;OAuthServer;OIDC;Konnectivity;Ignition;OVNSbDb // +immutable + // +required Service ServiceType `json:"service"` - // ServicePublishingStrategy specifies how to publish Service. + // servicePublishingStrategy specifies how to publish a service endpoint. + // +required ServicePublishingStrategy `json:"servicePublishingStrategy"` } -// ServicePublishingStrategy specfies how to publish a ServiceType. +// ServicePublishingStrategy specifies how to publish a ServiceType. +// +kubebuilder:validation:XValidation:rule="self.type == 'NodePort' ? 
has(self.nodePort) : !has(self.nodePort)",message="nodePort is required when type is NodePort, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="self.type == 'Route' ? !has(self.nodePort) && !has(self.loadBalancer) : !has(self.route)",message="only route is allowed when type is Route, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="self.type == 'LoadBalancer' ? !has(self.nodePort) && !has(self.route) : !has(self.loadBalancer)",message="only loadBalancer is required when type is LoadBalancer, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="self.type == 'None' ? !has(self.nodePort) && !has(self.route) && !has(self.loadBalancer) : true",message="None does not allow any configuration for loadBalancer, nodePort, or route" +// +kubebuilder:validation:XValidation:rule="self.type == 'S3' ? !has(self.nodePort) && !has(self.route) && !has(self.loadBalancer) : true",message="S3 does not allow any configuration for loadBalancer, nodePort, or route" + type ServicePublishingStrategy struct { - // Type is the publishing strategy used for the service. + // type is the publishing strategy used for the service. + // It can be LoadBalancer;NodePort;Route;None;S3 // // +kubebuilder:validation:Enum=LoadBalancer;NodePort;Route;None;S3 - // +immutable + // +required Type PublishingStrategyType `json:"type"` - // NodePort configures exposing a service using a NodePort. + // nodePort configures exposing a service using a NodePort. + // +optional NodePort *NodePortPublishingStrategy `json:"nodePort,omitempty"` - // LoadBalancer configures exposing a service using a LoadBalancer. + // loadBalancer configures exposing a service using a dedicated LoadBalancer. + // +optional LoadBalancer *LoadBalancerPublishingStrategy `json:"loadBalancer,omitempty"` - // Route configures exposing a service using a Route. + // route configures exposing a service using a Route through an ingress controller behind a cloud Load Balancer. + // The specifics of the setup are platform dependent. + // +optional Route *RoutePublishingStrategy `json:"route,omitempty"` } @@ -627,117 +872,175 @@ var ( // OAuthServer is the control plane OAuth service. OAuthServer ServiceType = "OAuthServer" - // OIDC is the control plane OIDC service. - OIDC ServiceType = "OIDC" - // Ignition is the control plane ignition service for nodes. Ignition ServiceType = "Ignition" // OVNSbDb is the optional control plane ovn southbound database service used by OVNKubernetes CNI. + // Deprecated: This service is no longer used by OVNKubernetes CNI for >= 4.14. OVNSbDb ServiceType = "OVNSbDb" + + // OIDC is the control plane OIDC service. + // Deprecated: This service is no longer used by the control plane. + OIDC ServiceType = "OIDC" ) // NodePortPublishingStrategy specifies a NodePort used to expose a service. type NodePortPublishingStrategy struct { - // Address is the host/ip that the NodePort service is exposed over.
+ // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:XValidation:rule=`self.matches('^(([a-zA-Z0-9][-a-zA-Z0-9]*\\.)+[a-zA-Z]{2,}|localhost)$') || self.matches('^((\\d{1,3}\\.){3}\\d{1,3})$') || self.matches('^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))$')`, message="address must be a valid hostname, IPv4, or IPv6 address" + // +required Address string `json:"address"` - // Port is the port of the NodePort service. If <=0, the port is dynamically + // port is the port of the NodePort service. If <=0, the port is dynamically // assigned when the service is created. + // +optional Port int32 `json:"port,omitempty"` } // LoadBalancerPublishingStrategy specifies setting used to expose a service as a LoadBalancer. type LoadBalancerPublishingStrategy struct { - // Hostname is the name of the DNS record that will be created pointing to the LoadBalancer. + // hostname is the name of the DNS record that will be created pointing to the LoadBalancer and passed through to consumers of the service. + // If omitted, the value will be inferred from the corev1.Service Load balancer type .status. + // +kubebuilder:validation:XValidation:rule=`self.matches('^(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,}$')`,message="hostname must be a valid domain name (e.g., example.com)" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 // +optional Hostname string `json:"hostname,omitempty"` } // RoutePublishingStrategy specifies options for exposing a service as a Route. type RoutePublishingStrategy struct { - // Hostname is the name of the DNS record that will be created pointing to the Route. + // hostname is the name of the DNS record that will be created pointing to the Route and passed through to consumers of the service. + // If omitted, the value will be inferred from management ingress.Spec.Domain. + // +kubebuilder:validation:XValidation:rule=`self.matches('^(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,}$')`,message="hostname must be a valid domain name (e.g., example.com)" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 // +optional Hostname string `json:"hostname,omitempty"` } -// DNSSpec specifies the DNS configuration in the cluster. +// DNSSpec specifies the DNS configuration for the hosted cluster ingress. type DNSSpec struct { - // BaseDomain is the base domain of the cluster. - // + // baseDomain is the base domain of the hosted cluster. + // It will be used to configure ingress in the hosted cluster through the subdomain baseDomainPrefix.baseDomain. + // If baseDomainPrefix is omitted, the hostedCluster.name will be used as the subdomain. + // Once set, this field is immutable. + // When the value is the empty string "", the controller might default to a value depending on the platform. 
+ // +kubebuilder:validation:XValidation:rule=`self == "" || self.matches('^(?:(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,}|[a-zA-Z0-9-]+)$')`,message="baseDomain must be a valid domain name (e.g., example, example.com, sub.example.com)" + // +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="baseDomain is immutable" + // +kubebuilder:validation:MaxLength=253 // +immutable + // +required BaseDomain string `json:"baseDomain"` - // BaseDomainPrefix is the base domain prefix of the cluster. - // defaults to clusterName if not set. Set it to "" if you don't want a prefix to be prepended to BaseDomain. - // + // baseDomainPrefix is the base domain prefix for the hosted cluster ingress. + // It will be used to configure ingress in the hosted cluster through the subdomain baseDomainPrefix.baseDomain. + // If baseDomainPrefix is omitted, the hostedCluster.name will be used as the subdomain. + // Set baseDomainPrefix to an empty string "", if you don't want a prefix at all (not even hostedCluster.name) to be prepended to baseDomain. + // +kubebuilder:validation:XValidation:rule=`self == "" || self.matches('^(?:(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,}|[a-zA-Z0-9-]+)$')`,message="baseDomainPrefix must be a valid domain name (e.g., example, example.com, sub.example.com)" + // This field is immutable. + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="baseDomainPrefix is immutable" + // +kubebuilder:validation:MaxLength=253 // +optional - // +immutable BaseDomainPrefix *string `json:"baseDomainPrefix,omitempty"` - // PublicZoneID is the Hosted Zone ID where all the DNS records that are - // publicly accessible to the internet exist. - // + // publicZoneID is the Hosted Zone ID where all the DNS records that are publicly accessible to the internet exist. + // This field is optional and mainly leveraged in cloud environments where the DNS records for the .baseDomain are created by controllers in this zone. + // Once set, this value is immutable. // +optional + // +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="publicZoneID is immutable" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 // +immutable PublicZoneID string `json:"publicZoneID,omitempty"` - // PrivateZoneID is the Hosted Zone ID where all the DNS records that are only - // available internally to the cluster exist. - // + // privateZoneID is the Hosted Zone ID where all the DNS records that are only available internally to the cluster exist. + // This field is optional and mainly leveraged in cloud environments where the DNS records for the .baseDomain are created by controllers in this zone. + // Once set, this value is immutable. // +optional + // +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="privateZoneID is immutable" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 // +immutable PrivateZoneID string `json:"privateZoneID,omitempty"` } -// ClusterNetworking specifies network configuration for a cluster. +// clusterNetworking specifies network configuration for a cluster. +// All CIDRs must be unique. Additional validation to check for CIDRs overlap and consistent network stack is performed by the controllers. +// Failing that validation will result in the HostedCluster being degraded and the validConfiguration condition being false. +// TODO this is available in vanilla kube from 1.31 API servers and in Openshift from 4.16. 
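Editorial aside, not part of the patch: a small standalone sketch of the baseDomain/baseDomainPrefix defaulting described in the DNSSpec comments above, assuming (as those comments state) that an omitted prefix falls back to the HostedCluster name and an explicit "" drops the prefix entirely; names are illustrative.

```go
// Standalone sketch of the ingress subdomain defaulting described in DNSSpec.
package main

import "fmt"

func ingressDomain(clusterName, baseDomain string, baseDomainPrefix *string) string {
	prefix := clusterName // default when baseDomainPrefix is omitted
	if baseDomainPrefix != nil {
		prefix = *baseDomainPrefix
	}
	if prefix == "" {
		return baseDomain // explicit "" means no prefix at all
	}
	return prefix + "." + baseDomain
}

func main() {
	empty := ""
	fmt.Println(ingressDomain("guest", "example.com", nil))    // guest.example.com
	fmt.Println(ingressDomain("guest", "example.com", &empty)) // example.com
}
```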
+// TODO(alberto): Use CEL cidr library for all these validation when all management clusters are >= 1.31. +// +kubebuilder:validation:XValidation:rule="(!has(self.machineNetwork) && self.clusterNetwork.all(c, self.serviceNetwork.all(s, c.cidr != s.cidr)) || (has(self.machineNetwork) && (self.machineNetwork.all(m, self.clusterNetwork.all(c, m.cidr != c.cidr)) && self.machineNetwork.all(m, self.serviceNetwork.all(s, m.cidr != s.cidr)) && self.clusterNetwork.all(c, self.serviceNetwork.all(s, c.cidr != s.cidr)))))",message="CIDR ranges in machineNetwork, clusterNetwork, and serviceNetwork must be unique and non-overlapping" type ClusterNetworking struct { - // MachineNetwork is the list of IP address pools for machines. - // + // machineNetwork is the list of IP address pools for machines. + // This might be used among other things to generate appropriate networking security groups in some clouds providers. + // Currently only one entry or two for dual stack is supported. + // This field is immutable. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="machineNetwork is immutable and cannot be modified once set." + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:ListType=atomic // +immutable // +optional MachineNetwork []MachineNetworkEntry `json:"machineNetwork,omitempty"` - // ClusterNetwork is the list of IP address pools for pods. - // + // clusterNetwork is the list of IP address pools for pods. + // Defaults to cidr: "10.132.0.0/14". + // Currently only one entry is supported. + // This field is immutable. // +immutable + // +optional // +kubebuilder:default:={{cidr: "10.132.0.0/14"}} - ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` - - // ServiceNetwork is the list of IP address pools for services. - // NOTE: currently only one entry is supported. - // + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="clusterNetwork is immutable and cannot be modified once set." + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:MinItems=1 + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` + + // serviceNetwork is the list of IP address pools for services. + // Defaults to cidr: "172.31.0.0/16". + // Currently only one entry is supported. + // This field is immutable. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="serviceNetwork is immutable and cannot be modified once set." + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:MinItems=1 // +optional // +kubebuilder:default:={{cidr: "172.31.0.0/16"}} - ServiceNetwork []ServiceNetworkEntry `json:"serviceNetwork"` + ServiceNetwork []ServiceNetworkEntry `json:"serviceNetwork,omitempty"` - // NetworkType specifies the SDN provider used for cluster networking. - // + // networkType specifies the SDN provider used for cluster networking. + // Defaults to OVNKubernetes. + // This field is required and immutable. + // kubebuilder:validation:XValidation:rule="self == oldSelf", message="networkType is immutable" + // +optional // +kubebuilder:default:="OVNKubernetes" // +immutable - NetworkType NetworkType `json:"networkType"` + NetworkType NetworkType `json:"networkType,omitempty"` - // APIServer contains advanced network settings for the API server that affect - // how the APIServer is exposed inside a cluster node. + // apiServer contains advanced network settings for the API server that affect + // how the APIServer is exposed inside a hosted cluster node. 
// - // +immutable + // +optional APIServer *APIServerNetworking `json:"apiServer,omitempty"` } // MachineNetworkEntry is a single IP address block for node IP blocks. type MachineNetworkEntry struct { - // CIDR is the IP block address pool for machines within the cluster. + // cidr is the IP block address pool for machines within the cluster. + // +required CIDR ipnet.IPNet `json:"cidr"` } // ClusterNetworkEntry is a single IP address block for pod IP blocks. IP blocks // are allocated with size 2^HostSubnetLength. type ClusterNetworkEntry struct { - // CIDR is the IP block address pool. + // cidr is the IP block address pool. + // +required CIDR ipnet.IPNet `json:"cidr"` - // HostPrefix is the prefix size to allocate to each node from the CIDR. - // For example, 24 would allocate 2^8=256 adresses to each node. If this + // hostPrefix is the prefix size to allocate to each node from the CIDR. + // For example, 24 would allocate 2^(32-24)=2^8=256 addresses to each node. If this // field is not used by the plugin, it can be left unset. // +optional HostPrefix int32 `json:"hostPrefix,omitempty"` @@ -745,33 +1048,47 @@ type ClusterNetworkEntry struct { // ServiceNetworkEntry is a single IP address block for the service network. type ServiceNetworkEntry struct { - // CIDR is the IP block address pool for services within the cluster. + // cidr is the IP block address pool for services within the cluster in CIDR format (e.g., 192.168.1.0/24 or 2001:0db8::/64) + // +required CIDR ipnet.IPNet `json:"cidr"` } -// +kubebuilder:validation:Pattern:=`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$` +// +kubebuilder:validation:XValidation:rule=`self.matches('^((\\d{1,3}\\.){3}\\d{1,3}/\\d{1,2})$') || self.matches('^([0-9a-fA-F]{0,4}:){2,7}([0-9a-fA-F]{0,4})?/[0-9]{1,3}$')`,message="cidr must be a valid IPv4 or IPv6 CIDR notation (e.g., 192.168.1.0/24 or 2001:db8::/64)" +// +kubebuilder:validation:MaxLength=43 type CIDRBlock string // APIServerNetworking specifies how the APIServer is exposed inside a cluster // node. type APIServerNetworking struct { - // AdvertiseAddress is the address that nodes will use to talk to the API + // advertiseAddress is the address that pods within the nodes will use to talk to the API // server. This is an address associated with the loopback adapter of each // node. If not specified, the controller will take default values. // The default values will be set as 172.20.0.1 or fd00::1. + // This value is immutable. + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="advertiseAddress is immutable" + // +optional + // +kubebuilder:validation:MaxLength=255 AdvertiseAddress *string `json:"advertiseAddress,omitempty"` - // Port is the port at which the APIServer is exposed inside a node. Other + // port is the port at which the APIServer is exposed inside a node. Other // pods using host networking cannot listen on this port. - // If unset 6443 is used. + // If omitted 6443 is used. // This is useful to choose a port other than the default one which might interfere with customer environments e.g. https://github.com/openshift/hypershift/pull/356. // Setting this to 443 is possible only for backward compatibility reasons and it's discouraged. // Doing so, it would result in the controller overriding the KAS endpoint in the guest cluster having a discrepancy with the KAS Pod and potentially causing temporarily network failures. + // This value is immutable. 
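Editorial aside, not part of the patch: the ClusterNetworking comments above note that the CEL rule only rejects identical CIDR strings, while overlap checking is performed by the controllers. Below is a standalone sketch of such a pairwise overlap check using only the standard library; names are local to the example.

```go
// Standalone sketch of a pairwise CIDR overlap check across the machine,
// cluster, and service networks.
package main

import (
	"fmt"
	"net"
)

// overlap reports whether two CIDR prefixes overlap (one contains the other's base address).
func overlap(a, b *net.IPNet) bool {
	return a.Contains(b.IP) || b.Contains(a.IP)
}

// firstOverlap returns the first overlapping pair found, if any.
func firstOverlap(cidrs []string) (string, string, error) {
	nets := make([]*net.IPNet, 0, len(cidrs))
	for _, c := range cidrs {
		_, n, err := net.ParseCIDR(c)
		if err != nil {
			return "", "", err
		}
		nets = append(nets, n)
	}
	for i := range nets {
		for j := i + 1; j < len(nets); j++ {
			if overlap(nets[i], nets[j]) {
				return cidrs[i], cidrs[j], nil
			}
		}
	}
	return "", "", nil
}

func main() {
	a, b, _ := firstOverlap([]string{"10.132.0.0/14", "172.31.0.0/16", "10.133.0.0/24"})
	fmt.Println(a, b) // 10.132.0.0/14 10.133.0.0/24
}
```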
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="port is immutable" + // +optional Port *int32 `json:"port,omitempty"` - // AllowedCIDRBlocks is an allow list of CIDR blocks that can access the APIServer + // allowedCIDRBlocks is an allow list of CIDR blocks that can access the APIServer. // If not specified, traffic is allowed from all addresses. - // This depends on underlying support by the cloud provider for Service LoadBalancerSourceRanges + // This field is enforced for ARO (Azure Red Hat OpenShift) via the shared-ingress HAProxy. + // For platforms other than ARO, the enforcement depends on whether the underlying cloud provider supports the Service LoadBalancerSourceRanges field. + // If the platform does not support LoadBalancerSourceRanges, this field may have no effect. + // +kubebuilder:validation:MaxItems=50 + // +listType=set + // +optional AllowedCIDRBlocks []CIDRBlock `json:"allowedCIDRBlocks,omitempty"` } @@ -795,8 +1112,7 @@ const ( ) // PlatformType is a specific supported infrastructure provider. -// -// +kubebuilder:validation:Enum=AWS;None;IBMCloud;Agent;KubeVirt;Azure;PowerVS +// +kubebuilder:validation:MaxLength=100 type PlatformType string const ( @@ -820,6 +1136,9 @@ const ( // PowerVSPlatform represents PowerVS infrastructure. PowerVSPlatform PlatformType = "PowerVS" + + // OpenStackPlatform represents OpenStack infrastructure. + OpenStackPlatform PlatformType = "OpenStack" ) // List all PlatformType instances @@ -832,1012 +1151,281 @@ func PlatformTypes() []PlatformType { KubevirtPlatform, AzurePlatform, PowerVSPlatform, + OpenStackPlatform, } } // PlatformSpec specifies the underlying infrastructure provider for the cluster // and is used to configure platform specific behavior. type PlatformSpec struct { - // Type is the type of infrastructure provider for the cluster. + // type is the type of infrastructure provider for the cluster. // // +unionDiscriminator + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Type is immutable" // +immutable + // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=AWS;Azure;IBMCloud;KubeVirt;Agent;PowerVS;None + // +openshift:validation:FeatureGateAwareEnum:featureGate=OpenStack,enum=AWS;Azure;IBMCloud;KubeVirt;Agent;PowerVS;None;OpenStack + // +required Type PlatformType `json:"type"` - // AWS specifies configuration for clusters running on Amazon Web Services. + // aws specifies configuration for clusters running on Amazon Web Services. // // +optional // +immutable AWS *AWSPlatformSpec `json:"aws,omitempty"` - // Agent specifies configuration for agent-based installations. + // agent specifies configuration for agent-based installations. // // +optional // +immutable Agent *AgentPlatformSpec `json:"agent,omitempty"` - // IBMCloud defines IBMCloud specific settings for components + // ibmcloud defines IBMCloud specific settings for components + // +optional IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` - // Azure defines azure specific settings + // azure defines azure specific settings + // +optional Azure *AzurePlatformSpec `json:"azure,omitempty"` - // PowerVS specifies configuration for clusters running on IBMCloud Power VS Service. + // powervs specifies configuration for clusters running on IBMCloud Power VS Service. // This field is immutable. Once set, It can't be changed. // // +optional // +immutable PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"` - // KubeVirt defines KubeVirt specific settings for cluster components. 
+ // kubevirt defines KubeVirt specific settings for cluster components. // // +optional // +immutable Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"` -} - -type KubevirtPlatformCredentials struct { - // InfraKubeConfigSecret is a reference to a secret that contains the kubeconfig for the external infra cluster - // that will be used to host the KubeVirt virtual machines for this cluster. - // - // +immutable - // +kubebuilder:validation:Required - // +required - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="infraKubeConfigSecret is immutable" - InfraKubeConfigSecret *KubeconfigSecretRef `json:"infraKubeConfigSecret,omitempty"` - - // InfraNamespace defines the namespace on the external infra cluster that is used to host the KubeVirt - // virtual machines. This namespace must already exist before creating the HostedCluster and the kubeconfig - // referenced in the InfraKubeConfigSecret must have access to manage the required resources within this - // namespace. - // - // +immutable - // +kubebuilder:validation:Required - // +required - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="infraNamespace is immutable" - InfraNamespace string `json:"infraNamespace"` -} - -// KubevirtPlatformSpec specifies configuration for kubevirt guest cluster installations -// +kubebuilder:validation:XValidation:rule="!has(oldSelf.generateID) || has(self.generateID)", message="Kubevirt GenerateID is required once set" -type KubevirtPlatformSpec struct { - // BaseDomainPassthrough toggles whether or not an automatically - // generated base domain for the guest cluster should be used that - // is a subdomain of the management cluster's *.apps DNS. - // - // For the KubeVirt platform, the basedomain can be autogenerated using - // the *.apps domain of the management/infra hosting cluster - // This makes the guest cluster's base domain a subdomain of the - // hypershift infra/mgmt cluster's base domain. - // - // Example: - // Infra/Mgmt cluster's DNS - // Base: example.com - // Cluster: mgmt-cluster.example.com - // Apps: *.apps.mgmt-cluster.example.com - // KubeVirt Guest cluster's DNS - // Base: apps.mgmt-cluster.example.com - // Cluster: guest.apps.mgmt-cluster.example.com - // Apps: *.apps.guest.apps.mgmt-cluster.example.com - // - // This is possible using OCP wildcard routes - // - // +optional - // +immutable - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="baseDomainPassthrough is immutable" - BaseDomainPassthrough *bool `json:"baseDomainPassthrough,omitempty"` - - // GenerateID is used to uniquely apply a name suffix to resources associated with - // kubevirt infrastructure resources - // +kubebuilder:validation:Optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Kubevirt GenerateID is immutable once set" - // +kubebuilder:validation:MaxLength=11 - // +optional - GenerateID string `json:"generateID,omitempty"` - // Credentials defines the client credentials used when creating KubeVirt virtual machines. - // Defining credentials is only necessary when the KubeVirt virtual machines are being placed - // on a cluster separate from the one hosting the Hosted Control Plane components. - // - // The default behavior when Credentials is not defined is for the KubeVirt VMs to be placed on - // the same cluster and namespace as the Hosted Control Plane. 
- // +optional - Credentials *KubevirtPlatformCredentials `json:"credentials,omitempty"` - - // StorageDriver defines how the KubeVirt CSI driver exposes StorageClasses on - // the infra cluster (hosting the VMs) to the guest cluster. - // - // +kubebuilder:validation:Optional - // +optional - // +immutable - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver is immutable" - StorageDriver *KubevirtStorageDriverSpec `json:"storageDriver,omitempty"` -} - -// KubevirtStorageDriverConfigType defines how the kubevirt storage driver is configured. -// -// +kubebuilder:validation:Enum=None;Default;Manual -type KubevirtStorageDriverConfigType string - -const ( - // NoneKubevirtStorageDriverConfigType means no kubevirt storage driver is used - NoneKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "None" - - // DefaultKubevirtStorageDriverConfigType means the kubevirt storage driver maps to the - // underlying infra cluster's default storageclass - DefaultKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "Default" - - // ManualKubevirtStorageDriverConfigType means the kubevirt storage driver mapping is - // explicitly defined. - ManualKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "Manual" -) - -type KubevirtStorageDriverSpec struct { - // Type represents the type of kubevirt csi driver configuration to use - // - // +unionDiscriminator - // +immutable - // +kubebuilder:default=Default - // +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver.Type is immutable" - Type KubevirtStorageDriverConfigType `json:"type,omitempty"` - // Manual is used to explicilty define how the infra storageclasses are - // mapped to guest storageclasses - // - // +immutable + // openstack specifies configuration for clusters running on OpenStack. // +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver.Manual is immutable" - Manual *KubevirtManualStorageDriverConfig `json:"manual,omitempty"` -} - -type KubevirtManualStorageDriverConfig struct { - // StorageClassMapping maps StorageClasses on the infra cluster hosting - // the KubeVirt VMs to StorageClasses that are made available within the - // Guest Cluster. - // - // NOTE: It is possible that not all capablities of an infra cluster's - // storageclass will be present for the corresponding guest clusters storageclass. - // - // +optional - // +immutable - // +kubebuilder:validation:Optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageClassMapping is immutable" - StorageClassMapping []KubevirtStorageClassMapping `json:"storageClassMapping,omitempty"` - - // +optional - // +immutable - // +kubebuilder:validation:Optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="volumeSnapshotClassMapping is immutable" - VolumeSnapshotClassMapping []KubevirtVolumeSnapshotClassMapping `json:"volumeSnapshotClassMapping,omitempty"` -} - -type KubevirtStorageClassMapping struct { - // Group contains which group this mapping belongs to. - Group string `json:"group,omitempty"` - // InfraStorageClassName is the name of the infra cluster storage class that - // will be exposed to the guest. 
- InfraStorageClassName string `json:"infraStorageClassName"` - // GuestStorageClassName is the name that the corresponding storageclass will - // be called within the guest cluster - GuestStorageClassName string `json:"guestStorageClassName"` -} - -type KubevirtVolumeSnapshotClassMapping struct { - // Group contains which group this mapping belongs to. - Group string `json:"group,omitempty"` - // InfraStorageClassName is the name of the infra cluster volume snapshot class that - // will be exposed to the guest. - InfraVolumeSnapshotClassName string `json:"infraVolumeSnapshotClassName"` - // GuestVolumeSnapshotClassName is the name that the corresponding volumeSnapshotClass will - // be called within the guest cluster - GuestVolumeSnapshotClassName string `json:"guestVolumeSnapshotClassName"` -} - -// AgentPlatformSpec specifies configuration for agent-based installations. -type AgentPlatformSpec struct { - // AgentNamespace is the namespace where to search for Agents for this cluster - AgentNamespace string `json:"agentNamespace"` + // +openshift:enable:FeatureGate=OpenStack + OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"` } // IBMCloudPlatformSpec defines IBMCloud specific settings for components type IBMCloudPlatformSpec struct { - // ProviderType is a specific supported infrastructure provider within IBM Cloud. + // providerType is a specific supported infrastructure provider within IBM Cloud. + // +optional ProviderType configv1.IBMCloudProviderType `json:"providerType,omitempty"` } -// PowerVSPlatformSpec defines IBMCloud PowerVS specific settings for components -type PowerVSPlatformSpec struct { - // AccountID is the IBMCloud account id. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - AccountID string `json:"accountID"` - - // CISInstanceCRN is the IBMCloud CIS Service Instance's Cloud Resource Name - // This field is immutable. Once set, It can't be changed. - // - // +kubebuilder:validation:Pattern=`^crn:` - // +immutable - CISInstanceCRN string `json:"cisInstanceCRN"` - - // ResourceGroup is the IBMCloud Resource Group in which the cluster resides. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - ResourceGroup string `json:"resourceGroup"` - - // Region is the IBMCloud region in which the cluster resides. This configures the - // OCP control plane cloud integrations, and is used by NodePool to resolve - // the correct boot image for a given release. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - Region string `json:"region"` - - // Zone is the availability zone where control plane cloud resources are - // created. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - Zone string `json:"zone"` - - // Subnet is the subnet to use for control plane cloud resources. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - Subnet *PowerVSResourceReference `json:"subnet"` - - // ServiceInstance is the reference to the Power VS service on which the server instance(VM) will be created. - // Power VS service is a container for all Power VS instances at a specific geographic region. - // serviceInstance can be created via IBM Cloud catalog or CLI. - // ServiceInstanceID is the unique identifier that can be obtained from IBM Cloud UI or IBM Cloud cli. - // - // More detail about Power VS service instance. 
- // https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server - // - // This field is immutable. Once set, It can't be changed. - // - // +immutable - ServiceInstanceID string `json:"serviceInstanceID"` - - // VPC specifies IBM Cloud PowerVS Load Balancing configuration for the control - // plane. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - VPC *PowerVSVPC `json:"vpc"` - - // KubeCloudControllerCreds is a reference to a secret containing cloud - // credentials with permissions matching the cloud controller policy. - // This field is immutable. Once set, It can't be changed. - // - // TODO(dan): document the "cloud controller policy" - // - // +immutable - KubeCloudControllerCreds corev1.LocalObjectReference `json:"kubeCloudControllerCreds"` - - // NodePoolManagementCreds is a reference to a secret containing cloud - // credentials with permissions matching the node pool management policy. - // This field is immutable. Once set, It can't be changed. - // - // TODO(dan): document the "node pool management policy" - // - // +immutable - NodePoolManagementCreds corev1.LocalObjectReference `json:"nodePoolManagementCreds"` - - // IngressOperatorCloudCreds is a reference to a secret containing ibm cloud - // credentials for ingress operator to get authenticated with ibm cloud. - // - // +immutable - IngressOperatorCloudCreds corev1.LocalObjectReference `json:"ingressOperatorCloudCreds"` - - // StorageOperatorCloudCreds is a reference to a secret containing ibm cloud - // credentials for storage operator to get authenticated with ibm cloud. - // - // +immutable - StorageOperatorCloudCreds corev1.LocalObjectReference `json:"storageOperatorCloudCreds"` - - // ImageRegistryOperatorCloudCreds is a reference to a secret containing ibm cloud - // credentials for image registry operator to get authenticated with ibm cloud. - // - // +immutable - ImageRegistryOperatorCloudCreds corev1.LocalObjectReference `json:"imageRegistryOperatorCloudCreds"` +// Release represents the metadata for an OCP release payload image. +type Release struct { + // image is the image pullspec of an OCP release payload image. + // See https://quay.io/repository/openshift-release-dev/ocp-release?tab=tags for a list of available images. + // +kubebuilder:validation:XValidation:rule=`self.matches('^(\\w+\\S+)$')`,message="Image must start with a word character (letters, digits, or underscores) and contain no white spaces" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 + // +required + Image string `json:"image"` } -// PowerVSVPC specifies IBM Cloud PowerVS LoadBalancer configuration for the control -// plane. -type PowerVSVPC struct { - // Name for VPC to used for all the service load balancer. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - Name string `json:"name"` - - // Region is the IBMCloud region in which VPC gets created, this VPC used for all the ingress traffic - // into the OCP cluster. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - Region string `json:"region"` - - // Zone is the availability zone where load balancer cloud resources are - // created. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - // +optional - Zone string `json:"zone,omitempty"` - - // Subnet is the subnet to use for load balancer. - // This field is immutable. Once set, It can't be changed. 
- // - // +immutable - // +optional - Subnet string `json:"subnet,omitempty"` +// We expose here internal configuration knobs that won't be exposed to the service. +type AutoNode struct { + // provisionerConfig is the implementation used for Node auto provisioning. + // +required + Provisioner *ProvisionerConfig `json:"provisionerConfig"` } -// PowerVSResourceReference is a reference to a specific IBMCloud PowerVS resource by ID, or Name. -// Only one of ID, or Name may be specified. Specifying more than one will result in -// a validation error. -type PowerVSResourceReference struct { - // ID of resource - // +optional - ID *string `json:"id,omitempty"` - - // Name of resource +// ProvisionerConfig is a enum specifying the strategy for auto managing Nodes. +type ProvisionerConfig struct { + // name specifies the name of the provisioner to use. + // +required + // +kubebuilder:validation:Enum=Karpenter + Name Provisioner `json:"name"` + // karpenter specifies the configuration for the Karpenter provisioner. // +optional - Name *string `json:"name,omitempty"` + Karpenter *KarpenterConfig `json:"karpenter,omitempty"` } -// AWSCloudProviderConfig specifies AWS networking configuration. -type AWSCloudProviderConfig struct { - // Subnet is the subnet to use for control plane cloud resources. - // - // +optional - Subnet *AWSResourceReference `json:"subnet,omitempty"` - - // Zone is the availability zone where control plane cloud resources are - // created. - // +type KarpenterConfig struct { + // platform specifies the platform-specific configuration for Karpenter. + // +required + Platform PlatformType `json:"platform"` + // aws specifies the AWS-specific configuration for Karpenter. // +optional - Zone string `json:"zone,omitempty"` - - // VPC is the VPC to use for control plane cloud resources. - VPC string `json:"vpc"` + AWS *KarpenterAWSConfig `json:"aws,omitempty"` } -// AWSEndpointAccessType specifies the publishing scope of cluster endpoints. -type AWSEndpointAccessType string +type KarpenterAWSConfig struct { + // roleARN specifies the ARN of the Karpenter provisioner. + // +required + // +kubebuilder:validation:MaxLength=255 + RoleARN string `json:"roleARN"` +} const ( - // Public endpoint access allows public API server access and public node - // communication with the control plane. - Public AWSEndpointAccessType = "Public" - - // PublicAndPrivate endpoint access allows public API server access and - // private node communication with the control plane. - PublicAndPrivate AWSEndpointAccessType = "PublicAndPrivate" - - // Private endpoint access allows only private API server access and private - // node communication with the control plane. - Private AWSEndpointAccessType = "Private" + ProvisionerKarpeneter Provisioner = "Karpenter" ) -// AWSPlatformSpec specifies configuration for clusters running on Amazon Web Services. -type AWSPlatformSpec struct { - // Region is the AWS region in which the cluster resides. This configures the - // OCP control plane cloud integrations, and is used by NodePool to resolve - // the correct boot AMI for a given release. - // - // +immutable - Region string `json:"region"` +// provisioner is a enum specifying the strategy for auto managing Nodes. +// +kubebuilder:validation:Enum=Karpenter +type Provisioner string - // CloudProviderConfig specifies AWS networking configuration for the control - // plane. 
-	// CloudProviderConfig specifies AWS networking configuration for the control
-	// plane.
-	// This is mainly used for cloud provider controller config:
-	// https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go#L1347-L1364
-	// TODO(dan): should this be named AWSNetworkConfig?
+// ScaleDownConfig configures when and how to scale down cluster nodes.
+type ScaleDownConfig struct {
+	// delayAfterAddSeconds sets how long, in seconds, after scale up before scale down evaluation resumes.
+	// It must be between 0 and 86400 (24 hours).
+	// When set to 0, scale down evaluation resumes immediately after scale up, without any delay.
+	// When omitted, the autoscaler defaults to 600s (10 minutes).
 	//
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=86400
 	// +optional
-	// +immutable
-	CloudProviderConfig *AWSCloudProviderConfig `json:"cloudProviderConfig,omitempty"`
+	DelayAfterAddSeconds *int32 `json:"delayAfterAddSeconds,omitempty"`
 
-	// ServiceEndpoints specifies optional custom endpoints which will override
-	// the default service endpoint of specific AWS Services.
-	//
-	// There must be only one ServiceEndpoint for a given service name.
+	// delayAfterDeleteSeconds sets how long, in seconds, after node deletion before scale down evaluation resumes.
+	// It must be between 0 and 86400 (24 hours).
+	// When set to 0, scale down evaluation resumes immediately after node deletion, without any delay.
+	// When omitted, the autoscaler defaults to 0s (scale down evaluation resumes at the next scan interval).
 	//
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=86400
 	// +optional
-	// +immutable
-	ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+	DelayAfterDeleteSeconds *int32 `json:"delayAfterDeleteSeconds,omitempty"`
 
-	// RolesRef contains references to various AWS IAM roles required to enable
-	// integrations such as OIDC.
-	//
-	// +immutable
-	RolesRef AWSRolesRef `json:"rolesRef"`
-
-	// ResourceTags is a list of additional tags to apply to AWS resources created
-	// for the cluster. See
-	// https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for
-	// information on tagging AWS resources. AWS supports a maximum of 50 tags per
-	// resource. OpenShift reserves 25 tags for its use, leaving 25 tags available
-	// for the user.
+	// delayAfterFailureSeconds sets how long, in seconds, after a scale down failure before scale down evaluation resumes.
+	// It must be between 0 and 86400 (24 hours).
+	// When set to 0, scale down evaluation resumes immediately after a scale down failure, without any delay.
+	// When omitted, the autoscaler defaults to 180s (3 minutes).
 	//
-	// +kubebuilder:validation:MaxItems=25
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=86400
 	// +optional
-	ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+	DelayAfterFailureSeconds *int32 `json:"delayAfterFailureSeconds,omitempty"`
 
-	// EndpointAccess specifies the publishing scope of cluster endpoints. The
-	// default is Public.
+	// unneededDurationSeconds establishes how long, in seconds, a node should be unneeded before it is eligible for scale down.
+	// It must be between 0 and 86400 (24 hours).
+	// When omitted, the autoscaler defaults to 600s (10 minutes).
 	//
-	// +kubebuilder:validation:Enum=Public;PublicAndPrivate;Private
-	// +kubebuilder:default=Public
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=86400
 	// +optional
-	EndpointAccess AWSEndpointAccessType `json:"endpointAccess,omitempty"`
+	UnneededDurationSeconds *int32 `json:"unneededDurationSeconds,omitempty"`
 
-	// AdditionalAllowedPrincipals specifies a list of additional allowed principal ARNs
-	// to be added to the hosted control plane's VPC Endpoint Service to enable additional
-	// VPC Endpoint connection requests to be automatically accepted.
-	// See https://docs.aws.amazon.com/vpc/latest/privatelink/configure-endpoint-service.html
-	// for more details around VPC Endpoint Service allowed principals.
+	// utilizationThresholdPercent determines the node utilization level, defined as the sum of requested resources divided by capacity, below which a node can be considered for scale down.
+	// The value represents a percentage from 0 to 100.
+	// When set to 0, nodes are considered for scale down only if they are completely idle (0% utilization).
+	// When set to 100, nodes are considered for scale down regardless of their utilization level.
+	// When omitted, the autoscaler defaults to 50%.
 	//
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=100
 	// +optional
-	AdditionalAllowedPrincipals []string `json:"additionalAllowedPrincipals,omitempty"`
-
-	// MultiArch specifies whether the Hosted Cluster will be expected to support NodePools with different
-	// CPU architectures, i.e., supporting arm64 NodePools and supporting amd64 NodePools on the same Hosted Cluster.
-	// +kubebuilder:default=false
-	// +optional
-	MultiArch bool `json:"multiArch"`
-}
-
-type AWSRoleCredentials struct {
-	ARN       string `json:"arn"`
-	Namespace string `json:"namespace"`
-	Name      string `json:"name"`
-}
-
-// AWSResourceTag is a tag to apply to AWS resources created for the cluster.
-type AWSResourceTag struct {
-	// Key is the key of the tag.
-	//
-	// +kubebuilder:validation:MinLength=1
-	// +kubebuilder:validation:MaxLength=128
-	// +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
-	Key string `json:"key"`
-	// Value is the value of the tag.
-	//
-	// Some AWS service do not support empty values. Since tags are added to
-	// resources in many services, the length of the tag value must meet the
-	// requirements of all services.
-	//
-	// +kubebuilder:validation:MinLength=1
-	// +kubebuilder:validation:MaxLength=256
-	// +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
-	Value string `json:"value"`
-}
-
-// AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API.
-type AWSRolesRef struct {
-	// The referenced role must have a trust relationship that allows it to be assumed via web identity.
-	// https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.
-	// Example:
-	// {
-	//		"Version": "2012-10-17",
-	//		"Statement": [
-	//			{
-	//				"Effect": "Allow",
-	//				"Principal": {
-	//					"Federated": "{{ .ProviderARN }}"
-	//				},
-	//				"Action": "sts:AssumeRoleWithWebIdentity",
-	//				"Condition": {
-	//					"StringEquals": {
-	//						"{{ .ProviderName }}:sub": {{ .ServiceAccounts }}
-	//					}
-	//				}
-	//			}
-	//		]
-	//	}
-	//
-	// IngressARN is an ARN value referencing a role appropriate for the Ingress Operator.
- // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "elasticloadbalancing:DescribeLoadBalancers", - // "tag:GetResources", - // "route53:ListHostedZones" - // ], - // "Resource": "*" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "route53:ChangeResourceRecordSets" - // ], - // "Resource": [ - // "arn:aws:route53:::PUBLIC_ZONE_ID", - // "arn:aws:route53:::PRIVATE_ZONE_ID" - // ] - // } - // ] - // } - IngressARN string `json:"ingressARN"` - - // ImageRegistryARN is an ARN value referencing a role appropriate for the Image Registry Operator. - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "s3:CreateBucket", - // "s3:DeleteBucket", - // "s3:PutBucketTagging", - // "s3:GetBucketTagging", - // "s3:PutBucketPublicAccessBlock", - // "s3:GetBucketPublicAccessBlock", - // "s3:PutEncryptionConfiguration", - // "s3:GetEncryptionConfiguration", - // "s3:PutLifecycleConfiguration", - // "s3:GetLifecycleConfiguration", - // "s3:GetBucketLocation", - // "s3:ListBucket", - // "s3:GetObject", - // "s3:PutObject", - // "s3:DeleteObject", - // "s3:ListBucketMultipartUploads", - // "s3:AbortMultipartUpload", - // "s3:ListMultipartUploadParts" - // ], - // "Resource": "*" - // } - // ] - // } - ImageRegistryARN string `json:"imageRegistryARN"` - - // StorageARN is an ARN value referencing a role appropriate for the Storage Operator. - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "ec2:AttachVolume", - // "ec2:CreateSnapshot", - // "ec2:CreateTags", - // "ec2:CreateVolume", - // "ec2:DeleteSnapshot", - // "ec2:DeleteTags", - // "ec2:DeleteVolume", - // "ec2:DescribeInstances", - // "ec2:DescribeSnapshots", - // "ec2:DescribeTags", - // "ec2:DescribeVolumes", - // "ec2:DescribeVolumesModifications", - // "ec2:DetachVolume", - // "ec2:ModifyVolume" - // ], - // "Resource": "*" - // } - // ] - // } - StorageARN string `json:"storageARN"` - - // NetworkARN is an ARN value referencing a role appropriate for the Network Operator. - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "ec2:DescribeInstances", - // "ec2:DescribeInstanceStatus", - // "ec2:DescribeInstanceTypes", - // "ec2:UnassignPrivateIpAddresses", - // "ec2:AssignPrivateIpAddresses", - // "ec2:UnassignIpv6Addresses", - // "ec2:AssignIpv6Addresses", - // "ec2:DescribeSubnets", - // "ec2:DescribeNetworkInterfaces" - // ], - // "Resource": "*" - // } - // ] - // } - NetworkARN string `json:"networkARN"` - - // KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC. 
- // Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Action": [ - // "autoscaling:DescribeAutoScalingGroups", - // "autoscaling:DescribeLaunchConfigurations", - // "autoscaling:DescribeTags", - // "ec2:DescribeAvailabilityZones", - // "ec2:DescribeInstances", - // "ec2:DescribeImages", - // "ec2:DescribeRegions", - // "ec2:DescribeRouteTables", - // "ec2:DescribeSecurityGroups", - // "ec2:DescribeSubnets", - // "ec2:DescribeVolumes", - // "ec2:CreateSecurityGroup", - // "ec2:CreateTags", - // "ec2:CreateVolume", - // "ec2:ModifyInstanceAttribute", - // "ec2:ModifyVolume", - // "ec2:AttachVolume", - // "ec2:AuthorizeSecurityGroupIngress", - // "ec2:CreateRoute", - // "ec2:DeleteRoute", - // "ec2:DeleteSecurityGroup", - // "ec2:DeleteVolume", - // "ec2:DetachVolume", - // "ec2:RevokeSecurityGroupIngress", - // "ec2:DescribeVpcs", - // "elasticloadbalancing:AddTags", - // "elasticloadbalancing:AttachLoadBalancerToSubnets", - // "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - // "elasticloadbalancing:CreateLoadBalancer", - // "elasticloadbalancing:CreateLoadBalancerPolicy", - // "elasticloadbalancing:CreateLoadBalancerListeners", - // "elasticloadbalancing:ConfigureHealthCheck", - // "elasticloadbalancing:DeleteLoadBalancer", - // "elasticloadbalancing:DeleteLoadBalancerListeners", - // "elasticloadbalancing:DescribeLoadBalancers", - // "elasticloadbalancing:DescribeLoadBalancerAttributes", - // "elasticloadbalancing:DetachLoadBalancerFromSubnets", - // "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - // "elasticloadbalancing:ModifyLoadBalancerAttributes", - // "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - // "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - // "elasticloadbalancing:AddTags", - // "elasticloadbalancing:CreateListener", - // "elasticloadbalancing:CreateTargetGroup", - // "elasticloadbalancing:DeleteListener", - // "elasticloadbalancing:DeleteTargetGroup", - // "elasticloadbalancing:DeregisterTargets", - // "elasticloadbalancing:DescribeListeners", - // "elasticloadbalancing:DescribeLoadBalancerPolicies", - // "elasticloadbalancing:DescribeTargetGroups", - // "elasticloadbalancing:DescribeTargetHealth", - // "elasticloadbalancing:ModifyListener", - // "elasticloadbalancing:ModifyTargetGroup", - // "elasticloadbalancing:RegisterTargets", - // "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - // "iam:CreateServiceLinkedRole", - // "kms:DescribeKey" - // ], - // "Resource": [ - // "*" - // ], - // "Effect": "Allow" - // } - // ] - // } - // +immutable - KubeCloudControllerARN string `json:"kubeCloudControllerARN"` - - // NodePoolManagementARN is an ARN value referencing a role appropriate for the CAPI Controller. 
- // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Action": [ - // "ec2:AssociateRouteTable", - // "ec2:AttachInternetGateway", - // "ec2:AuthorizeSecurityGroupIngress", - // "ec2:CreateInternetGateway", - // "ec2:CreateNatGateway", - // "ec2:CreateRoute", - // "ec2:CreateRouteTable", - // "ec2:CreateSecurityGroup", - // "ec2:CreateSubnet", - // "ec2:CreateTags", - // "ec2:DeleteInternetGateway", - // "ec2:DeleteNatGateway", - // "ec2:DeleteRouteTable", - // "ec2:DeleteSecurityGroup", - // "ec2:DeleteSubnet", - // "ec2:DeleteTags", - // "ec2:DescribeAccountAttributes", - // "ec2:DescribeAddresses", - // "ec2:DescribeAvailabilityZones", - // "ec2:DescribeImages", - // "ec2:DescribeInstances", - // "ec2:DescribeInternetGateways", - // "ec2:DescribeNatGateways", - // "ec2:DescribeNetworkInterfaces", - // "ec2:DescribeNetworkInterfaceAttribute", - // "ec2:DescribeRouteTables", - // "ec2:DescribeSecurityGroups", - // "ec2:DescribeSubnets", - // "ec2:DescribeVpcs", - // "ec2:DescribeVpcAttribute", - // "ec2:DescribeVolumes", - // "ec2:DetachInternetGateway", - // "ec2:DisassociateRouteTable", - // "ec2:DisassociateAddress", - // "ec2:ModifyInstanceAttribute", - // "ec2:ModifyNetworkInterfaceAttribute", - // "ec2:ModifySubnetAttribute", - // "ec2:RevokeSecurityGroupIngress", - // "ec2:RunInstances", - // "ec2:TerminateInstances", - // "tag:GetResources", - // "ec2:CreateLaunchTemplate", - // "ec2:CreateLaunchTemplateVersion", - // "ec2:DescribeLaunchTemplates", - // "ec2:DescribeLaunchTemplateVersions", - // "ec2:DeleteLaunchTemplate", - // "ec2:DeleteLaunchTemplateVersions" - // ], - // "Resource": [ - // "*" - // ], - // "Effect": "Allow" - // }, - // { - // "Condition": { - // "StringLike": { - // "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com" - // } - // }, - // "Action": [ - // "iam:CreateServiceLinkedRole" - // ], - // "Resource": [ - // "arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing" - // ], - // "Effect": "Allow" - // }, - // { - // "Action": [ - // "iam:PassRole" - // ], - // "Resource": [ - // "arn:*:iam::*:role/*-worker-role" - // ], - // "Effect": "Allow" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "kms:Decrypt", - // "kms:ReEncrypt", - // "kms:GenerateDataKeyWithoutPlainText", - // "kms:DescribeKey" - // ], - // "Resource": "*" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "kms:CreateGrant" - // ], - // "Resource": "*", - // "Condition": { - // "Bool": { - // "kms:GrantIsForAWSResource": true - // } - // } - // } - // ] - // } - // - // +immutable - NodePoolManagementARN string `json:"nodePoolManagementARN"` - - // ControlPlaneOperatorARN is an ARN value referencing a role appropriate for the Control Plane Operator. 
- // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "ec2:CreateVpcEndpoint", - // "ec2:DescribeVpcEndpoints", - // "ec2:ModifyVpcEndpoint", - // "ec2:DeleteVpcEndpoints", - // "ec2:CreateTags", - // "route53:ListHostedZones", - // "ec2:CreateSecurityGroup", - // "ec2:AuthorizeSecurityGroupIngress", - // "ec2:AuthorizeSecurityGroupEgress", - // "ec2:DeleteSecurityGroup", - // "ec2:RevokeSecurityGroupIngress", - // "ec2:RevokeSecurityGroupEgress", - // "ec2:DescribeSecurityGroups", - // "ec2:DescribeVpcs", - // ], - // "Resource": "*" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "route53:ChangeResourceRecordSets", - // "route53:ListResourceRecordSets" - // ], - // "Resource": "arn:aws:route53:::%s" - // } - // ] - // } - // +immutable - ControlPlaneOperatorARN string `json:"controlPlaneOperatorARN"` + UtilizationThresholdPercent *int32 `json:"utilizationThresholdPercent,omitempty"` } -// AWSServiceEndpoint stores the configuration for services to -// override existing defaults of AWS Services. -type AWSServiceEndpoint struct { - // Name is the name of the AWS service. - // This must be provided and cannot be empty. - Name string `json:"name"` - - // URL is fully qualified URI with scheme https, that overrides the default generated - // endpoint for a client. - // This must be provided and cannot be empty. - // - // +kubebuilder:validation:Pattern=`^https://` - URL string `json:"url"` -} +// ExpanderString contains the name of an expander to be used by the cluster autoscaler. +// +kubebuilder:validation:Enum=LeastWaste;Priority;Random +type ExpanderString string -// AzurePlatformSpec specifies configuration for clusters running on Azure. Generally, the HyperShift API assumes bring -// your own (BYO) cloud infrastructure resources. For example, resources like a resource group, a subnet, or a vnet -// would be pre-created and then their names would be used respectively in the ResourceGroupName, SubnetName, VnetName -// fields of the Hosted Cluster CR. An existing cloud resource is expected to exist under the same SubscriptionID. -type AzurePlatformSpec struct { - // Credentials is the object containing existing Azure credentials needed for creating and managing cloud - // infrastructure resources. - // - // +kubebuilder:validation:Required - // +required - Credentials corev1.LocalObjectReference `json:"credentials"` +// These constants define the valid values for an ExpanderString +const ( + LeastWasteExpander ExpanderString = "LeastWaste" // Selects the node group with the least idle resources. + PriorityExpander ExpanderString = "Priority" // Selects the node group with the highest priority. + RandomExpander ExpanderString = "Random" // Selects a node group randomly. +) - // Cloud is the cloud environment identifier, valid values could be found here: https://github.com/Azure/go-autorest/blob/4c0e21ca2bbb3251fe7853e6f9df6397f53dd419/autorest/azure/environments.go#L33 - // - // +kubebuilder:validation:Enum=AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud - // +kubebuilder:default="AzurePublicCloud" - Cloud string `json:"cloud,omitempty"` +// ScalingType defines the scaling behavior for the cluster autoscaler. +// +kubebuilder:validation:Enum=ScaleUpOnly;ScaleUpAndScaleDown +type ScalingType string - // Location is the Azure region in where all the cloud infrastructure resources will be created. 
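+
+// An illustrative ScaleDownConfig (not part of the API), assuming the
+// defaults documented on the struct; each omitted field falls back to its
+// autoscaler default. ptr.To is k8s.io/utils/ptr; any *int32 helper works.
+//
+//	scaleDown := ScaleDownConfig{
+//		DelayAfterAddSeconds:        ptr.To[int32](300),  // resume evaluation 5 minutes after scale up
+//		UnneededDurationSeconds:     ptr.To[int32](1200), // a node must be unneeded for 20 minutes
+//		UtilizationThresholdPercent: ptr.To[int32](40),   // only nodes under 40% utilization qualify
+//	}
+//
+// Note that scaleDown is honored only when ClusterAutoscaling.Scaling is
+// ScaleUpAndScaleDown, which the CEL rule on ClusterAutoscaling below
+// enforces.
+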
- // - // Example: eastus - // - // +kubebuilder:validation:Required - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Location is immutable" - // +immutable - // +required - Location string `json:"location"` - - // ResourceGroupName is the name of an existing resource group where all cloud resources created by the Hosted - // Cluster are to be placed. The resource group is expected to exist under the same subscription as SubscriptionID. - // - // In ARO HCP, this will be the managed resource group where customer cloud resources will be created. - // - // Resource group naming requirements can be found here: https://azure.github.io/PSRule.Rules.Azure/en/rules/Azure.ResourceGroup.Name/. - // - //Example: if your resource group ID is /subscriptions//resourceGroups/, your - // ResourceGroupName is . - // - // +kubebuilder:default:=default - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9_()\-\.]{1,89}[a-zA-Z0-9_()\-]$` - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ResourceGroupName is immutable" - // +immutable - // +required - ResourceGroupName string `json:"resourceGroup"` +const ( + // ScaleUpOnly means the autoscaler will only scale up nodes, never scale down. + ScaleUpOnly ScalingType = "ScaleUpOnly" - // VnetID is the ID of an existing VNET to use in creating VMs. The VNET can exist in a different resource group - // other than the one specified in ResourceGroupName, but it must exist under the same subscription as - // SubscriptionID. - // - // In ARO HCP, this will be the ID of the customer provided VNET. - // - // Example: /subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks/ - // - // +kubebuilder:validation:Required - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="VnetID is immutable" - // +immutable - // +required - VnetID string `json:"vnetID,omitempty"` + // ScaleUpAndScaleDown means the autoscaler will both scale up and scale down nodes. + ScaleUpAndScaleDown ScalingType = "ScaleUpAndScaleDown" +) - // SubnetID is the subnet ID of an existing subnet where the load balancer for node egress will be created. This - // subnet is expected to be a subnet within the VNET specified in VnetID. This subnet is expected to exist under the - // same subscription as SubscriptionID. - // - // In ARO HCP, managed services will create the aforementioned load balancer in ResourceGroupName. +// ClusterAutoscaling specifies auto-scaling behavior that applies to all +// NodePools associated with a control plane. +// +kubebuilder:validation:XValidation:rule="self.scaling == 'ScaleUpAndScaleDown' ? true : !has(self.scaleDown)",message="scaleDown can only be set when scaling is ScaleUpAndScaleDown" +type ClusterAutoscaling struct { + // scaling defines the scaling behavior for the cluster autoscaler. + // ScaleUpOnly means the autoscaler will only scale up nodes, never scale down. + // ScaleUpAndScaleDown means the autoscaler will both scale up and scale down nodes. + // When set to ScaleUpAndScaleDown, the scaleDown field can be used to configure scale down behavior. // - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="SubnetID is immutable" - // +kubebuilder:validation:Required - // +immutable - // +required - SubnetID string `json:"subnetID"` - - // SubscriptionID is a unique identifier for an Azure subscription used to manage resources. + // Note: This field is only supported in OpenShift versions 4.19 and above. 
// - // +kubebuilder:validation:Required - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="SubscriptionID is immutable" - // +immutable - // +required - SubscriptionID string `json:"subscriptionID"` + // +kubebuilder:default=ScaleUpAndScaleDown + // +optional + Scaling ScalingType `json:"scaling,omitempty"` - // MachineIdentityID is used as the user-assigned identity to be assigned to the VMs + // scaleDown configures the behavior of the Cluster Autoscaler scale down operation. + // This field is only valid when scaling is set to ScaleUpAndScaleDown. // // +optional - MachineIdentityID string `json:"machineIdentityID,omitempty"` + ScaleDown *ScaleDownConfig `json:"scaleDown,omitempty"` - // SecurityGroupID is the ID of an existing security group on the SubnetID. This field is provided as part of the - // configuration for the Azure cloud provider, aka Azure cloud controller manager (CCM). This security group is - // expected to exist under the same subscription as SubscriptionID. + // balancingIgnoredLabels sets "--balancing-ignore-label