diff --git a/Makefile b/Makefile index cc716378f..2010d453f 100644 --- a/Makefile +++ b/Makefile @@ -151,15 +151,9 @@ generate: $(CONTROLLER_GEN) #EXHELP Generate code containing DeepCopy, DeepCopyI $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." .PHONY: verify -verify: tidy fmt generate manifests crd-ref-docs update-k8s-values #HELP Verify all generated code is up-to-date. +verify: tidy fmt generate manifests crd-ref-docs #HELP Verify all generated code is up-to-date. git diff --exit-code -.PHONY: update-k8s-values # HELP Update PSA labels in config manifests with Kubernetes version -update-k8s-values: - find config -type f -name '*.yaml' -exec \ - sed -i.bak -E 's/(pod-security.kubernetes.io\/[a-zA-Z-]+-version:).*/\1 "v$(K8S_VERSION)"/g' {} +; - find config -type f -name '*.yaml.bak' -delete - .PHONY: fix-lint fix-lint: $(GOLANGCI_LINT) #EXHELP Fix lint issues $(GOLANGCI_LINT) run --fix --build-tags $(GO_BUILD_TAGS) $(GOLANGCI_LINT_ARGS) @@ -192,7 +186,7 @@ test: manifests generate fmt lint test-unit test-e2e #HELP Run all tests. .PHONY: e2e e2e: #EXHELP Run the e2e tests. - go test -count=1 -v ./test/e2e/... + go test -count=1 -v -run "$(if $(TEST_FILTER),$(TEST_FILTER),.)" ./test/e2e/... E2E_REGISTRY_NAME := docker-registry E2E_REGISTRY_NAMESPACE := operator-controller-e2e @@ -208,7 +202,10 @@ test-ext-dev-e2e: $(OPERATOR_SDK) $(KUSTOMIZE) $(KIND) #HELP Run extension creat test/extension-developer-e2e/setup.sh $(OPERATOR_SDK) $(CONTAINER_RUNTIME) $(KUSTOMIZE) $(KIND) $(KIND_CLUSTER_NAME) $(E2E_REGISTRY_NAMESPACE) go test -count=1 -v ./test/extension-developer-e2e/... -UNIT_TEST_DIRS := $(shell go list ./... | grep -v /test/) +# Define TEST_PKGS to be either user-specified or a default set of packages: +ifeq ($(origin TEST_PKGS), undefined) +TEST_PKGS := $(shell go list ./... | grep -v /test/) +endif COVERAGE_UNIT_DIR := $(ROOT_DIR)/coverage/unit .PHONY: envtest-k8s-bins #HELP Uses setup-envtest to download and install the binaries required to run ENVTEST-test based locally at the project/bin directory. @@ -224,7 +221,8 @@ test-unit: $(SETUP_ENVTEST) envtest-k8s-bins #HELP Run the unit tests -tags '$(GO_BUILD_TAGS)' \ -cover -coverprofile ${ROOT_DIR}/coverage/unit.out \ -count=1 -race -short \ - $(UNIT_TEST_DIRS) \ + -run "$(if $(TEST_FILTER),$(TEST_FILTER),.)" \ + $(TEST_PKGS) \ -test.gocoverdir=$(COVERAGE_UNIT_DIR) .PHONY: image-registry @@ -303,10 +301,15 @@ kind-clean: $(KIND) #EXHELP Delete the kind cluster. 
#SECTION Build -ifeq ($(origin VERSION), undefined) +# attempt to generate the VERSION attribute for certificates +# fail if it is unset afterwards, since the side effects are indirect +ifeq ($(strip $(VERSION)),) VERSION := $(shell git describe --tags --always --dirty) endif export VERSION +ifeq ($(strip $(VERSION)),) + $(error undefined VERSION; resulting certs will be invalid) +endif ifeq ($(origin CGO_ENABLED), undefined) CGO_ENABLED := 0 diff --git a/api/v1/clustercatalog_types.go b/api/v1/clustercatalog_types.go index f083c1128..ee1391b79 100644 --- a/api/v1/clustercatalog_types.go +++ b/api/v1/clustercatalog_types.go @@ -34,6 +34,14 @@ const ( AvailabilityModeAvailable AvailabilityMode = "Available" AvailabilityModeUnavailable AvailabilityMode = "Unavailable" + + // Condition types + TypeServing = "Serving" + + // Serving Reasons + ReasonAvailable = "Available" + ReasonUnavailable = "Unavailable" + ReasonUserSpecifiedUnavailable = "UserSpecifiedUnavailable" ) //+kubebuilder:object:root=true diff --git a/api/v1/clusterextension_types_test.go b/api/v1/clusterextension_types_test.go index f05427348..7bc9a3393 100644 --- a/api/v1/clusterextension_types_test.go +++ b/api/v1/clusterextension_types_test.go @@ -5,7 +5,6 @@ import ( "go/ast" "go/parser" "go/token" - "io/fs" "strconv" "strings" "testing" @@ -52,49 +51,47 @@ func TestClusterExtensionReasonRegistration(t *testing.T) { } } -// parseConstants parses the values of the top-level constants in the current -// directory whose names start with the given prefix. When running as part of a -// test, the current directory is the directory of the file that contains the -// test in which this function is called. +// parseConstants parses the values of the top-level constants that start with the given prefix, +// in the files clusterextension_types.go and common_types.go. func parseConstants(prefix string) ([]string, error) { fset := token.NewFileSet() - // ParseDir returns a map of package name to package ASTs. An AST is a representation of the source code - // that can be traversed to extract information. The map is keyed by the package name. - pkgs, err := parser.ParseDir(fset, ".", func(info fs.FileInfo) bool { - return !strings.HasSuffix(info.Name(), "_test.go") - }, 0) - if err != nil { - return nil, err + // An AST is a representation of the source code that can be traversed to extract information. + // Converting files to AST representation to extract information. + parseFiles, astFiles := []string{"clusterextension_types.go", "common_types.go"}, []*ast.File{} + for _, file := range parseFiles { + p, err := parser.ParseFile(fset, file, nil, 0) + if err != nil { + return nil, err + } + astFiles = append(astFiles, p) } var constValues []string - // Iterate all of the top-level declarations in each package's files, - // looking for constants that start with the prefix. When we find one, - // add its value to the constValues list. - for _, pkg := range pkgs { - for _, f := range pkg.Files { - for _, d := range f.Decls { - genDecl, ok := d.(*ast.GenDecl) - if !ok { + // Iterate all of the top-level declarations in each file, looking + // for constants that start with the prefix. When we find one, add + // its value to the constValues list. 
+ for _, f := range astFiles { + for _, d := range f.Decls { + genDecl, ok := d.(*ast.GenDecl) + if !ok { + continue + } + for _, s := range genDecl.Specs { + valueSpec, ok := s.(*ast.ValueSpec) + if !ok || len(valueSpec.Names) != 1 || valueSpec.Names[0].Obj.Kind != ast.Con || !strings.HasPrefix(valueSpec.Names[0].String(), prefix) { continue } - for _, s := range genDecl.Specs { - valueSpec, ok := s.(*ast.ValueSpec) - if !ok || len(valueSpec.Names) != 1 || valueSpec.Names[0].Obj.Kind != ast.Con || !strings.HasPrefix(valueSpec.Names[0].String(), prefix) { + for _, val := range valueSpec.Values { + lit, ok := val.(*ast.BasicLit) + if !ok || lit.Kind != token.STRING { continue } - for _, val := range valueSpec.Values { - lit, ok := val.(*ast.BasicLit) - if !ok || lit.Kind != token.STRING { - continue - } - v, err := strconv.Unquote(lit.Value) - if err != nil { - return nil, fmt.Errorf("unquote literal string %s: %v", lit.Value, err) - } - constValues = append(constValues, v) + v, err := strconv.Unquote(lit.Value) + if err != nil { + return nil, fmt.Errorf("unquote literal string %s: %v", lit.Value, err) } + constValues = append(constValues, v) } } } diff --git a/api/v1/common_types.go b/api/v1/common_types.go index 6008d7557..5478039c9 100644 --- a/api/v1/common_types.go +++ b/api/v1/common_types.go @@ -19,7 +19,6 @@ package v1 const ( TypeInstalled = "Installed" TypeProgressing = "Progressing" - TypeServing = "Serving" // Progressing reasons ReasonSucceeded = "Succeeded" @@ -29,9 +28,4 @@ const ( // Terminal reasons ReasonDeprecated = "Deprecated" ReasonFailed = "Failed" - - // Serving reasons - ReasonAvailable = "Available" - ReasonUnavailable = "Unavailable" - ReasonUserSpecifiedUnavailable = "UserSpecifiedUnavailable" ) diff --git a/commitchecker.yaml b/commitchecker.yaml index 4137e0e56..0c0526961 100644 --- a/commitchecker.yaml +++ b/commitchecker.yaml @@ -1,4 +1,4 @@ -expectedMergeBase: 7fc18c64660c97c70e4c6704147a746f657543f4 +expectedMergeBase: c9fa0b5be4bd20214af72b13e8d9a0d326da9e8d upstreamBranch: main upstreamOrg: operator-framework upstreamRepo: operator-controller diff --git a/config/base/common/namespace.yaml b/config/base/common/namespace.yaml index 3faa861ed..99d47415f 100644 --- a/config/base/common/namespace.yaml +++ b/config/base/common/namespace.yaml @@ -4,5 +4,5 @@ metadata: labels: app.kubernetes.io/part-of: olm pod-security.kubernetes.io/enforce: restricted - pod-security.kubernetes.io/enforce-version: "v1.32" + pod-security.kubernetes.io/enforce-version: latest name: system diff --git a/docs/draft/api-reference/catalogd-webserver-metas-endpoint.md b/docs/draft/api-reference/catalogd-webserver-metas-endpoint.md new file mode 100644 index 000000000..6b27ba27e --- /dev/null +++ b/docs/draft/api-reference/catalogd-webserver-metas-endpoint.md @@ -0,0 +1,111 @@ +# Catalogd web server + +[Catalogd](https://github.com/operator-framework/operator-controller/tree/main/catalogd), the OLM v1 component for making catalog contents available on cluster, includes +a web server that serves catalog contents to clients via HTTP(S) endpoints. + +The endpoints to retrieve information about installable cluster extensions can be composed from the `.status.urls.base` of a `ClusterCatalog` resource with the selected access API path. + +Currently, there are two API endpoints: + +1. `api/v1/all` endpoint that provides access to the FBC metadata in its entirety.
+ +As an example, to access the full FBC via the v1 API endpoint (indicated by path `api/v1/all`) where `.status.urls.base` is + +```yaml + urls: + base: https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio +``` + +the URL to access the service would be `https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio/api/v1/all` + +2. `api/v1/metas` endpoint that allows clients to retrieve filtered portions of the FBC. + +The metas endpoint accepts parameters which are one of the sub-types of the `Meta` [definition](https://github.com/operator-framework/operator-registry/blob/e15668c933c03e229b6c80025fdadb040ab834e0/alpha/declcfg/declcfg.go#L111-L114), following the pattern `/api/v1/metas?<parameter>[&<parameter>...]`. + +As an example, to access only the [package schema](https://olm.operatorframework.io/docs/reference/file-based-catalogs/#olmpackage-1) blobs of the FBC via the `api/v1/metas` endpoint where `.status.urls.base` is + +```yaml + urls: + base: https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio +``` + +the URL to access the service would be `https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio/api/v1/metas?schema=olm.package` + +For more examples of valid queries that can be made to the `api/v1/metas` service endpoint, please see [Catalog Queries](../howto/catalog-queries.md). + +!!! note + + The values of the `.status.urls` field in a `ClusterCatalog` resource are arbitrary string values and can change at any time. + While there are no guarantees on the exact value of this field, it will always contain catalog-specific API endpoints for use + by clients to make a request from within the cluster. + +## Interacting With the Server + +### Supported HTTP Methods + +The HTTP request methods supported by the catalogd web server are: + +- [GET](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/GET) +- [HEAD](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/HEAD) +- 
### Response Format + +Responses are encoded as a [JSON Lines](https://jsonlines.org/) stream of [File-Based Catalog](https://olm.operatorframework.io/docs/reference/file-based-catalogs) (FBC) [Meta](https://olm.operatorframework.io/docs/reference/file-based-catalogs/#schema) objects delimited by newlines. + +???
example "Example JSON-encoded FBC snippet" + + ```json + { + "schema": "olm.package", + "name": "cockroachdb", + "defaultChannel": "stable-v6.x" + } + { + "schema": "olm.channel", + "name": "stable-v6.x", + "package": "cockroachdb", + "entries": [ + { + "name": "cockroachdb.v6.0.0", + "skipRange": "<6.0.0" + } + ] + } + { + "schema": "olm.bundle", + "name": "cockroachdb.v6.0.0", + "package": "cockroachdb", + "image": "quay.io/openshift-community-operators/cockroachdb@sha256:d3016b1507515fc7712f9c47fd9082baf9ccb070aaab58ed0ef6e5abdedde8ba", + "properties": [ + { + "type": "olm.package", + "value": { + "packageName": "cockroachdb", + "version": "6.0.0" + } + } + ] + } + ``` + + Corresponding JSON lines response: + ```jsonlines + {"schema":"olm.package","name":"cockroachdb","defaultChannel":"stable-v6.x"} + {"schema":"olm.channel","name":"stable-v6.x","package":"cockroachdb","entries":[{"name":"cockroachdb.v6.0.0","skipRange":"<6.0.0"}]} + {"schema":"olm.bundle","name":"cockroachdb.v6.0.0","package":"cockroachdb","image":"quay.io/openshift-community-operators/cockroachdb@sha256:d3016b1507515fc7712f9c47fd9082baf9ccb070aaab58ed0ef6e5abdedde8ba","properties":[{"type":"olm.package","value":{"packageName":"cockroachdb","version":"6.0.0"}}]} + ``` + +### Compression Support + +The `catalogd` web server supports gzip compression of responses, which can significantly reduce associated network traffic. In order to signal that the client handles compressed responses, the client must include `Accept-Encoding: gzip` as a header in the HTTP request. + +The web server will include a `Content-Encoding: gzip` header in compressed responses. + +!!! note + + Only catalogs whose uncompressed response body would result in a response size greater than 1400 bytes will be compressed. + +### Cache Header Support + +For clients interested in caching the information returned from the `catalogd` web server, the `Last-Modified` header is set +on responses and the `If-Modified-Since` header is supported for requests.
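+ +The following is a minimal illustrative sketch (not part of the upstream examples) combining the two behaviors above; it assumes the catalog has been made reachable at `localhost:8443` (for example via `kubectl -n olmv1-system port-forward svc/catalogd-service 8443:443`) and that the `If-Modified-Since` date is a placeholder: + +``` terminal +# --compressed sends Accept-Encoding: gzip and transparently decompresses the response body +curl -k --compressed -H 'If-Modified-Since: Mon, 24 Feb 2025 00:00:00 GMT' -o operatorhubio.jsonl -w '%{http_code}\n' 'https://localhost:8443/catalogs/operatorhubio/api/v1/all' +``` + +A `304 Not Modified` status with an empty body indicates the previously cached copy is still current; a `200` response carries the (possibly gzip-encoded) catalog stream.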
diff --git a/docs/draft/howto/catalog-queries-metas-endpoint.md b/docs/draft/howto/catalog-queries-metas-endpoint.md new file mode 100644 index 000000000..f723d504b --- /dev/null +++ b/docs/draft/howto/catalog-queries-metas-endpoint.md @@ -0,0 +1,93 @@ +# Catalog queries + +After you [add a catalog of extensions](../tutorials/add-catalog.md) to your cluster, you must port forward your catalog as a service. +Then you can query the catalog by using `curl` commands and the `jq` CLI tool to find extensions to install. + +## Prerequisites + +* You have added a ClusterCatalog of extensions, such as [OperatorHub.io](https://operatorhub.io), to your cluster. +* You have installed the `jq` CLI tool. + +!!! note + By default, Catalogd is installed with TLS enabled for the catalog webserver. + The following examples will show this default behavior, but for simplicity's sake will ignore TLS verification in the curl commands using the `-k` flag. + +!!! note + While using the `/api/v1/metas` endpoint shown in the below examples, it is important to note that the metas endpoint accepts parameters which are one of the sub-types of the `Meta` [definition](https://github.com/operator-framework/operator-registry/blob/e15668c933c03e229b6c80025fdadb040ab834e0/alpha/declcfg/declcfg.go#L111-L114), following the pattern `/api/v1/metas?<parameter>[&<parameter>...]`. e.g. `schema=<schema>&package=<package>`, `schema=<schema>&name=<name>`, and `package=<package>&name=<name>` are all valid parameter combinations. However `schema=<schema>&version=<version>` is not a valid parameter combination, since version is not a first class FBC meta field. + +You also need to port forward the catalog server service: + +``` terminal +kubectl -n olmv1-system port-forward svc/catalogd-service 8443:443 +``` + +Now you can use the `curl` command with `jq` to query catalogs that are installed on your cluster. + +## Package queries + +* Available packages in a catalog: + ``` terminal + curl -k 'https://localhost:8443/catalogs/operatorhubio/api/v1/metas?schema=olm.package' + ``` + +* Packages that support `AllNamespaces` install mode and do not use webhooks: + ``` terminal + curl -k 'https://localhost:8443/catalogs/operatorhubio/api/v1/metas?schema=olm.bundle' | jq -cs '[.[] | select(.schema == "olm.bundle" and (.properties[] | select(.type == "olm.csv.metadata").value.installModes[] | select(.type == "AllNamespaces" and .supported == true)) and .spec.webhookdefinitions == null) | .package] | unique[]' + ``` + +* Package metadata: + ``` terminal + curl -k 'https://localhost:8443/catalogs/operatorhubio/api/v1/metas?schema=olm.package&name=<package_name>' + ``` + + `<package_name>` + : Name of the package from the catalog you are querying. + +* Blobs that belong to a package (that are not schema=olm.package): + ``` terminal + curl -k 'https://localhost:8443/catalogs/operatorhubio/api/v1/metas?package=<package_name>' + ``` + + `<package_name>` + : Name of the package from the catalog you are querying. + +Note: the `olm.package` schema blob does not have the `package` field set. In other words, to get all the blobs that belong to a package, along with the olm.package blob for that package, a combination of both of the above queries is needed. + +## Channel queries + +* Channels in a package: + ``` terminal + curl -k 'https://localhost:8443/catalogs/operatorhubio/api/v1/metas?schema=olm.channel&package=<package_name>' + ``` + + `<package_name>` + : Name of the package from the catalog you are querying. + +* Versions in a channel: + ``` terminal + curl -k 'https://localhost:8443/catalogs/operatorhubio/api/v1/metas?schema=olm.channel&package=zoperator&name=alpha' | jq -s '.[] | .entries | .[] | .name' + ``` + + `<package_name>` + : Name of the package from the catalog you are querying. + + `<channel_name>` + : Name of the channel for a given package. + +## Bundle queries + +* Bundles in a package: + ``` terminal + curl -k 'https://localhost:8443/catalogs/operatorhubio/api/v1/metas?schema=olm.bundle&package=<package_name>' + ``` + + `<package_name>` + : Name of the package from the catalog you are querying. + +* Bundle dependencies and available APIs: + ``` terminal + curl -k 'https://localhost:8443/catalogs/operatorhubio/api/v1/metas?schema=olm.bundle&name=<bundle_name>' | jq -s '.[] | .properties[] | select(.type=="olm.gvk")' + ``` + + `<bundle_name>` + : Name of the bundle for a given package. diff --git a/docs/draft/tutorials/explore-available-content-metas-endpoint.md b/docs/draft/tutorials/explore-available-content-metas-endpoint.md new file mode 100644 index 000000000..8ece0a75d --- /dev/null +++ b/docs/draft/tutorials/explore-available-content-metas-endpoint.md @@ -0,0 +1,147 @@ +--- +hide: + - toc +--- + +# Explore Available Content + +After you [add a catalog of extensions](add-catalog.md) to your cluster, you must port forward your catalog as a service. +Then you can query the catalog by using `curl` commands and the `jq` CLI tool to find extensions to install. + +## Prerequisites + +* You have added a ClusterCatalog of extensions, such as [OperatorHub.io](https://operatorhub.io), to your cluster. +* You have installed the `jq` CLI tool. + +!!! note + By default, Catalogd is installed with TLS enabled for the catalog webserver.
+ The following examples will show this default behavior, but for simplicity's sake will ignore TLS verification in the curl commands using the `-k` flag. + +## Procedure + +1. Port forward the catalog server service: + + ``` terminal + kubectl -n olmv1-system port-forward svc/catalogd-service 8443:443 + ``` + +2. Return a list of all the extensions in a catalog via the v1 API endpoint: + ``` terminal + curl -k 'https://localhost:8443/catalogs/operatorhubio/api/v1/metas?schema=olm.package' | jq -s '.[] | .name' + ``` + + ??? success + ``` text title="Example output" + "ack-acm-controller" + "ack-acmpca-controller" + "ack-apigatewayv2-controller" + "ack-applicationautoscaling-controller" + "ack-cloudfront-controller" + "ack-cloudtrail-controller" + "ack-cloudwatch-controller" + "ack-cloudwatchlogs-controller" + "ack-dynamodb-controller" + "ack-ec2-controller" + "ack-ecr-controller" + "ack-ecs-controller" + "ack-efs-controller" + "ack-eks-controller" + "ack-elasticache-controller" + "ack-emrcontainers-controller" + "ack-eventbridge-controller" + "ack-iam-controller" + "ack-kafka-controller" + "ack-keyspaces-controller" + "ack-kinesis-controller" + "ack-kms-controller" + "ack-lambda-controller" + "ack-memorydb-controller" + "ack-mq-controller" + "ack-networkfirewall-controller" + "ack-opensearchservice-controller" + "ack-pipes-controller" + "ack-prometheusservice-controller" + "ack-rds-controller" + "ack-route53-controller" + "ack-route53resolver-controller" + "ack-s3-controller" + "ack-sagemaker-controller" + "ack-secretsmanager-controller" + "ack-sfn-controller" + "ack-sns-controller" + "ack-sqs-controller" + "aerospike-kubernetes-operator" + "airflow-helm-operator" + "aiven-operator" + "akka-cluster-operator" + "alvearie-imaging-ingestion" + "anchore-engine" + "apch-operator" + "api-operator" + "api-testing-operator" + "apicast-community-operator" + "apicurio-registry" + "apimatic-kubernetes-operator" + "app-director-operator" + "appdynamics-operator" + "application-services-metering-operator" + "appranix" + "aqua" + "argocd-operator" + ... + ``` + + !!! important + Currently, OLM 1.0 does not support the installation of extensions that use webhooks or that target a single or specified set of namespaces. + +3. Return a list of packages that support `AllNamespaces` install mode, do not use webhooks, and where the channel head version uses the `olm.csv.metadata` format: + + ``` terminal + curl -k 'https://localhost:8443/catalogs/operatorhubio/api/v1/metas?schema=olm.bundle' | jq -cs '[.[] | select(.properties[] | select(.type == "olm.csv.metadata").value.installModes[] | select(.type == "AllNamespaces" and .supported == true) and .spec.webhookdefinitions == null) | .package] | unique[]' + ``` + + ??? success + ``` text title="Example output" + "ack-acm-controller" + "ack-acmpca-controller" + "ack-apigateway-controller" + "ack-apigatewayv2-controller" + "ack-applicationautoscaling-controller" + "ack-athena-controller" + "ack-cloudfront-controller" + "ack-cloudtrail-controller" + "ack-cloudwatch-controller" + "ack-cloudwatchlogs-controller" + "ack-documentdb-controller" + "ack-dynamodb-controller" + "ack-ec2-controller" + "ack-ecr-controller" + "ack-ecs-controller" + ... + ``` + +4. Inspect the contents of an extension's metadata: + + ``` terminal + curl -k 'https://localhost:8443/catalogs/operatorhubio/api/v1/metas?schema=olm.package&name=<package_name>' + ``` + + `<package_name>` + : Specifies the name of the package you want to inspect. + + ???
success + ``` text title="Example output" + { + "defaultChannel": "stable-v6.x", + "icon": { + "base64data": "PHN2ZyB4bWxucz0ia... + "mediatype": "image/svg+xml" + }, + "name": "cockroachdb", + "schema": "olm.package" + } + ``` + +### Additional resources + +* [Catalog queries](../howto/catalog-queries.md) diff --git a/go.mod b/go.mod index 1192567ed..b676b007e 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/operator-framework/operator-controller go 1.23.4 require ( - carvel.dev/kapp v0.64.0 github.com/BurntSushi/toml v1.4.0 github.com/Masterminds/semver/v3 v3.3.1 github.com/blang/semver/v4 v4.0.0 @@ -17,6 +16,7 @@ require ( github.com/klauspost/compress v1.18.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 + github.com/openshift/crd-schema-checker v0.0.0-20240404194209-35a9033b1d11 github.com/operator-framework/api v0.30.0 github.com/operator-framework/helm-operator-plugins v0.8.0 github.com/operator-framework/operator-registry v1.50.0 @@ -25,10 +25,10 @@ require ( github.com/spf13/cobra v1.9.1 github.com/stretchr/testify v1.10.0 golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 - golang.org/x/sync v0.11.0 - golang.org/x/tools v0.30.0 + golang.org/x/sync v0.12.0 + golang.org/x/tools v0.31.0 gopkg.in/yaml.v2 v2.4.0 - helm.sh/helm/v3 v3.17.1 + helm.sh/helm/v3 v3.17.2 k8s.io/api v0.32.2 k8s.io/apiextensions-apiserver v0.32.2 k8s.io/apimachinery v0.32.2 @@ -43,7 +43,6 @@ require ( ) require ( - carvel.dev/vendir v0.40.0 // indirect cel.dev/expr v0.18.0 // indirect dario.cat/mergo v1.0.1 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect @@ -76,9 +75,6 @@ require ( github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.0 // indirect github.com/containers/storage v1.56.1 // indirect - github.com/cppforlife/cobrautil v0.0.0-20221130162803-acdfead391ef // indirect - github.com/cppforlife/color v1.9.1-0.20200716202919-6706ac40b835 // indirect - github.com/cppforlife/go-cli-ui v0.0.0-20220425131040-94f26b16bc14 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect github.com/cyphar/filepath-securejoin v0.3.6 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -133,7 +129,6 @@ require ( github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect @@ -141,8 +136,6 @@ require ( github.com/joelanford/ignore v0.1.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/k14s/starlark-go v0.0.0-20200720175618-3a5c849cc368 // indirect - github.com/k14s/ytt v0.36.0 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect @@ -176,7 +169,6 @@ require ( github.com/oklog/ulid v1.3.1 // indirect github.com/onsi/gomega v1.36.2 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect - github.com/openshift/crd-schema-checker v0.0.0-20240404194209-35a9033b1d11 // indirect github.com/operator-framework/operator-lib 
v0.17.0 // indirect github.com/otiai10/copy v1.14.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect @@ -203,8 +195,6 @@ require ( github.com/ulikunitz/xz v0.5.12 // indirect github.com/vbatts/tar-split v0.11.6 // indirect github.com/vbauerster/mpb/v8 v8.8.3 // indirect - github.com/vito/go-interact v1.0.1 // indirect - github.com/vmware-tanzu/carvel-kapp-controller v0.51.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect @@ -223,13 +213,13 @@ require ( go.opentelemetry.io/otel/sdk v1.33.0 // indirect go.opentelemetry.io/otel/trace v1.33.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.33.0 // indirect - golang.org/x/mod v0.23.0 // indirect - golang.org/x/net v0.35.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/net v0.37.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sys v0.30.0 // indirect - golang.org/x/term v0.29.0 // indirect - golang.org/x/text v0.22.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.10.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect @@ -242,7 +232,7 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect - k8s.io/kubectl v0.32.1 // indirect + k8s.io/kubectl v0.32.2 // indirect oras.land/oras-go v1.2.5 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect diff --git a/go.sum b/go.sum index 56abb8bf6..78ad8c60c 100644 --- a/go.sum +++ b/go.sum @@ -1,24 +1,8 @@ -carvel.dev/kapp v0.64.0 h1:WeQ8XkccOonye7sCxOJnukKgRhWtHGDlt4tY4aFIMJM= -carvel.dev/kapp v0.64.0/go.mod h1:6DoB9+JP27u4ZZbolK7ObmS1vhaVoOVrfqX1pj0Z6MQ= -carvel.dev/vendir v0.40.0 h1:JdhCp/EjAPGI8F5zoAVYwZHf1sPEFee19RpgGb3ciT8= -carvel.dev/vendir v0.40.0/go.mod h1:XPdluJu7322RZNx05AA4gYnV52aKywBdh7Ma12GuM2Q= cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dario.cat/mergo v1.0.1 
h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= @@ -28,7 +12,6 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg6 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -45,7 +28,6 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg= github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= @@ -54,20 +36,14 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-lambda-go v1.26.0/go.mod h1:jJmlefzPfGnckuHdXX7/80O3BvUUi12XOkbv4w9SGLU= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks 
v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= @@ -75,14 +51,10 @@ github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= @@ -117,30 +89,11 @@ github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sir github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U= github.com/containers/storage v1.56.1 h1:gDZj/S6Zxus4Xx42X6iNB3ODXuh0qoOdH/BABfrvcKo= github.com/containers/storage v1.56.1/go.mod h1:c6WKowcAlED/DkWGNuL9bvGYqIWCVy7isRMdCSKWNjk= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cppforlife/cobrautil v0.0.0-20200514214827-bb86e6965d72/go.mod h1:2w+qxVu2KSGW78Ex/XaIqfh/OvBgjEsmN53S4T8vEyA= 
-github.com/cppforlife/cobrautil v0.0.0-20221130162803-acdfead391ef h1:de10GNLe45JTMghl2qf9WH17H/BjGShK41X3vKAsPJA= -github.com/cppforlife/cobrautil v0.0.0-20221130162803-acdfead391ef/go.mod h1:2w+qxVu2KSGW78Ex/XaIqfh/OvBgjEsmN53S4T8vEyA= -github.com/cppforlife/color v1.9.1-0.20200716202919-6706ac40b835 h1:mYQweUIBD+TBRjIeQnJmXr0GSVMpI6O0takyb/aaOgo= -github.com/cppforlife/color v1.9.1-0.20200716202919-6706ac40b835/go.mod h1:dYeVsKp1vvK8XjdTPR1gF+uk+9doxKeO3hqQTOCr7T4= -github.com/cppforlife/go-cli-ui v0.0.0-20200505234325-512793797f05/go.mod h1:I0qrzCmuPWYI6kAOvkllYjaW2aovclWbJ96+v+YyHb0= -github.com/cppforlife/go-cli-ui v0.0.0-20220425131040-94f26b16bc14 h1:MjRdR01xh0sfkeS3OOBv+MYkYsrbHuTDc4rfBnVdFaI= -github.com/cppforlife/go-cli-ui v0.0.0-20220425131040-94f26b16bc14/go.mod h1:AlgTssDlstr4mf92TR4DPITLfl5+7wEY4cKStCmeeto= -github.com/cppforlife/go-patch v0.0.0-20240118020416-2147782e467b h1:+8LQctLhaj+63L/37l8IK/5Q3odN6RzWlglonUwrKok= -github.com/cppforlife/go-patch v0.0.0-20240118020416-2147782e467b/go.mod h1:67a7aIi94FHDZdoeGSJRRFDp66l9MhaAG1yGxpUoFD8= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM= @@ -151,10 +104,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/distribution/distribution/v3 v3.0.0-rc.1 h1:6M4ewmPBUhF7wtQ8URLOQ1W/PQuVKiD1u8ymwLDUGqQ= github.com/distribution/distribution/v3 v3.0.0-rc.1/go.mod h1:tFjaPDeHCrLg28e4feBIy27cP+qmrc/mvkl6MFIfVi4= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -189,7 +140,6 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= 
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -198,13 +148,10 @@ github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7Dlme github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -213,7 +160,6 @@ github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/S github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= @@ -258,19 +204,15 @@ github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncV github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y= github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -282,12 +224,9 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.22.1 h1:AfVXx3chM2qwoSbM7Da8g8hX8OVSkBFwX+rz2+PcK40= @@ -308,79 +247,43 @@ github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lw github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c h1:fEE5/5VNnYUoBOj2I9TP8Jc+a7lge3QWn9DKE7NCwfc= github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c/go.mod h1:ObS/W+h8RYb1Y7fYivughjxojTmIu5iAIjSrSLCLeqE= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid 
v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hpcloud/tail v1.0.1-0.20180514194441-a1dbeea552b7/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -391,24 +294,13 @@ github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/joelanford/ignore v0.1.1 h1:vKky5RDoPT+WbONrbQBgOn95VV/UPh4ejlyAbbzgnQk= github.com/joelanford/ignore v0.1.1/go.mod h1:8eho/D8fwQ3rIXrLwE23AaeaGDNXqLE9QJ3zJ4LIPCw= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls 
v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/k14s/difflib v0.0.0-20201117154628-0c031775bf57/go.mod h1:B0xN2MiNBGWOWi9CcfAo9LBI8IU4J1utlbOIJCsmKr4= -github.com/k14s/difflib v0.0.0-20240118055029-596a7a5585c3 h1:q2ikACDbDDbyUcN9JkDcNMGhIx1EBRkctAsPZMr35qM= -github.com/k14s/difflib v0.0.0-20240118055029-596a7a5585c3/go.mod h1:B0xN2MiNBGWOWi9CcfAo9LBI8IU4J1utlbOIJCsmKr4= -github.com/k14s/starlark-go v0.0.0-20200720175618-3a5c849cc368 h1:4bcRTTSx+LKSxMWibIwzHnDNmaN1x52oEpvnjCy+8vk= -github.com/k14s/starlark-go v0.0.0-20200720175618-3a5c849cc368/go.mod h1:lKGj1op99m4GtQISxoD2t+K+WO/q2NzEPKvfXFQfbCA= -github.com/k14s/ytt v0.36.0 h1:ERr7q+r3ziYJv91fvTx2b76d1MIo3SI/EsAS01WU+Zo= -github.com/k14s/ytt v0.36.0/go.mod h1:awQ3bHBk1qT2Xn3GJVdmaLss2khZOIBBKFd2TNXZNMk= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= @@ -417,13 +309,8 @@ github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -438,16 +325,10 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty 
v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= @@ -457,24 +338,16 @@ github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxU github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= @@ -509,21 +382,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1 
h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -546,26 +408,21 @@ github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang 
v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= @@ -575,19 +432,15 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= @@ -597,25 +450,18 @@ github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmiUq4= github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY= github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs= github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= @@ -625,23 +471,12 @@ github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= @@ -661,22 +496,14 @@ github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= github.com/vbauerster/mpb/v8 v8.8.3 h1:dTOByGoqwaTJYPubhVz3lO5O6MK553XVgUo33LdnNsQ= github.com/vbauerster/mpb/v8 v8.8.3/go.mod h1:JfCCrtcMsJwP6ZwMn9e5LMnNyp3TVNpUWWkN+nd4EWk= -github.com/vito/go-interact v0.0.0-20171111012221-fa338ed9e9ec/go.mod h1:wPlfmglZmRWMYv/qJy3P+fK/UnoQB5ISk4txfNd9tDo= -github.com/vito/go-interact v1.0.1 h1:O8xi8c93bRUv2Tb/v6HdiuGc+WnWt+AQzF74MOOdlBs= -github.com/vito/go-interact v1.0.1/go.mod h1:HrdHSJXD2yn1MhlTwSIMeFgQ5WftiIorszVGd3S/DAA= -github.com/vmware-tanzu/carvel-kapp-controller v0.51.0 h1:lCCHy9n/AzWPtq5gqbINJHgmF32RCUkh9DbVQgx6HAs= -github.com/vmware-tanzu/carvel-kapp-controller v0.51.0/go.mod h1:go1MQz1D2kVgjaE2ZHtuHGECFk8EDLeXMpjmDNDzuJM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -686,12 +513,10 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= @@ -704,8 +529,6 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.mozilla.org/pkcs7 
v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= @@ -754,177 +577,93 @@ go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qq go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 h1:aWwlzYV971S4BXRS9AmqwDLAD85ouC6X+pocatKY58c= golang.org/x/exp 
v0.0.0-20250228200357-dead58393ab7/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180730214132-a0f8a16cb08c/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20220411215600-e5f449aeb171/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod 
h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= @@ -933,8 +672,6 @@ google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go. 
google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e h1:YA5lmSs3zc/5w+xsRcHqpETkaYyK63ivEPzNTcUUlSA= google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -954,45 +691,27 @@ google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwl google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 
v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -helm.sh/helm/v3 v3.17.1 h1:gzVoAD+qVuoJU6KDMSAeo0xRJ6N1znRxz3wyuXRmJDk= -helm.sh/helm/v3 v3.17.1/go.mod h1:nvreuhuR+j78NkQcLC3TYoprCKStLyw5P4T7E5itv2w= +helm.sh/helm/v3 v3.17.2 h1:agYQ5ew2jq5vdx2K7q5W44KyKQrnSubUMCQsjkiv3/o= +helm.sh/helm/v3 v3.17.2/go.mod h1:+uJKMH/UiMzZQOALR3XUf3BLIoczI2RKKD6bMhPh4G8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw= k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y= k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4= @@ -1011,13 +730,12 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= -k8s.io/kubectl v0.32.1 h1:/btLtXLQUU1rWx8AEvX9jrb9LaI6yeezt3sFALhB8M8= -k8s.io/kubectl v0.32.1/go.mod h1:sezNuyWi1STk4ZNPVRIFfgjqMI6XMf+oCVLjZen/pFQ= +k8s.io/kubectl v0.32.2 h1:TAkag6+XfSBgkqK9I7ZvwtF0WVtUAvK8ZqTt+5zi1Us= +k8s.io/kubectl v0.32.2/go.mod h1:+h/NQFSPxiDZYX/WZaWw9fwYezGLISP0ud8nQKg+3g8= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.20.2 h1:/439OZVxoEc02psi1h4QO3bHzTgu49bb347Xp4gW1pc= diff --git a/hack/demo/catalogd-demo-script.sh b/hack/demo/catalogd-demo-script.sh index bbde25071..e7f226f24 100755 --- a/hack/demo/catalogd-demo-script.sh +++ b/hack/demo/catalogd-demo-script.sh @@ -23,7 +23,7 @@ kubectl wait --for=condition=Available -n olmv1-system deploy/catalogd-controlle echo "... checking clustercatalog is serving" kubectl wait --for=condition=Serving clustercatalog/operatorhubio --timeout=60s echo "... 
checking clustercatalog is finished unpacking" -kubectl wait --for=condition=Progressing=False clustercatalog/operatorhubio --timeout=60s +kubectl wait --for=condition=Progressing=True clustercatalog/operatorhubio --timeout=60s # port forward the catalogd-service service to interact with the HTTP server serving catalog contents (kubectl -n olmv1-system port-forward svc/catalogd-service 8081:443)& diff --git a/hack/demo/catalogd-metas-demo-script.sh b/hack/demo/catalogd-metas-demo-script.sh new file mode 100755 index 000000000..63fb84b83 --- /dev/null +++ b/hack/demo/catalogd-metas-demo-script.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# +# Welcome to the catalogd metas API endpoint demo +# +trap 'trap - SIGTERM && kill -- -"$$"' SIGINT SIGTERM EXIT + +kind delete cluster +kind create cluster +kubectl cluster-info --context kind-kind +sleep 10 + +# use the install script from the latest github release +curl -L -s https://github.com/operator-framework/operator-controller/releases/latest/download/install.sh | bash + +# inspect crds (clustercatalog) +kubectl get crds -A +kubectl get clustercatalog -A + +# ... checking catalogd controller is available +kubectl wait --for=condition=Available -n olmv1-system deploy/catalogd-controller-manager --timeout=1m + +# patch the deployment to include the feature gate +kubectl patch -n olmv1-system deploy/catalogd-controller-manager --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--feature-gates=APIV1MetasHandler=true"}]' + +# ... waiting for new deployment for catalogd controller to become available +kubectl rollout status -n olmv1-system deploy/catalogd-controller-manager +kubectl wait --for=condition=Available -n olmv1-system deploy/catalogd-controller-manager --timeout=1m +# ... checking clustercatalog is serving +kubectl wait --for=condition=Serving clustercatalog/operatorhubio --timeout=60s +# ... 
checking clustercatalog is finished unpacking (Progressing has gone back to True) +kubectl wait --for=condition=Progressing=True clustercatalog/operatorhubio --timeout=60s + + +# port forward the catalogd-service service to interact with the HTTP server serving catalog contents +(kubectl -n olmv1-system port-forward svc/catalogd-service 8081:443)& + + +# check what 'packages' are available in this catalog +curl -f --retry-all-errors --retry 10 -k 'https://localhost:8081/catalogs/operatorhubio/api/v1/metas?schema=olm.package' | jq -s '.[] | .name' +# check what channels are included in the wavefront package +curl -f -k 'https://localhost:8081/catalogs/operatorhubio/api/v1/metas?schema=olm.channel&package=wavefront' | jq -s '.[] |.name' +# check what bundles are included in the wavefront package +curl -f -k 'https://localhost:8081/catalogs/operatorhubio/api/v1/metas?schema=olm.bundle&package=wavefront' | jq -s '.[] |.name' + diff --git a/internal/operator-controller/conditionsets/conditionsets.go b/internal/operator-controller/conditionsets/conditionsets.go index 1b57a0cd8..c69aff421 100644 --- a/internal/operator-controller/conditionsets/conditionsets.go +++ b/internal/operator-controller/conditionsets/conditionsets.go @@ -31,7 +31,6 @@ var ConditionTypes = []string{ ocv1.TypeChannelDeprecated, ocv1.TypeBundleDeprecated, ocv1.TypeProgressing, - ocv1.TypeServing, } var ConditionReasons = []string{ @@ -40,7 +39,4 @@ var ConditionReasons = []string{ ocv1.ReasonFailed, ocv1.ReasonBlocked, ocv1.ReasonRetrying, - ocv1.ReasonAvailable, - ocv1.ReasonUnavailable, - ocv1.ReasonUserSpecifiedUnavailable, } diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/change_validator.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/change_validator.go new file mode 100644 index 000000000..4678b2de0 --- /dev/null +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/change_validator.go @@ -0,0 +1,171 @@ +// Originally copied from https://github.com/carvel-dev/kapp/tree/d7fc2e15439331aa3a379485bb124e91a0829d2e +// Attribution: +// Copyright 2024 The Carvel Authors. +// SPDX-License-Identifier: Apache-2.0 + +package crdupgradesafety + +import ( + "errors" + "fmt" + "maps" + "reflect" + "slices" + + "github.com/openshift/crd-schema-checker/pkg/manifestcomparators" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ChangeValidation is a function that accepts a FieldDiff +// as a parameter and should return: +// - a boolean representation of whether or not the change +// has been fully handled (i.e. no additional changes exist) +// - an error if the change would be unsafe +type ChangeValidation func(diff FieldDiff) (bool, error) + +// ChangeValidator is a Validation implementation focused on +// handling updates to existing fields in a CRD +type ChangeValidator struct { + // Validations is a slice of ChangeValidations + // to run against each changed field + Validations []ChangeValidation +} + +func (cv *ChangeValidator) Name() string { + return "ChangeValidator" +} + +// Validate will compare each version in the provided existing and new CRDs. +// The ChangeValidator is tailored to handling updates to existing fields in +// each version of a CRD. As such, the following is assumed: +// - Validating the removal of versions during an update is handled outside of this +// validator. 
If a version in the existing version of the CRD does not exist in the new +// version that version of the CRD is skipped in this validator. +// - Removal of existing fields is unsafe. Regardless of whether or not this is handled +// by a validator outside this one, if a field is present in a version provided by the existing CRD +// but not present in the same version provided by the new CRD this validation will fail. +// +// Additionally, any changes that are not validated and handled by the known ChangeValidations +// are deemed as unsafe and returns an error. +func (cv *ChangeValidator) Validate(old, new apiextensionsv1.CustomResourceDefinition) error { + errs := []error{} + for _, version := range old.Spec.Versions { + newVersion := manifestcomparators.GetVersionByName(&new, version.Name) + if newVersion == nil { + // if the new version doesn't exist skip this version + continue + } + flatOld := FlattenSchema(version.Schema.OpenAPIV3Schema) + flatNew := FlattenSchema(newVersion.Schema.OpenAPIV3Schema) + + diffs, err := CalculateFlatSchemaDiff(flatOld, flatNew) + if err != nil { + errs = append(errs, fmt.Errorf("calculating schema diff for CRD version %q", version.Name)) + continue + } + + for _, field := range slices.Sorted(maps.Keys(diffs)) { + diff := diffs[field] + + handled := false + for _, validation := range cv.Validations { + ok, err := validation(diff) + if err != nil { + errs = append(errs, fmt.Errorf("version %q, field %q: %w", version.Name, field, err)) + } + if ok { + handled = true + break + } + } + + if !handled { + errs = append(errs, fmt.Errorf("version %q, field %q has unknown change, refusing to determine that change is safe", version.Name, field)) + } + } + } + + if len(errs) > 0 { + return errors.Join(errs...) + } + return nil +} + +type FieldDiff struct { + Old *apiextensionsv1.JSONSchemaProps + New *apiextensionsv1.JSONSchemaProps +} + +// FlatSchema is a flat representation of a CRD schema. +type FlatSchema map[string]*apiextensionsv1.JSONSchemaProps + +// FlattenSchema takes in a CRD version OpenAPIV3Schema and returns +// a flattened representation of it. For example, a CRD with a schema of: +// ```yaml +// +// ... +// spec: +// type: object +// properties: +// foo: +// type: string +// bar: +// type: string +// ... +// +// ``` +// would be represented as: +// +// map[string]*apiextensionsv1.JSONSchemaProps{ +// "^": {}, +// "^.spec": {}, +// "^.spec.foo": {}, +// "^.spec.bar": {}, +// } +// +// where "^" represents the "root" schema +func FlattenSchema(schema *apiextensionsv1.JSONSchemaProps) FlatSchema { + fieldMap := map[string]*apiextensionsv1.JSONSchemaProps{} + + manifestcomparators.SchemaHas(schema, + field.NewPath("^"), + field.NewPath("^"), + nil, + func(s *apiextensionsv1.JSONSchemaProps, _, simpleLocation *field.Path, _ []*apiextensionsv1.JSONSchemaProps) bool { + fieldMap[simpleLocation.String()] = s.DeepCopy() + return false + }) + + return fieldMap +} + +// CalculateFlatSchemaDiff finds fields in a FlatSchema that are different +// and returns a mapping of field --> old and new field schemas. If a field +// exists in the old FlatSchema but not the new an empty diff mapping and an error is returned. +func CalculateFlatSchemaDiff(o, n FlatSchema) (map[string]FieldDiff, error) { + diffMap := map[string]FieldDiff{} + for field, schema := range o { + if _, ok := n[field]; !ok { + return diffMap, fmt.Errorf("field %q in existing not found in new", field) + } + newSchema := n[field] + + // Copy the schemas and remove any child properties for comparison. 
+ // In theory this will focus in on detecting changes for only the + // field we are looking at and ignore changes in the children fields. + // Since we are iterating through the map that should have all fields + // we should still detect changes in the children fields. + oldCopy := schema.DeepCopy() + newCopy := newSchema.DeepCopy() + oldCopy.Properties, oldCopy.Items = nil, nil + newCopy.Properties, newCopy.Items = nil, nil + if !reflect.DeepEqual(oldCopy, newCopy) { + diffMap[field] = FieldDiff{ + Old: oldCopy, + New: newCopy, + } + } + } + return diffMap, nil +} diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/change_validator_test.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/change_validator_test.go new file mode 100644 index 000000000..cc12bc5c1 --- /dev/null +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/change_validator_test.go @@ -0,0 +1,338 @@ +// Originally copied from https://github.com/carvel-dev/kapp/tree/d7fc2e15439331aa3a379485bb124e91a0829d2e +// Attribution: +// Copyright 2024 The Carvel Authors. +// SPDX-License-Identifier: Apache-2.0 + +package crdupgradesafety_test + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + + "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/preflights/crdupgradesafety" +) + +func TestCalculateFlatSchemaDiff(t *testing.T) { + for _, tc := range []struct { + name string + old crdupgradesafety.FlatSchema + new crdupgradesafety.FlatSchema + expectedDiff map[string]crdupgradesafety.FieldDiff + shouldError bool + }{ + { + name: "no diff in schemas, empty diff, no error", + old: crdupgradesafety.FlatSchema{ + "foo": &apiextensionsv1.JSONSchemaProps{}, + }, + new: crdupgradesafety.FlatSchema{ + "foo": &apiextensionsv1.JSONSchemaProps{}, + }, + expectedDiff: map[string]crdupgradesafety.FieldDiff{}, + }, + { + name: "diff in schemas, diff returned, no error", + old: crdupgradesafety.FlatSchema{ + "foo": &apiextensionsv1.JSONSchemaProps{}, + }, + new: crdupgradesafety.FlatSchema{ + "foo": &apiextensionsv1.JSONSchemaProps{ + ID: "bar", + }, + }, + expectedDiff: map[string]crdupgradesafety.FieldDiff{ + "foo": { + Old: &apiextensionsv1.JSONSchemaProps{}, + New: &apiextensionsv1.JSONSchemaProps{ID: "bar"}, + }, + }, + }, + { + name: "diff in child properties only, no diff returned, no error", + old: crdupgradesafety.FlatSchema{ + "foo": &apiextensionsv1.JSONSchemaProps{ + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "bar": {ID: "bar"}, + }, + }, + }, + new: crdupgradesafety.FlatSchema{ + "foo": &apiextensionsv1.JSONSchemaProps{ + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "bar": {ID: "baz"}, + }, + }, + }, + expectedDiff: map[string]crdupgradesafety.FieldDiff{}, + }, + { + name: "diff in child items only, no diff returned, no error", + old: crdupgradesafety.FlatSchema{ + "foo": &apiextensionsv1.JSONSchemaProps{ + Items: &apiextensionsv1.JSONSchemaPropsOrArray{Schema: &apiextensionsv1.JSONSchemaProps{ID: "bar"}}, + }, + }, + new: crdupgradesafety.FlatSchema{ + "foo": &apiextensionsv1.JSONSchemaProps{ + Items: &apiextensionsv1.JSONSchemaPropsOrArray{Schema: &apiextensionsv1.JSONSchemaProps{ID: "baz"}}, + }, + }, + expectedDiff: map[string]crdupgradesafety.FieldDiff{}, + }, + { + name: "field exists in old but not new, no diff returned, error", + old: crdupgradesafety.FlatSchema{ + "foo": 
&apiextensionsv1.JSONSchemaProps{}, + }, + new: crdupgradesafety.FlatSchema{ + "bar": &apiextensionsv1.JSONSchemaProps{}, + }, + expectedDiff: map[string]crdupgradesafety.FieldDiff{}, + shouldError: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + diff, err := crdupgradesafety.CalculateFlatSchemaDiff(tc.old, tc.new) + assert.Equal(t, tc.shouldError, err != nil, "should error? - %v", tc.shouldError) + assert.Equal(t, tc.expectedDiff, diff) + }) + } +} + +func TestFlattenSchema(t *testing.T) { + schema := &apiextensionsv1.JSONSchemaProps{ + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "foo": { + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "bar": {}, + }, + }, + "baz": {}, + }, + } + + foo := schema.Properties["foo"] + foobar := schema.Properties["foo"].Properties["bar"] + baz := schema.Properties["baz"] + expected := crdupgradesafety.FlatSchema{ + "^": schema, + "^.foo": &foo, + "^.foo.bar": &foobar, + "^.baz": &baz, + } + + actual := crdupgradesafety.FlattenSchema(schema) + + assert.Equal(t, expected, actual) +} + +func TestChangeValidator(t *testing.T) { + validationErr1 := errors.New(`version "v1alpha1", field "^" has unknown change, refusing to determine that change is safe`) + validationErr2 := errors.New(`version "v1alpha1", field "^": fail`) + + for _, tc := range []struct { + name string + changeValidator *crdupgradesafety.ChangeValidator + old apiextensionsv1.CustomResourceDefinition + new apiextensionsv1.CustomResourceDefinition + expectedError error + }{ + { + name: "no changes, no error", + changeValidator: &crdupgradesafety.ChangeValidator{ + Validations: []crdupgradesafety.ChangeValidation{ + func(_ crdupgradesafety.FieldDiff) (bool, error) { + return false, errors.New("should not run") + }, + }, + }, + old: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + }, + }, + }, + { + name: "changes, validation successful, change is fully handled, no error", + changeValidator: &crdupgradesafety.ChangeValidator{ + Validations: []crdupgradesafety.ChangeValidation{ + func(_ crdupgradesafety.FieldDiff) (bool, error) { + return true, nil + }, + }, + }, + old: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + { + name: "changes, validation successful, change not fully handled, error", + changeValidator: &crdupgradesafety.ChangeValidator{ + Validations: []crdupgradesafety.ChangeValidation{ 
+ func(_ crdupgradesafety.FieldDiff) (bool, error) { + return false, nil + }, + }, + }, + old: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + ID: "foo", + }, + }, + }, + }, + }, + }, + expectedError: validationErr1, + }, + { + name: "changes, validation failed, change fully handled, error", + changeValidator: &crdupgradesafety.ChangeValidator{ + Validations: []crdupgradesafety.ChangeValidation{ + func(_ crdupgradesafety.FieldDiff) (bool, error) { + return true, errors.New("fail") + }, + }, + }, + old: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + ID: "foo", + }, + }, + }, + }, + }, + }, + expectedError: validationErr2, + }, + { + name: "changes, validation failed, change not fully handled, ordered error", + changeValidator: &crdupgradesafety.ChangeValidator{ + Validations: []crdupgradesafety.ChangeValidation{ + func(_ crdupgradesafety.FieldDiff) (bool, error) { + return false, errors.New("fail") + }, + func(_ crdupgradesafety.FieldDiff) (bool, error) { + return false, errors.New("error") + }, + }, + }, + old: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + ID: "foo", + }, + }, + }, + }, + }, + }, + expectedError: fmt.Errorf("%w\n%s\n%w", validationErr2, `version "v1alpha1", field "^": error`, validationErr1), + }, + } { + t.Run(tc.name, func(t *testing.T) { + err := tc.changeValidator.Validate(tc.old, tc.new) + if tc.expectedError != nil { + assert.EqualError(t, err, tc.expectedError.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/checks.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/checks.go index b795b11de..669f65e57 100644 --- a/internal/operator-controller/rukpak/preflights/crdupgradesafety/checks.go +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/checks.go @@ -3,87 +3,22 @@ package 
crdupgradesafety import ( "bytes" "cmp" - "errors" "fmt" "reflect" - "slices" - kappcus "carvel.dev/kapp/pkg/kapp/crdupgradesafety" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/util/sets" - versionhelper "k8s.io/apimachinery/pkg/version" ) -type ServedVersionValidator struct { - Validations []kappcus.ChangeValidation -} - -func (c *ServedVersionValidator) Validate(old, new apiextensionsv1.CustomResourceDefinition) error { - // If conversion webhook is specified, pass check - if new.Spec.Conversion != nil && new.Spec.Conversion.Strategy == apiextensionsv1.WebhookConverter { - return nil - } - - errs := []error{} - servedVersions := []apiextensionsv1.CustomResourceDefinitionVersion{} - for _, version := range new.Spec.Versions { - if version.Served { - servedVersions = append(servedVersions, version) - } - } - - slices.SortFunc(servedVersions, func(a, b apiextensionsv1.CustomResourceDefinitionVersion) int { - return versionhelper.CompareKubeAwareVersionStrings(a.Name, b.Name) - }) - - for i, oldVersion := range servedVersions[:len(servedVersions)-1] { - for _, newVersion := range servedVersions[i+1:] { - flatOld := kappcus.FlattenSchema(oldVersion.Schema.OpenAPIV3Schema) - flatNew := kappcus.FlattenSchema(newVersion.Schema.OpenAPIV3Schema) - diffs, err := kappcus.CalculateFlatSchemaDiff(flatOld, flatNew) - if err != nil { - errs = append(errs, fmt.Errorf("calculating schema diff between CRD versions %q and %q", oldVersion.Name, newVersion.Name)) - continue - } - - for field, diff := range diffs { - handled := false - for _, validation := range c.Validations { - ok, err := validation(diff) - if err != nil { - errs = append(errs, fmt.Errorf("version upgrade %q to %q, field %q: %w", oldVersion.Name, newVersion.Name, field, err)) - } - if ok { - handled = true - break - } - } - - if !handled { - errs = append(errs, fmt.Errorf("version %q, field %q has unknown change, refusing to determine that change is safe", oldVersion.Name, field)) - } - } - } - } - if len(errs) > 0 { - return errors.Join(errs...) 
- } - return nil -} - -func (c *ServedVersionValidator) Name() string { - return "ServedVersionValidator" -} - -type resetFunc func(diff kappcus.FieldDiff) kappcus.FieldDiff +type resetFunc func(diff FieldDiff) FieldDiff -func isHandled(diff kappcus.FieldDiff, reset resetFunc) bool { +func isHandled(diff FieldDiff, reset resetFunc) bool { diff = reset(diff) return reflect.DeepEqual(diff.Old, diff.New) } -func Enum(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff { +func Enum(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.Enum = []apiextensionsv1.JSON{} diff.New.Enum = []apiextensionsv1.JSON{} return diff @@ -111,8 +46,8 @@ func Enum(diff kappcus.FieldDiff) (bool, error) { return isHandled(diff, reset), err } -func Required(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff { +func Required(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.Required = []string{} diff.New.Required = []string{} return diff @@ -141,8 +76,8 @@ func maxVerification[T cmp.Ordered](older *T, newer *T) error { return err } -func Maximum(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff { +func Maximum(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.Maximum = nil diff.New.Maximum = nil return diff @@ -156,8 +91,8 @@ func Maximum(diff kappcus.FieldDiff) (bool, error) { return isHandled(diff, reset), err } -func MaxItems(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff { +func MaxItems(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.MaxItems = nil diff.New.MaxItems = nil return diff @@ -171,8 +106,8 @@ func MaxItems(diff kappcus.FieldDiff) (bool, error) { return isHandled(diff, reset), err } -func MaxLength(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff { +func MaxLength(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.MaxLength = nil diff.New.MaxLength = nil return diff @@ -186,8 +121,8 @@ func MaxLength(diff kappcus.FieldDiff) (bool, error) { return isHandled(diff, reset), err } -func MaxProperties(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff { +func MaxProperties(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.MaxProperties = nil diff.New.MaxProperties = nil return diff @@ -212,8 +147,8 @@ func minVerification[T cmp.Ordered](older *T, newer *T) error { return err } -func Minimum(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff { +func Minimum(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.Minimum = nil diff.New.Minimum = nil return diff @@ -227,8 +162,8 @@ func Minimum(diff kappcus.FieldDiff) (bool, error) { return isHandled(diff, reset), err } -func MinItems(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff { +func MinItems(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.MinItems = nil diff.New.MinItems = nil return diff @@ -242,8 +177,8 @@ func MinItems(diff kappcus.FieldDiff) (bool, error) { return isHandled(diff, reset), err } -func MinLength(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) 
kappcus.FieldDiff { +func MinLength(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.MinLength = nil diff.New.MinLength = nil return diff @@ -257,8 +192,8 @@ func MinLength(diff kappcus.FieldDiff) (bool, error) { return isHandled(diff, reset), err } -func MinProperties(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff { +func MinProperties(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.MinProperties = nil diff.New.MinProperties = nil return diff @@ -272,8 +207,8 @@ func MinProperties(diff kappcus.FieldDiff) (bool, error) { return isHandled(diff, reset), err } -func Default(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff { +func Default(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.Default = nil diff.New.Default = nil return diff @@ -293,8 +228,8 @@ func Default(diff kappcus.FieldDiff) (bool, error) { return isHandled(diff, reset), err } -func Type(diff kappcus.FieldDiff) (bool, error) { - reset := func(diff kappcus.FieldDiff) kappcus.FieldDiff { +func Type(diff FieldDiff) (bool, error) { + reset := func(diff FieldDiff) FieldDiff { diff.Old.Type = "" diff.New.Type = "" return diff diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/checks_test.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/checks_test.go index 5e1bee3fd..36618b584 100644 --- a/internal/operator-controller/rukpak/preflights/crdupgradesafety/checks_test.go +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/checks_test.go @@ -2,10 +2,8 @@ package crdupgradesafety import ( "errors" - "fmt" "testing" - kappcus "carvel.dev/kapp/pkg/kapp/crdupgradesafety" "github.com/stretchr/testify/require" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/utils/ptr" @@ -13,7 +11,7 @@ import ( type testcase struct { name string - diff kappcus.FieldDiff + diff FieldDiff err error handled bool } @@ -22,7 +20,7 @@ func TestEnum(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Enum: []apiextensionsv1.JSON{ { @@ -43,7 +41,7 @@ func TestEnum(t *testing.T) { }, { name: "new enum constraint, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Enum: []apiextensionsv1.JSON{}, }, @@ -60,7 +58,7 @@ func TestEnum(t *testing.T) { }, { name: "remove enum value, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Enum: []apiextensionsv1.JSON{ { @@ -84,7 +82,7 @@ func TestEnum(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -97,7 +95,7 @@ func TestEnum(t *testing.T) { }, { name: "different field changed with enum, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", Enum: []apiextensionsv1.JSON{ @@ -131,7 +129,7 @@ func TestRequired(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Required: []string{ "foo", @@ -148,7 +146,7 @@ func TestRequired(t *testing.T) { }, { name: "new required field, error, handled", - diff: 
kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{}, New: &apiextensionsv1.JSONSchemaProps{ Required: []string{ @@ -161,7 +159,7 @@ func TestRequired(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -185,7 +183,7 @@ func TestMaximum(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Maximum: ptr.To(10.0), }, @@ -198,7 +196,7 @@ func TestMaximum(t *testing.T) { }, { name: "new maximum constraint, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{}, New: &apiextensionsv1.JSONSchemaProps{ Maximum: ptr.To(10.0), @@ -209,7 +207,7 @@ func TestMaximum(t *testing.T) { }, { name: "maximum constraint decreased, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Maximum: ptr.To(20.0), }, @@ -222,7 +220,7 @@ func TestMaximum(t *testing.T) { }, { name: "maximum constraint increased, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Maximum: ptr.To(20.0), }, @@ -235,7 +233,7 @@ func TestMaximum(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -259,7 +257,7 @@ func TestMaxItems(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MaxItems: ptr.To(int64(10)), }, @@ -272,7 +270,7 @@ func TestMaxItems(t *testing.T) { }, { name: "new maxItems constraint, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{}, New: &apiextensionsv1.JSONSchemaProps{ MaxItems: ptr.To(int64(10)), @@ -283,7 +281,7 @@ func TestMaxItems(t *testing.T) { }, { name: "maxItems constraint decreased, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MaxItems: ptr.To(int64(20)), }, @@ -296,7 +294,7 @@ func TestMaxItems(t *testing.T) { }, { name: "maxitems constraint increased, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MaxItems: ptr.To(int64(10)), }, @@ -309,7 +307,7 @@ func TestMaxItems(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -333,7 +331,7 @@ func TestMaxLength(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MaxLength: ptr.To(int64(10)), }, @@ -346,7 +344,7 @@ func TestMaxLength(t *testing.T) { }, { name: "new maxLength constraint, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{}, New: &apiextensionsv1.JSONSchemaProps{ MaxLength: ptr.To(int64(10)), @@ -357,7 +355,7 @@ func TestMaxLength(t *testing.T) { }, { name: "maxLength constraint decreased, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MaxLength: ptr.To(int64(20)), }, @@ -370,7 +368,7 @@ func TestMaxLength(t *testing.T) { }, { name: "maxLength constraint increased, no error, handled", - diff: 
kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MaxLength: ptr.To(int64(10)), }, @@ -383,7 +381,7 @@ func TestMaxLength(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -407,7 +405,7 @@ func TestMaxProperties(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MaxProperties: ptr.To(int64(10)), }, @@ -420,7 +418,7 @@ func TestMaxProperties(t *testing.T) { }, { name: "new maxProperties constraint, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{}, New: &apiextensionsv1.JSONSchemaProps{ MaxProperties: ptr.To(int64(10)), @@ -431,7 +429,7 @@ func TestMaxProperties(t *testing.T) { }, { name: "maxProperties constraint decreased, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MaxProperties: ptr.To(int64(20)), }, @@ -444,7 +442,7 @@ func TestMaxProperties(t *testing.T) { }, { name: "maxProperties constraint increased, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MaxProperties: ptr.To(int64(10)), }, @@ -457,7 +455,7 @@ func TestMaxProperties(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -481,7 +479,7 @@ func TestMinItems(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MinItems: ptr.To(int64(10)), }, @@ -494,7 +492,7 @@ func TestMinItems(t *testing.T) { }, { name: "new minItems constraint, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{}, New: &apiextensionsv1.JSONSchemaProps{ MinItems: ptr.To(int64(10)), @@ -505,7 +503,7 @@ func TestMinItems(t *testing.T) { }, { name: "minItems constraint decreased, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MinItems: ptr.To(int64(20)), }, @@ -518,7 +516,7 @@ func TestMinItems(t *testing.T) { }, { name: "minItems constraint increased, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MinItems: ptr.To(int64(10)), }, @@ -531,7 +529,7 @@ func TestMinItems(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -555,7 +553,7 @@ func TestMinimum(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Minimum: ptr.To(10.0), }, @@ -568,7 +566,7 @@ func TestMinimum(t *testing.T) { }, { name: "new minimum constraint, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{}, New: &apiextensionsv1.JSONSchemaProps{ Minimum: ptr.To(10.0), @@ -579,7 +577,7 @@ func TestMinimum(t *testing.T) { }, { name: "minLength constraint decreased, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Minimum: ptr.To(20.0), }, @@ -592,7 +590,7 @@ func TestMinimum(t *testing.T) { }, { name: "minLength constraint 
increased, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Minimum: ptr.To(10.0), }, @@ -605,7 +603,7 @@ func TestMinimum(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -629,7 +627,7 @@ func TestMinLength(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MinLength: ptr.To(int64(10)), }, @@ -642,7 +640,7 @@ func TestMinLength(t *testing.T) { }, { name: "new minLength constraint, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{}, New: &apiextensionsv1.JSONSchemaProps{ MinLength: ptr.To(int64(10)), @@ -653,7 +651,7 @@ func TestMinLength(t *testing.T) { }, { name: "minLength constraint decreased, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MinLength: ptr.To(int64(20)), }, @@ -666,7 +664,7 @@ func TestMinLength(t *testing.T) { }, { name: "minLength constraint increased, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MinLength: ptr.To(int64(10)), }, @@ -679,7 +677,7 @@ func TestMinLength(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -703,7 +701,7 @@ func TestMinProperties(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MinProperties: ptr.To(int64(10)), }, @@ -716,7 +714,7 @@ func TestMinProperties(t *testing.T) { }, { name: "new minProperties constraint, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{}, New: &apiextensionsv1.JSONSchemaProps{ MinProperties: ptr.To(int64(10)), @@ -727,7 +725,7 @@ func TestMinProperties(t *testing.T) { }, { name: "minProperties constraint decreased, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MinProperties: ptr.To(int64(20)), }, @@ -740,7 +738,7 @@ func TestMinProperties(t *testing.T) { }, { name: "minProperties constraint increased, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ MinProperties: ptr.To(int64(10)), }, @@ -753,7 +751,7 @@ func TestMinProperties(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -777,7 +775,7 @@ func TestDefault(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Default: &apiextensionsv1.JSON{ Raw: []byte("foo"), @@ -794,7 +792,7 @@ func TestDefault(t *testing.T) { }, { name: "new default value, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{}, New: &apiextensionsv1.JSONSchemaProps{ Default: &apiextensionsv1.JSON{ @@ -807,7 +805,7 @@ func TestDefault(t *testing.T) { }, { name: "default value removed, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Default: &apiextensionsv1.JSON{ Raw: []byte("foo"), @@ 
-820,7 +818,7 @@ func TestDefault(t *testing.T) { }, { name: "default value changed, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Default: &apiextensionsv1.JSON{ Raw: []byte("foo"), @@ -837,7 +835,7 @@ func TestDefault(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -861,7 +859,7 @@ func TestType(t *testing.T) { for _, tc := range []testcase{ { name: "no diff, no error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Type: "string", }, @@ -874,7 +872,7 @@ func TestType(t *testing.T) { }, { name: "type changed, error, handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ Type: "string", }, @@ -887,7 +885,7 @@ func TestType(t *testing.T) { }, { name: "different field changed, no error, not handled", - diff: kappcus.FieldDiff{ + diff: FieldDiff{ Old: &apiextensionsv1.JSONSchemaProps{ ID: "foo", }, @@ -906,81 +904,3 @@ func TestType(t *testing.T) { }) } } - -func TestOrderKappsValidateErr(t *testing.T) { - testErr1 := errors.New("fallback1") - testErr2 := errors.New("fallback2") - - generateErrors := func(n int, base string) []error { - var result []error - for i := n; i >= 0; i-- { - result = append(result, fmt.Errorf("%s%d", base, i)) - } - return result - } - - joinedAndNested := func(format string, errs ...error) error { - return fmt.Errorf(format, errors.Join(errs...)) - } - - testCases := []struct { - name string - inpuError error - expectedError error - }{ - { - name: "fallback: initial error was not error.Join'ed", - inpuError: testErr1, - expectedError: testErr1, - }, - { - name: "fallback: nested error was not wrapped", - inpuError: errors.Join(testErr1), - expectedError: testErr1, - }, - { - name: "fallback: multiple nested errors, one was not wrapped", - inpuError: errors.Join(testErr2, fmt.Errorf("%w", testErr1)), - expectedError: errors.Join(testErr2, fmt.Errorf("%w", testErr1)), - }, - { - name: "fallback: nested error did not contain \":\"", - inpuError: errors.Join(fmt.Errorf("%w", testErr1)), - expectedError: testErr1, - }, - { - name: "fallback: multiple nested errors, one did not contain \":\"", - inpuError: errors.Join(joinedAndNested("fail: %w", testErr2), joinedAndNested("%w", testErr1)), - expectedError: errors.Join(fmt.Errorf("fail: %w", testErr2), testErr1), - }, - { - name: "fallback: nested error was not error.Join'ed", - inpuError: errors.Join(fmt.Errorf("fail: %w", testErr1)), - expectedError: fmt.Errorf("fail: %w", testErr1), - }, - { - name: "fallback: multiple nested errors, one was not error.Join'ed", - inpuError: errors.Join(joinedAndNested("fail: %w", testErr2), fmt.Errorf("fail: %w", testErr1)), - expectedError: fmt.Errorf("fail: %w\nfail: %w", testErr2, testErr1), - }, - { - name: "ensures order for a single group of multiple deeply nested errors", - inpuError: errors.Join(joinedAndNested("fail: %w", testErr2, testErr1)), - expectedError: fmt.Errorf("fail: %w\n%w", testErr1, testErr2), - }, - { - name: "ensures order for multiple groups of deeply nested errors", - inpuError: errors.Join( - joinedAndNested("fail: %w", testErr2, testErr1), - joinedAndNested("validation: %w", generateErrors(5, "err")...), - ), - expectedError: fmt.Errorf("fail: %w\n%w\nvalidation: err0\nerr1\nerr2\nerr3\nerr4\nerr5", testErr1, testErr2), - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t 
*testing.T) { - err := orderKappsValidateErr(tc.inpuError) - require.EqualError(t, err, tc.expectedError.Error()) - }) - } -} diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety.go index 0577c38b3..6bc177cd1 100644 --- a/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety.go +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety.go @@ -1,14 +1,11 @@ package crdupgradesafety import ( - "cmp" "context" "errors" "fmt" - "slices" "strings" - kappcus "carvel.dev/kapp/pkg/kapp/crdupgradesafety" "helm.sh/helm/v3/pkg/release" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1client "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" @@ -21,7 +18,7 @@ import ( type Option func(p *Preflight) -func WithValidator(v *kappcus.Validator) Option { +func WithValidator(v *Validator) Option { return func(p *Preflight) { p.validator = v } @@ -29,11 +26,11 @@ func WithValidator(v *kappcus.Validator) Option { type Preflight struct { crdClient apiextensionsv1client.CustomResourceDefinitionInterface - validator *kappcus.Validator + validator *Validator } func NewPreflight(crdCli apiextensionsv1client.CustomResourceDefinitionInterface, opts ...Option) *Preflight { - changeValidations := []kappcus.ChangeValidation{ + changeValidations := []ChangeValidation{ Enum, Required, Maximum, @@ -50,13 +47,13 @@ func NewPreflight(crdCli apiextensionsv1client.CustomResourceDefinitionInterface p := &Preflight{ crdClient: crdCli, // create a default validator. Can be overridden via the options - validator: &kappcus.Validator{ - Validations: []kappcus.Validation{ - kappcus.NewValidationFunc("NoScopeChange", kappcus.NoScopeChange), - kappcus.NewValidationFunc("NoStoredVersionRemoved", kappcus.NoStoredVersionRemoved), - kappcus.NewValidationFunc("NoExistingFieldRemoved", kappcus.NoExistingFieldRemoved), + validator: &Validator{ + Validations: []Validation{ + NewValidationFunc("NoScopeChange", NoScopeChange), + NewValidationFunc("NoStoredVersionRemoved", NoStoredVersionRemoved), + NewValidationFunc("NoExistingFieldRemoved", NoExistingFieldRemoved), &ServedVersionValidator{Validations: changeValidations}, - &kappcus.ChangeValidator{Validations: changeValidations}, + &ChangeValidator{Validations: changeValidations}, }, }, } @@ -114,71 +111,9 @@ func (p *Preflight) runPreflight(ctx context.Context, rel *release.Release) erro err = p.validator.Validate(*oldCrd, *newCrd) if err != nil { - err = orderKappsValidateErr(err) validateErrors = append(validateErrors, fmt.Errorf("validating upgrade for CRD %q failed: %w", newCrd.Name, err)) } } return errors.Join(validateErrors...) } - -// orderKappsValidateErr is meant as a temporary solution to the problem -// of randomly ordered multi-line validation error returned by kapp's validator.Validate() -// -// The problem is that kapp's field validations are performed in map iteration order, which is not fixed. -// Errors from those validations are then error.Join'ed, fmt.Errorf'ed and error.Join'ed again, -// which means original messages are available at 3rd level of nesting, and this is where we need to -// sort them to ensure we do not enter into constant reconciliation loop because of random order of -// failure message we ultimately set in ClusterExtension's status conditions. 
-// -// This helper attempts to do that and falls back to original unchanged error message -// in case of any unforeseen issues which likely mean that the internals of validator.Validate -// have changed. -// -// For full context see: -// github.com/operator-framework/operator-controller/issues/1456 (original issue and comments) -// github.com/carvel-dev/kapp/pull/1047 (PR to ensure order in upstream) -// -// TODO: remove this once ordering has been handled by the upstream. -func orderKappsValidateErr(err error) error { - joinedValidationErrs, ok := err.(interface{ Unwrap() []error }) - if !ok { - return err - } - - // nolint: prealloc - var errs []error - for _, validationErr := range joinedValidationErrs.Unwrap() { - unwrappedValidationErr := errors.Unwrap(validationErr) - // validator.Validate did not error.Join'ed validation errors - // kapp's internals changed - fallback to original error - if unwrappedValidationErr == nil { - return err - } - - prefix, _, ok := strings.Cut(validationErr.Error(), ":") - // kapp's internal error format changed - fallback to original error - if !ok { - return err - } - - // attempt to unwrap and sort field errors - joinedFieldErrs, ok := unwrappedValidationErr.(interface{ Unwrap() []error }) - // ChangeValidator did not error.Join'ed field validation errors - // kapp's internals changed - fallback to original error - if !ok { - return err - } - - // ensure order of the field validation errors - unwrappedFieldErrs := joinedFieldErrs.Unwrap() - slices.SortFunc(unwrappedFieldErrs, func(a, b error) int { - return cmp.Compare(a.Error(), b.Error()) - }) - - // re-join the sorted field errors keeping the original error prefix from kapp - errs = append(errs, fmt.Errorf("%s: %w", prefix, errors.Join(unwrappedFieldErrs...))) - } - - return errors.Join(errs...) 
-} diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go index 47e9d951b..12241bd7f 100644 --- a/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go @@ -7,7 +7,6 @@ import ( "strings" "testing" - kappcus "carvel.dev/kapp/pkg/kapp/crdupgradesafety" "github.com/stretchr/testify/require" "helm.sh/helm/v3/pkg/release" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -31,7 +30,7 @@ func (c *MockCRDGetter) Get(ctx context.Context, name string, options metav1.Get return c.oldCrd, c.getErr } -func newMockPreflight(crd *apiextensionsv1.CustomResourceDefinition, err error, customValidator *kappcus.Validator) *crdupgradesafety.Preflight { +func newMockPreflight(crd *apiextensionsv1.CustomResourceDefinition, err error, customValidator *crdupgradesafety.Validator) *crdupgradesafety.Preflight { var preflightOpts []crdupgradesafety.Option if customValidator != nil { preflightOpts = append(preflightOpts, crdupgradesafety.WithValidator(customValidator)) @@ -76,7 +75,7 @@ func TestInstall(t *testing.T) { tests := []struct { name string oldCrdPath string - validator *kappcus.Validator + validator *crdupgradesafety.Validator release *release.Release wantErrMsgs []string wantCrdGetErr error @@ -137,9 +136,9 @@ func TestInstall(t *testing.T) { Name: "test-release", Manifest: getManifestString(t, "old-crd.json"), }, - validator: &kappcus.Validator{ - Validations: []kappcus.Validation{ - kappcus.NewValidationFunc("test", func(old, new apiextensionsv1.CustomResourceDefinition) error { + validator: &crdupgradesafety.Validator{ + Validations: []crdupgradesafety.Validation{ + crdupgradesafety.NewValidationFunc("test", func(old, new apiextensionsv1.CustomResourceDefinition) error { return fmt.Errorf("custom validation error!!") }), }, @@ -213,7 +212,7 @@ func TestUpgrade(t *testing.T) { tests := []struct { name string oldCrdPath string - validator *kappcus.Validator + validator *crdupgradesafety.Validator release *release.Release wantErrMsgs []string wantCrdGetErr error @@ -274,9 +273,9 @@ func TestUpgrade(t *testing.T) { Name: "test-release", Manifest: getManifestString(t, "old-crd.json"), }, - validator: &kappcus.Validator{ - Validations: []kappcus.Validation{ - kappcus.NewValidationFunc("test", func(old, new apiextensionsv1.CustomResourceDefinition) error { + validator: &crdupgradesafety.Validator{ + Validations: []crdupgradesafety.Validation{ + crdupgradesafety.NewValidationFunc("test", func(old, new apiextensionsv1.CustomResourceDefinition) error { return fmt.Errorf("custom validation error!!") }), }, diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/shared_version_validator.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/shared_version_validator.go new file mode 100644 index 000000000..d66f1ed9c --- /dev/null +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/shared_version_validator.go @@ -0,0 +1,74 @@ +package crdupgradesafety + +import ( + "errors" + "fmt" + "maps" + "slices" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + versionhelper "k8s.io/apimachinery/pkg/version" +) + +type ServedVersionValidator struct { + Validations []ChangeValidation +} + +func (c *ServedVersionValidator) Validate(old, new 
apiextensionsv1.CustomResourceDefinition) error { + // If conversion webhook is specified, pass check + if new.Spec.Conversion != nil && new.Spec.Conversion.Strategy == apiextensionsv1.WebhookConverter { + return nil + } + + errs := []error{} + servedVersions := []apiextensionsv1.CustomResourceDefinitionVersion{} + for _, version := range new.Spec.Versions { + if version.Served { + servedVersions = append(servedVersions, version) + } + } + + slices.SortFunc(servedVersions, func(a, b apiextensionsv1.CustomResourceDefinitionVersion) int { + return versionhelper.CompareKubeAwareVersionStrings(a.Name, b.Name) + }) + + for i, oldVersion := range servedVersions[:len(servedVersions)-1] { + for _, newVersion := range servedVersions[i+1:] { + flatOld := FlattenSchema(oldVersion.Schema.OpenAPIV3Schema) + flatNew := FlattenSchema(newVersion.Schema.OpenAPIV3Schema) + diffs, err := CalculateFlatSchemaDiff(flatOld, flatNew) + if err != nil { + errs = append(errs, fmt.Errorf("calculating schema diff between CRD versions %q and %q", oldVersion.Name, newVersion.Name)) + continue + } + + for _, field := range slices.Sorted(maps.Keys(diffs)) { + diff := diffs[field] + + handled := false + for _, validation := range c.Validations { + ok, err := validation(diff) + if err != nil { + errs = append(errs, fmt.Errorf("version upgrade %q to %q, field %q: %w", oldVersion.Name, newVersion.Name, field, err)) + } + if ok { + handled = true + break + } + } + + if !handled { + errs = append(errs, fmt.Errorf("version %q, field %q has unknown change, refusing to determine that change is safe", oldVersion.Name, field)) + } + } + } + } + if len(errs) > 0 { + return errors.Join(errs...) + } + return nil +} + +func (c *ServedVersionValidator) Name() string { + return "ServedVersionValidator" +} diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/shared_version_validator_test.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/shared_version_validator_test.go new file mode 100644 index 000000000..67b0c6205 --- /dev/null +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/shared_version_validator_test.go @@ -0,0 +1,191 @@ +package crdupgradesafety_test + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + + "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/preflights/crdupgradesafety" +) + +func TestServedVersionValidator(t *testing.T) { + validationErr1 := errors.New(`version "v1alpha1", field "^" has unknown change, refusing to determine that change is safe`) + validationErr2 := errors.New(`version upgrade "v1alpha1" to "v1alpha2", field "^": fail`) + + for _, tc := range []struct { + name string + servedVersionValidator *crdupgradesafety.ServedVersionValidator + new apiextensionsv1.CustomResourceDefinition + expectedError error + }{ + { + name: "no changes, no error", + servedVersionValidator: &crdupgradesafety.ServedVersionValidator{ + Validations: []crdupgradesafety.ChangeValidation{ + func(_ crdupgradesafety.FieldDiff) (bool, error) { + return false, errors.New("should not run") + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + }, 
+ }, + }, + { + name: "changes, validation successful, change is fully handled, no error", + servedVersionValidator: &crdupgradesafety.ServedVersionValidator{ + Validations: []crdupgradesafety.ChangeValidation{ + func(_ crdupgradesafety.FieldDiff) (bool, error) { + return true, nil + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + { + Name: "v1alpha2", + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + { + name: "changes, validation successful, change not fully handled, error", + servedVersionValidator: &crdupgradesafety.ServedVersionValidator{ + Validations: []crdupgradesafety.ChangeValidation{ + func(_ crdupgradesafety.FieldDiff) (bool, error) { + return false, nil + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + { + Name: "v1alpha2", + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + ID: "foo", + }, + }, + }, + }, + }, + }, + expectedError: validationErr1, + }, + { + name: "changes, validation failed, change fully handled, error", + servedVersionValidator: &crdupgradesafety.ServedVersionValidator{ + Validations: []crdupgradesafety.ChangeValidation{ + func(_ crdupgradesafety.FieldDiff) (bool, error) { + return true, errors.New("fail") + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + { + Name: "v1alpha2", + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + ID: "foo", + }, + }, + }, + }, + }, + }, + expectedError: validationErr2, + }, + { + name: "changes, validation failed, change not fully handled, ordered error", + servedVersionValidator: &crdupgradesafety.ServedVersionValidator{ + Validations: []crdupgradesafety.ChangeValidation{ + func(_ crdupgradesafety.FieldDiff) (bool, error) { + return false, errors.New("fail") + }, + func(_ crdupgradesafety.FieldDiff) (bool, error) { + return false, errors.New("error") + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + { + Name: "v1alpha2", + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + ID: "foo", + }, + }, + }, + }, + }, + }, + expectedError: fmt.Errorf("%w\n%s\n%w", validationErr2, `version upgrade "v1alpha1" to "v1alpha2", field "^": error`, validationErr1), + }, + 
} { + t.Run(tc.name, func(t *testing.T) { + err := tc.servedVersionValidator.Validate(apiextensionsv1.CustomResourceDefinition{}, tc.new) + if tc.expectedError != nil { + assert.EqualError(t, err, tc.expectedError.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/crdupgradesafety/validator.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/validator.go similarity index 74% rename from vendor/carvel.dev/kapp/pkg/kapp/crdupgradesafety/validator.go rename to internal/operator-controller/rukpak/preflights/crdupgradesafety/validator.go index 9cb970b56..6fec6cbe5 100644 --- a/vendor/carvel.dev/kapp/pkg/kapp/crdupgradesafety/validator.go +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/validator.go @@ -1,5 +1,7 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 +// Originally copied from https://github.com/carvel-dev/kapp/tree/d7fc2e15439331aa3a379485bb124e91a0829d2e +// Attribution: +// Copyright 2024 The Carvel Authors. +// SPDX-License-Identifier: Apache-2.0 package crdupgradesafety @@ -9,7 +11,7 @@ import ( "strings" "github.com/openshift/crd-schema-checker/pkg/manifestcomparators" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/util/sets" ) @@ -18,7 +20,7 @@ import ( type Validation interface { // Validate contains the actual validation logic. An error being // returned means validation has failed - Validate(old, new v1.CustomResourceDefinition) error + Validate(old, new apiextensionsv1.CustomResourceDefinition) error // Name returns a human-readable name for the validation Name() string } @@ -26,7 +28,7 @@ type Validation interface { // ValidateFunc is a function to validate a CustomResourceDefinition // for safe upgrades. It accepts the old and new CRDs and returns an // error if performing an upgrade from old -> new is unsafe. 
-type ValidateFunc func(old, new v1.CustomResourceDefinition) error +type ValidateFunc func(old, new apiextensionsv1.CustomResourceDefinition) error // ValidationFunc is a helper to wrap a ValidateFunc // as an implementation of the Validation interface @@ -46,7 +48,7 @@ func (vf *ValidationFunc) Name() string { return vf.name } -func (vf *ValidationFunc) Validate(old, new v1.CustomResourceDefinition) error { +func (vf *ValidationFunc) Validate(old, new apiextensionsv1.CustomResourceDefinition) error { return vf.validateFunc(old, new) } @@ -54,7 +56,7 @@ type Validator struct { Validations []Validation } -func (v *Validator) Validate(old, new v1.CustomResourceDefinition) error { +func (v *Validator) Validate(old, new apiextensionsv1.CustomResourceDefinition) error { validateErrs := []error{} for _, validation := range v.Validations { if err := validation.Validate(old, new); err != nil { @@ -70,14 +72,14 @@ func (v *Validator) Validate(old, new v1.CustomResourceDefinition) error { return nil } -func NoScopeChange(old, new v1.CustomResourceDefinition) error { +func NoScopeChange(old, new apiextensionsv1.CustomResourceDefinition) error { if old.Spec.Scope != new.Spec.Scope { return fmt.Errorf("scope changed from %q to %q", old.Spec.Scope, new.Spec.Scope) } return nil } -func NoStoredVersionRemoved(old, new v1.CustomResourceDefinition) error { +func NoStoredVersionRemoved(old, new apiextensionsv1.CustomResourceDefinition) error { newVersions := sets.New[string]() for _, version := range new.Spec.Versions { if !newVersions.Has(version.Name) { @@ -94,7 +96,7 @@ func NoStoredVersionRemoved(old, new v1.CustomResourceDefinition) error { return nil } -func NoExistingFieldRemoved(old, new v1.CustomResourceDefinition) error { +func NoExistingFieldRemoved(old, new apiextensionsv1.CustomResourceDefinition) error { reg := manifestcomparators.NewRegistry() err := reg.AddComparator(manifestcomparators.NoFieldRemoval()) if err != nil { diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/validator_test.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/validator_test.go new file mode 100644 index 000000000..e13ac9487 --- /dev/null +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/validator_test.go @@ -0,0 +1,340 @@ +// Originally copied from https://github.com/carvel-dev/kapp/tree/d7fc2e15439331aa3a379485bb124e91a0829d2e +// Attribution: +// Copyright 2024 The Carvel Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package crdupgradesafety + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +func TestValidator(t *testing.T) { + for _, tc := range []struct { + name string + validations []Validation + shouldErr bool + }{ + { + name: "no validators, no error", + validations: []Validation{}, + }, + { + name: "passing validator, no error", + validations: []Validation{ + NewValidationFunc("pass", func(_, _ apiextensionsv1.CustomResourceDefinition) error { + return nil + }), + }, + }, + { + name: "failing validator, error", + validations: []Validation{ + NewValidationFunc("fail", func(_, _ apiextensionsv1.CustomResourceDefinition) error { + return errors.New("boom") + }), + }, + shouldErr: true, + }, + { + name: "passing+failing validator, error", + validations: []Validation{ + NewValidationFunc("pass", func(_, _ apiextensionsv1.CustomResourceDefinition) error { + return nil + }), + NewValidationFunc("fail", func(_, _ apiextensionsv1.CustomResourceDefinition) error { + return errors.New("boom") + }), + }, + shouldErr: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + v := Validator{ + Validations: tc.validations, + } + var o, n apiextensionsv1.CustomResourceDefinition + + err := v.Validate(o, n) + require.Equal(t, tc.shouldErr, err != nil) + }) + } +} + +func TestNoScopeChange(t *testing.T) { + for _, tc := range []struct { + name string + old apiextensionsv1.CustomResourceDefinition + new apiextensionsv1.CustomResourceDefinition + shouldError bool + }{ + { + name: "no scope change, no error", + old: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Scope: apiextensionsv1.ClusterScoped, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Scope: apiextensionsv1.ClusterScoped, + }, + }, + }, + { + name: "scope change, error", + old: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Scope: apiextensionsv1.ClusterScoped, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Scope: apiextensionsv1.NamespaceScoped, + }, + }, + shouldError: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + err := NoScopeChange(tc.old, tc.new) + require.Equal(t, tc.shouldError, err != nil) + }) + } +} + +func TestNoStoredVersionRemoved(t *testing.T) { + for _, tc := range []struct { + name string + old apiextensionsv1.CustomResourceDefinition + new apiextensionsv1.CustomResourceDefinition + shouldError bool + }{ + { + name: "no stored versions, no error", + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + }, + }, + }, + }, + old: apiextensionsv1.CustomResourceDefinition{}, + }, + { + name: "stored versions, no stored version removed, no error", + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + }, + { + Name: "v1alpha2", + }, + }, + }, + }, + old: apiextensionsv1.CustomResourceDefinition{ + Status: apiextensionsv1.CustomResourceDefinitionStatus{ + StoredVersions: []string{ + "v1alpha1", + }, + }, + }, + }, + { + name: "stored versions, 
stored version removed, error", + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha2", + }, + }, + }, + }, + old: apiextensionsv1.CustomResourceDefinition{ + Status: apiextensionsv1.CustomResourceDefinitionStatus{ + StoredVersions: []string{ + "v1alpha1", + }, + }, + }, + shouldError: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + err := NoStoredVersionRemoved(tc.old, tc.new) + require.Equal(t, tc.shouldError, err != nil) + }) + } +} + +func TestNoExistingFieldRemoved(t *testing.T) { + for _, tc := range []struct { + name string + new apiextensionsv1.CustomResourceDefinition + old apiextensionsv1.CustomResourceDefinition + shouldError bool + }{ + { + name: "no existing field removed, no error", + old: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "fieldOne": { + Type: "string", + }, + }, + }, + }, + }, + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "fieldOne": { + Type: "string", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "existing field removed, error", + old: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "fieldOne": { + Type: "string", + }, + "fieldTwo": { + Type: "string", + }, + }, + }, + }, + }, + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "fieldOne": { + Type: "string", + }, + }, + }, + }, + }, + }, + }, + }, + shouldError: true, + }, + { + name: "new version is added with the field removed, no error", + old: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "fieldOne": { + Type: "string", + }, + "fieldTwo": { + Type: "string", + }, + }, + }, + }, + }, + }, + }, + }, + new: apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + 
OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "fieldOne": { + Type: "string", + }, + "fieldTwo": { + Type: "string", + }, + }, + }, + }, + }, + { + Name: "v1alpha2", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "fieldOne": { + Type: "string", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + err := NoExistingFieldRemoved(tc.old, tc.new) + assert.Equal(t, tc.shouldError, err != nil) + }) + } +} diff --git a/requirements.txt b/requirements.txt index 9ed78d9e3..137801a70 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,17 +4,17 @@ certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 colorama==0.4.6 -cssselect==1.2.0 +cssselect==1.3.0 ghp-import==2.1.0 idna==3.10 -Jinja2==3.1.5 +Jinja2==3.1.6 lxml==5.3.1 Markdown==3.7 markdown2==2.5.3 MarkupSafe==3.0.2 mergedeep==1.3.4 mkdocs==1.6.1 -mkdocs-material==9.6.7 +mkdocs-material==9.6.9 mkdocs-material-extensions==1.3.1 packaging==24.2 paginate==0.5.7 diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go index a01124bfb..7c57a078c 100644 --- a/test/e2e/cluster_extension_install_test.go +++ b/test/e2e/cluster_extension_install_test.go @@ -38,7 +38,7 @@ func createNamespace(ctx context.Context, name string) (*corev1.Namespace, error Name: name, }, } - err := c.Create(ctx, ns) + err := globalClient.Create(ctx, ns) if err != nil { return nil, err } @@ -52,7 +52,7 @@ func createServiceAccount(ctx context.Context, name types.NamespacedName, cluste Namespace: name.Namespace, }, } - err := c.Create(ctx, sa) + err := globalClient.Create(ctx, sa) if err != nil { return nil, err } @@ -156,7 +156,7 @@ func createClusterRoleAndBindingForSA(ctx context.Context, name string, sa *core }, }, } - err := c.Create(ctx, cr) + err := globalClient.Create(ctx, cr) if err != nil { return err } @@ -177,7 +177,7 @@ func createClusterRoleAndBindingForSA(ctx context.Context, name string, sa *core Name: name, }, } - err = c.Create(ctx, crb) + err = globalClient.Create(ctx, crb) if err != nil { return err } @@ -219,7 +219,7 @@ func validateCatalogUnpack(t *testing.T) { catalog := &ocv1.ClusterCatalog{} t.Log("Ensuring ClusterCatalog has Status.Condition of Progressing with a status == True and reason == Succeeded") require.EventuallyWithT(t, func(ct *assert.CollectT) { - err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) assert.NoError(ct, err) cond := apimeta.FindStatusCondition(catalog.Status.Conditions, ocv1.TypeProgressing) assert.NotNil(ct, cond) @@ -234,7 +234,7 @@ func validateCatalogUnpack(t *testing.T) { t.Log("Ensuring ClusterCatalog has Status.Condition of Type = Serving with status == True") require.EventuallyWithT(t, func(ct *assert.CollectT) { - err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) assert.NoError(ct, err) cond := apimeta.FindStatusCondition(catalog.Status.Conditions, ocv1.TypeServing) assert.NotNil(ct, cond) @@ -251,7 +251,7 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { t.Logf("By waiting for CustomResourceDefinitions of %q to 
be deleted", clusterExtensionName) require.EventuallyWithT(t, func(ct *assert.CollectT) { list := &apiextensionsv1.CustomResourceDefinitionList{} - err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) + err := globalClient.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) assert.NoError(ct, err) assert.Empty(ct, list.Items) }, 5*pollDuration, pollInterval) @@ -259,7 +259,7 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { t.Logf("By waiting for ClusterRoleBindings of %q to be deleted", clusterExtensionName) require.EventuallyWithT(t, func(ct *assert.CollectT) { list := &rbacv1.ClusterRoleBindingList{} - err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) + err := globalClient.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) assert.NoError(ct, err) assert.Empty(ct, list.Items) }, 2*pollDuration, pollInterval) @@ -267,7 +267,7 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { t.Logf("By waiting for ClusterRoles of %q to be deleted", clusterExtensionName) require.EventuallyWithT(t, func(ct *assert.CollectT) { list := &rbacv1.ClusterRoleList{} - err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) + err := globalClient.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) assert.NoError(ct, err) assert.Empty(ct, list.Items) }, 2*pollDuration, pollInterval) @@ -275,32 +275,32 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { func testCleanup(t *testing.T, cat *ocv1.ClusterCatalog, clusterExtension *ocv1.ClusterExtension, sa *corev1.ServiceAccount, ns *corev1.Namespace) { t.Logf("By deleting ClusterCatalog %q", cat.Name) - require.NoError(t, c.Delete(context.Background(), cat)) + require.NoError(t, globalClient.Delete(context.Background(), cat)) require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) t.Logf("By deleting ClusterExtension %q", clusterExtension.Name) - require.NoError(t, c.Delete(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Delete(context.Background(), clusterExtension)) require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, &ocv1.ClusterExtension{}) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, &ocv1.ClusterExtension{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) t.Logf("By deleting ServiceAccount %q", sa.Name) - require.NoError(t, c.Delete(context.Background(), sa)) + require.NoError(t, globalClient.Delete(context.Background(), sa)) require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: sa.Name, Namespace: sa.Namespace}, &corev1.ServiceAccount{}) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: sa.Name, Namespace: sa.Namespace}, &corev1.ServiceAccount{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) ensureNoExtensionResources(t, clusterExtension.Name) t.Logf("By deleting Namespace %q", ns.Name) - require.NoError(t, 
c.Delete(context.Background(), ns)) + require.NoError(t, globalClient.Delete(context.Background(), ns)) require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: ns.Name}, &corev1.Namespace{}) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: ns.Name}, &corev1.Namespace{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) } @@ -330,7 +330,7 @@ func TestClusterExtensionInstallRegistry(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -349,16 +349,16 @@ func TestClusterExtensionInstallRegistry(t *testing.T) { } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By eventually reporting progressing as True") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -368,7 +368,7 @@ func TestClusterExtensionInstallRegistry(t *testing.T) { t.Log("By eventually installing the package successfully") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -390,7 +390,7 @@ func TestClusterExtensionInstallRegistryDynamic(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -419,15 +419,15 @@ prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, }, } - require.NoError(t, c.Update(context.Background(), &cm)) + require.NoError(t, globalClient.Update(context.Background(), &cm)) t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the 
ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, 2*time.Minute, pollInterval) // Give the check 2 minutes instead of the typical 1 for the pod's @@ -436,7 +436,7 @@ location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, // ConfigMap cache TTL of 1 minute = 2 minutes t.Log("By eventually reporting progressing as True") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -446,7 +446,7 @@ location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, t.Log("By eventually installing the package successfully") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -465,11 +465,11 @@ func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { require.NoError(t, err) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) defer func(cat *ocv1.ClusterCatalog) { - require.NoError(t, c.Delete(context.Background(), cat)) + require.NoError(t, globalClient.Delete(context.Background(), cat)) require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) }(extraCatalog) @@ -488,16 +488,16 @@ func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { } t.Log("It resolves to multiple bundle paths") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a failed resolution with multiple bundles") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By 
eventually reporting Progressing == True and Reason Retrying") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -513,7 +513,7 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -530,10 +530,10 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { Name: sa.Name, }, } - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful installation") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) assert.Equal(ct, &ocv1.ClusterExtensionInstallStatus{Bundle: ocv1.BundleMetadata{ Name: "test-operator.1.0.0", @@ -553,15 +553,15 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { t.Log("By updating the ClusterExtension resource to a non-successor version") // 1.2.0 does not replace/skip/skipRange 1.0.0. 
clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - require.NoError(t, c.Update(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Update(context.Background(), clusterExtension)) t.Log("By eventually reporting an unsatisfiable resolution") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By eventually reporting Progressing == True and Reason Retrying") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, ocv1.ReasonRetrying, cond.Reason) @@ -576,7 +576,7 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -592,10 +592,10 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { Name: sa.Name, }, } - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -608,10 +608,10 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { // 1.2.0 does not replace/skip/skipRange 1.0.0. 
clusterExtension.Spec.Source.Catalog.Version = "1.2.0" clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified - require.NoError(t, c.Update(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Update(context.Background(), clusterExtension)) t.Log("By eventually reporting a satisfiable resolution") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -625,7 +625,7 @@ func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { t.Log("When resolving upgrade edges") clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -641,10 +641,10 @@ func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { Name: sa.Name, }, } - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -656,10 +656,10 @@ func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { t.Log("By updating the ClusterExtension resource by skipping versions") // 1.0.1 replaces 1.0.0 in the test catalog clusterExtension.Spec.Source.Catalog.Version = "1.0.1" - require.NoError(t, c.Update(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Update(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -673,7 +673,7 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { t.Log("It resolves again when a catalog is patched with new ImageRef") clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) 
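NOTE: the e2e assertions above and below all lean on testify's EventuallyWithT polling idiom. For reference, a minimal self-contained sketch of that pattern follows (illustrative only, not part of the patch; the helper name waitForInstalled and the myClient parameter are placeholders): the callback is re-run every tick until the timeout, assertions are recorded on the *assert.CollectT, and the test fails only if the final completed attempt still has failed assertions.

// Sketch of the EventuallyWithT polling pattern used by these e2e tests.
// waitForInstalled and myClient are hypothetical names for illustration.
package e2e_sketch

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	ocv1 "github.com/operator-framework/operator-controller/api/v1"
)

func waitForInstalled(t *testing.T, myClient client.Client, name string) {
	ext := &ocv1.ClusterExtension{}
	require.EventuallyWithT(t, func(ct *assert.CollectT) {
		// Re-fetch the object on every attempt so each check sees fresh status.
		assert.NoError(ct, myClient.Get(context.Background(), types.NamespacedName{Name: name}, ext))
		cond := apimeta.FindStatusCondition(ext.Status.Conditions, ocv1.TypeInstalled)
		if assert.NotNil(ct, cond) {
			assert.Equal(ct, metav1.ConditionTrue, cond.Status)
		}
	}, 1*time.Minute, 1*time.Second)
}

Re-fetching the object inside the callback keeps each attempt independent, which is why every call site in this file issues a fresh Get before inspecting conditions.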
clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -698,11 +698,11 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -716,7 +716,7 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { err := patchTestCatalog(context.Background(), testCatalogName, updatedCatalogImage) require.NoError(t, err) require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -726,7 +726,7 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -760,7 +760,7 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { sa, err := createServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: ns.Name}, clusterExtensionName) require.NoError(t, err) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -779,11 +779,11 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), 
types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -797,7 +797,7 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { err = crane.Tag(v2Image, latestImageTag, crane.Insecure) require.NoError(t, err) require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -807,7 +807,7 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -821,7 +821,7 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T t.Log("It resolves again when managed content is changed") clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -840,11 +840,11 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T } t.Log("It installs the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By reporting a successful installation") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -860,11 +860,11 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T Namespace: clusterExtension.Spec.Namespace, }, } - require.NoError(t, c.Delete(context.Background(), testConfigMap)) + require.NoError(t, globalClient.Delete(context.Background(), testConfigMap)) t.Log("By eventually re-creating the managed resource") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: testConfigMap.Name, Namespace: testConfigMap.Namespace}, testConfigMap)) + assert.NoError(ct, 
globalClient.Get(context.Background(), types.NamespacedName{Name: testConfigMap.Name, Namespace: testConfigMap.Namespace}, testConfigMap)) }, pollDuration, pollInterval) } @@ -881,10 +881,10 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes Namespace: ns.Name, }, } - err := c.Create(context.Background(), sa) + err := globalClient.Create(context.Background(), sa) require.NoError(t, err) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -903,16 +903,16 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By eventually reporting Progressing == True with Reason Retrying") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -922,7 +922,7 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes t.Log("By eventually failing to install the package successfully due to insufficient ServiceAccount permissions") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionFalse, cond.Status) @@ -940,7 +940,7 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes // after creating and binding the needed permissions to the ServiceAccount. 
t.Log("By eventually installing the package successfully") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -952,7 +952,7 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes t.Log("By eventually reporting Progressing == True with Reason Success") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 354ef75f4..7441d1f0b 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -18,8 +18,8 @@ import ( ) var ( - cfg *rest.Config - c client.Client + globalConfig *rest.Config + globalClient client.Client ) const ( @@ -29,11 +29,11 @@ const ( ) func TestMain(m *testing.M) { - cfg = ctrl.GetConfigOrDie() + globalConfig = ctrl.GetConfigOrDie() var err error utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme)) - c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + globalClient, err = client.New(globalConfig, client.Options{Scheme: scheme.Scheme}) utilruntime.Must(err) os.Exit(m.Run()) @@ -61,7 +61,7 @@ func createTestCatalog(ctx context.Context, name string, imageRef string) (*ocv1 }, } - err := c.Create(ctx, catalog) + err := globalClient.Create(ctx, catalog) return catalog, err } @@ -71,7 +71,7 @@ func createTestCatalog(ctx context.Context, name string, imageRef string) (*ocv1 func patchTestCatalog(ctx context.Context, name string, newImageRef string) error { // Fetch the existing ClusterCatalog catalog := &ocv1.ClusterCatalog{} - err := c.Get(ctx, client.ObjectKey{Name: name}, catalog) + err := globalClient.Get(ctx, client.ObjectKey{Name: name}, catalog) if err != nil { return err } @@ -80,7 +80,7 @@ func patchTestCatalog(ctx context.Context, name string, newImageRef string) erro catalog.Spec.Source.Image.Ref = newImageRef // Patch the ClusterCatalog - err = c.Update(ctx, catalog) + err = globalClient.Update(ctx, catalog) if err != nil { return err } diff --git a/test/e2e/metrics_test.go b/test/e2e/metrics_test.go index a1f6c4a2c..3d15035b8 100644 --- a/test/e2e/metrics_test.go +++ b/test/e2e/metrics_test.go @@ -16,23 +16,33 @@ package e2e import ( "bytes" "context" - "fmt" - "io" - "os/exec" + "errors" "strings" "testing" "time" "github.com/stretchr/testify/require" - - "github.com/operator-framework/operator-controller/test/utils" + authenticationv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" + 
"k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/config" ) // TestOperatorControllerMetricsExportedEndpoint verifies that the metrics endpoint for the operator controller func TestOperatorControllerMetricsExportedEndpoint(t *testing.T) { - client := utils.FindK8sClient(t) - config := NewMetricsTestConfig( - t, client, + kubeClient, restConfig := findK8sClient(t) + mtc := NewMetricsTestConfig( + t, + kubeClient, + restConfig, "control-plane=operator-controller-controller-manager", "operator-controller-metrics-reader", "operator-controller-metrics-binding", @@ -41,14 +51,16 @@ func TestOperatorControllerMetricsExportedEndpoint(t *testing.T) { "https://operator-controller-service.NAMESPACE.svc.cluster.local:8443/metrics", ) - config.run() + mtc.run() } // TestCatalogdMetricsExportedEndpoint verifies that the metrics endpoint for catalogd func TestCatalogdMetricsExportedEndpoint(t *testing.T) { - client := utils.FindK8sClient(t) - config := NewMetricsTestConfig( - t, client, + kubeClient, restConfig := findK8sClient(t) + mtc := NewMetricsTestConfig( + t, + kubeClient, + restConfig, "control-plane=catalogd-controller-manager", "catalogd-metrics-reader", "catalogd-metrics-binding", @@ -57,13 +69,25 @@ func TestCatalogdMetricsExportedEndpoint(t *testing.T) { "https://catalogd-service.NAMESPACE.svc.cluster.local:7443/metrics", ) - config.run() + mtc.run() +} + +func findK8sClient(t *testing.T) (kubernetes.Interface, *rest.Config) { + cfg, err := config.GetConfig() + require.NoError(t, err, "Failed to get Kubernetes config") + + clientset, err := kubernetes.NewForConfig(cfg) + require.NoError(t, err, "Failed to create client from config") + + t.Log("Successfully created Kubernetes client via controller-runtime config") + return clientset, cfg } // MetricsTestConfig holds the necessary configurations for testing metrics endpoints. type MetricsTestConfig struct { t *testing.T - client string + kubeClient kubernetes.Interface + restConfig *rest.Config namespace string clusterRole string clusterBinding string @@ -73,13 +97,27 @@ type MetricsTestConfig struct { } // NewMetricsTestConfig initializes a new MetricsTestConfig. 
-func NewMetricsTestConfig(t *testing.T, client, selector, clusterRole, clusterBinding, serviceAccount, curlPodName, metricsURL string) *MetricsTestConfig { - namespace := getComponentNamespace(t, client, selector) +func NewMetricsTestConfig( + t *testing.T, + kubeClient kubernetes.Interface, + restConfig *rest.Config, + selector string, + clusterRole string, + clusterBinding string, + serviceAccount string, + curlPodName string, + metricsURL string, +) *MetricsTestConfig { + // Discover which namespace the relevant Pod is running in + namespace := getComponentNamespace(t, kubeClient, selector) + + // Replace the placeholder in the metrics URL metricsURL = strings.ReplaceAll(metricsURL, "NAMESPACE", namespace) return &MetricsTestConfig{ t: t, - client: client, + kubeClient: kubeClient, + restConfig: restConfig, namespace: namespace, clusterRole: clusterRole, clusterBinding: clusterBinding, @@ -89,134 +127,252 @@ func NewMetricsTestConfig(t *testing.T, client, selector, clusterRole, clusterBi } } -// run will execute all steps of those tests +// run executes the entire test flow func (c *MetricsTestConfig) run() { - c.createMetricsClusterRoleBinding() - token := c.getServiceAccountToken() - c.createCurlMetricsPod() - c.validate(token) - defer c.cleanup() + ctx := context.Background() + defer c.cleanup(ctx) + c.createMetricsClusterRoleBinding(ctx) + token := c.getServiceAccountToken(ctx) + c.createCurlMetricsPod(ctx) + c.waitForPodReady(ctx) + // Exec `curl` in the Pod to validate the metrics + c.validateMetricsEndpoint(ctx, token) } -// createMetricsClusterRoleBinding to binding and expose the metrics -func (c *MetricsTestConfig) createMetricsClusterRoleBinding() { - c.t.Logf("Creating ClusterRoleBinding %s in namespace %s", c.clusterBinding, c.namespace) - cmd := exec.Command(c.client, "create", "clusterrolebinding", c.clusterBinding, - "--clusterrole="+c.clusterRole, - "--serviceaccount="+c.namespace+":"+c.serviceAccount) - output, err := cmd.CombinedOutput() - require.NoError(c.t, err, "Error creating ClusterRoleBinding: %s", string(output)) +// createMetricsClusterRoleBinding to bind the cluster role so metrics are accessible +func (c *MetricsTestConfig) createMetricsClusterRoleBinding(ctx context.Context) { + c.t.Logf("Creating ClusterRoleBinding %q in namespace %q", c.clusterBinding, c.namespace) + + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.clusterBinding, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: c.serviceAccount, + Namespace: c.namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: c.clusterRole, + }, + } + + _, err := c.kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}) + require.NoError(c.t, err, "Error creating ClusterRoleBinding") } -// getServiceAccountToken return the token requires to have access to the metrics -func (c *MetricsTestConfig) getServiceAccountToken() string { - c.t.Logf("Generating ServiceAccount token at namespace %s", c.namespace) - cmd := exec.Command(c.client, "create", "token", c.serviceAccount, "-n", c.namespace) - tokenOutput, tokenCombinedOutput, err := stdoutAndCombined(cmd) - require.NoError(c.t, err, "Error creating token: %s", string(tokenCombinedOutput)) - return string(bytes.TrimSpace(tokenOutput)) +// getServiceAccountToken creates a TokenRequest for the service account +func (c *MetricsTestConfig) getServiceAccountToken(ctx context.Context) string { + c.t.Logf("Generating 
ServiceAccount token in namespace %q", c.namespace) + + tokenRequest := &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + Audiences: []string{"https://kubernetes.default.svc.cluster.local"}, + ExpirationSeconds: nil, + }, + } + + tr, err := c.kubeClient.CoreV1(). + ServiceAccounts(c.namespace). + CreateToken(ctx, c.serviceAccount, tokenRequest, metav1.CreateOptions{}) + require.NoError(c.t, err, "Error requesting token for SA %q", c.serviceAccount) + + token := tr.Status.Token + require.NotEmpty(c.t, token, "ServiceAccount token was empty") + return token } -// createCurlMetricsPod creates the Pod with curl image to allow check if the metrics are working -func (c *MetricsTestConfig) createCurlMetricsPod() { +// createCurlMetricsPod spawns a pod running `curlimages/curl` to check metrics +func (c *MetricsTestConfig) createCurlMetricsPod(ctx context.Context) { c.t.Logf("Creating curl pod (%s/%s) to validate the metrics endpoint", c.namespace, c.curlPodName) - cmd := exec.Command(c.client, "run", c.curlPodName, - "--image=curlimages/curl", "-n", c.namespace, - "--restart=Never", - "--overrides", `{ - "spec": { - "terminationGradePeriodSeconds": 0, - "containers": [{ - "name": "curl", - "image": "curlimages/curl", - "command": ["sh", "-c", "sleep 3600"], - "securityContext": { - "allowPrivilegeEscalation": false, - "capabilities": {"drop": ["ALL"]}, - "runAsNonRoot": true, - "runAsUser": 1000, - "seccompProfile": {"type": "RuntimeDefault"} - } - }], - "serviceAccountName": "`+c.serviceAccount+`" - } - }`) - output, err := cmd.CombinedOutput() - require.NoError(c.t, err, "Error creating curl pod: %s", string(output)) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.curlPodName, + Namespace: c.namespace, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: c.serviceAccount, + TerminationGracePeriodSeconds: ptr.To(int64(0)), + Containers: []corev1.Container{ + { + Name: "curl", + Image: "curlimages/curl", + Command: []string{"sh", "-c", "sleep 3600"}, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + RunAsNonRoot: ptr.To(true), + RunAsUser: ptr.To(int64(1000)), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + } + + _, err := c.kubeClient.CoreV1().Pods(c.namespace).Create(ctx, pod, metav1.CreateOptions{}) + require.NoError(c.t, err, "Error creating curl pod") } -// validate verifies if is possible to access the metrics -func (c *MetricsTestConfig) validate(token string) { +// waitForPodReady polls until the Pod is in Ready condition +func (c *MetricsTestConfig) waitForPodReady(ctx context.Context) { c.t.Log("Waiting for the curl pod to be ready") - waitCmd := exec.Command(c.client, "wait", "--for=condition=Ready", "pod", c.curlPodName, "-n", c.namespace, "--timeout=60s") - waitOutput, waitErr := waitCmd.CombinedOutput() - require.NoError(c.t, waitErr, "Error waiting for curl pod to be ready: %s", string(waitOutput)) - - c.t.Log("Validating the metrics endpoint") - curlCmd := exec.Command(c.client, "exec", c.curlPodName, "-n", c.namespace, "--", - "curl", "-v", "-k", "-H", "Authorization: Bearer "+token, c.metricsURL) - output, err := curlCmd.CombinedOutput() - require.NoError(c.t, err, "Error calling metrics endpoint: %s", string(output)) - require.Contains(c.t, string(output), "200 OK", "Metrics endpoint did not return 200 OK") 
+ err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { + pod, err := c.kubeClient.CoreV1().Pods(c.namespace).Get(ctx, c.curlPodName, metav1.GetOptions{}) + if err != nil { + return false, err + } + for _, cond := range pod.Status.Conditions { + if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue { + return true, nil + } + } + return false, nil + }) + if errors.Is(err, context.DeadlineExceeded) { + c.t.Fatal("Timed out waiting for the curl pod to become Ready") + } + require.NoError(c.t, err, "Error waiting for curl pod to become Ready") } -// cleanup removes the created resources. Uses a context with timeout to prevent hangs. -func (c *MetricsTestConfig) cleanup() { - c.t.Log("Cleaning up resources") - _ = exec.Command(c.client, "delete", "clusterrolebinding", c.clusterBinding, "--ignore-not-found=true", "--force").Run() - _ = exec.Command(c.client, "delete", "pod", c.curlPodName, "-n", c.namespace, "--ignore-not-found=true", "--force").Run() +// validateMetricsEndpoint performs `kubectl exec ... curl ` logic +func (c *MetricsTestConfig) validateMetricsEndpoint(ctx context.Context, token string) { + c.t.Log("Validating the metrics endpoint via pod exec") - // Create a context with a 60-second timeout. - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() + // The command to run inside the container + cmd := []string{ + "curl", "-v", "-k", + "-H", "Authorization: Bearer " + token, + c.metricsURL, + } - // Wait for the ClusterRoleBinding to be deleted. - if err := waitForDeletion(ctx, c.client, "clusterrolebinding", c.clusterBinding); err != nil { - c.t.Logf("Error waiting for clusterrolebinding deletion: %v", err) - } else { - c.t.Log("ClusterRoleBinding deleted") + // Construct the request to exec into the pod + req := c.kubeClient.CoreV1().RESTClient(). + Post(). + Resource("pods"). + Namespace(c.namespace). + Name(c.curlPodName). + SubResource("exec"). + VersionedParams(&corev1.PodExecOptions{ + Container: "curl", + Command: cmd, + Stdin: false, + Stdout: true, + Stderr: true, + TTY: false, + }, scheme.ParameterCodec) + + // Create an SPDY executor + executor, err := remotecommand.NewSPDYExecutor(c.restConfig, "POST", req.URL()) + require.NoError(c.t, err, "Error creating SPDY executor to exec in pod") + + var stdout, stderr bytes.Buffer + streamOpts := remotecommand.StreamOptions{ + Stdin: nil, + Stdout: &stdout, + Stderr: &stderr, + Tty: false, } - // Wait for the Pod to be deleted. 
- if err := waitForDeletion(ctx, c.client, "pod", c.curlPodName, "-n", c.namespace); err != nil { - c.t.Logf("Error waiting for pod deletion: %v", err) + err = executor.StreamWithContext(ctx, streamOpts) + require.NoError(c.t, err, "Error streaming exec request: %v", stderr.String()) + + // Combine stdout + stderr + combined := stdout.String() + stderr.String() + require.Contains(c.t, combined, "200 OK", "Metrics endpoint did not return 200 OK") +} + +// cleanup deletes the test resources +func (c *MetricsTestConfig) cleanup(ctx context.Context) { + c.t.Log("Cleaning up resources") + policy := metav1.DeletePropagationForeground + + // Delete the ClusterRoleBinding + _ = c.kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, c.clusterBinding, metav1.DeleteOptions{ + PropagationPolicy: &policy, + }) + waitForClusterRoleBindingDeletion(ctx, c.t, c.kubeClient, c.clusterBinding) + + // "Force" delete the Pod by setting grace period to 0 + gracePeriod := int64(0) + _ = c.kubeClient.CoreV1().Pods(c.namespace).Delete(ctx, c.curlPodName, metav1.DeleteOptions{ + GracePeriodSeconds: &gracePeriod, + PropagationPolicy: &policy, + }) + waitForPodDeletion(ctx, c.t, c.kubeClient, c.namespace, c.curlPodName) +} + +// waitForClusterRoleBindingDeletion polls until the named ClusterRoleBinding no longer exists +func waitForClusterRoleBindingDeletion(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, name string) { + err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { + _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } + return false, err + } + return false, nil + }) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + t.Fatalf("Timed out waiting for ClusterRoleBinding %q to be deleted", name) + } + t.Logf("Error waiting for ClusterRoleBinding %q deletion: %v", name, err) } else { - c.t.Log("Pod deleted") + t.Logf("ClusterRoleBinding %q deleted", name) } } -// waitForDeletion uses "kubectl wait" to block until the specified resource is deleted -// or until the 60-second timeout is reached. -func waitForDeletion(ctx context.Context, client, resourceType, resourceName string, extraArgs ...string) error { - args := []string{"wait", "--for=delete", resourceType, resourceName} - args = append(args, extraArgs...) - args = append(args, "--timeout=60s") - cmd := exec.CommandContext(ctx, client, args...) - output, err := cmd.CombinedOutput() +// waitForPodDeletion polls until the named Pod no longer exists +func waitForPodDeletion(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, namespace, name string) { + err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 90*time.Second, false, func(ctx context.Context) (bool, error) { + pod, getErr := kubeClient.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + if getErr != nil { + if apierrors.IsNotFound(getErr) { + return true, nil + } + return false, getErr + } + // Some extra log info if the Pod is still around + t.Logf("Pod %q still present, phase=%q, deleting... 
(Timestamp=%v)", + name, pod.Status.Phase, pod.DeletionTimestamp) + return false, nil + }) if err != nil { - return fmt.Errorf("error waiting for deletion of %s %s: %v, output: %s", resourceType, resourceName, err, string(output)) + if errors.Is(err, context.DeadlineExceeded) { + t.Fatalf("Timed out waiting for Pod %q to be deleted", name) + } + t.Logf("Error waiting for Pod %q deletion: %v", name, err) + } else { + t.Logf("Pod %q deleted", name) } - return nil } -// getComponentNamespace returns the namespace where operator-controller or catalogd is running -func getComponentNamespace(t *testing.T, client, selector string) string { - cmd := exec.Command(client, "get", "pods", "--all-namespaces", "--selector="+selector, "--output=jsonpath={.items[0].metadata.namespace}") - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error determining namespace: %s", string(output)) +// getComponentNamespace identifies which Namespace is running a Pod that matches `selector` +func getComponentNamespace(t *testing.T, kubeClient kubernetes.Interface, selector string) string { + t.Logf("Listing pods for selector %q to discover namespace", selector) + ctx := context.Background() + + pods, err := kubeClient.CoreV1().Pods("").List(ctx, metav1.ListOptions{ + LabelSelector: selector, + }) + require.NoError(t, err, "Error listing pods for selector %q", selector) + require.NotEmpty(t, pods.Items, "No pods found for selector %q", selector) - namespace := string(bytes.TrimSpace(output)) + namespace := pods.Items[0].Namespace if namespace == "" { - t.Fatal("No namespace found for selector " + selector) + t.Fatalf("No namespace found for selector %q", selector) } return namespace } - -func stdoutAndCombined(cmd *exec.Cmd) ([]byte, []byte, error) { - var outOnly, outAndErr bytes.Buffer - allWriter := io.MultiWriter(&outOnly, &outAndErr) - cmd.Stdout = allWriter - cmd.Stderr = &outAndErr - err := cmd.Run() - return outOnly.Bytes(), outAndErr.Bytes(), err -} diff --git a/test/utils/utils.go b/test/utils/utils.go deleted file mode 100644 index 1acc55fe6..000000000 --- a/test/utils/utils.go +++ /dev/null @@ -1,69 +0,0 @@ -package utils - -import ( - "context" - "fmt" - "io" - "net/url" - "os/exec" - "strings" - "testing" - - "k8s.io/client-go/kubernetes" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" -) - -// FindK8sClient returns the first available Kubernetes CLI client from the system, -// It checks for the existence of each client by running `version --client`. -// If no suitable client is found, the function terminates the test with a failure. -func FindK8sClient(t *testing.T) string { - t.Logf("Finding kubectl client") - clients := []string{"kubectl", "oc"} - for _, c := range clients { - // Would prefer to use `command -v`, but even that may not be installed! 
- if err := exec.Command(c, "version", "--client").Run(); err == nil { - t.Logf("Using %q as k8s client", c) - return c - } - } - t.Fatal("k8s client not found") - return "" -} - -func ReadTestCatalogServerContents(ctx context.Context, catalog *ocv1.ClusterCatalog, kubeClient kubernetes.Interface) ([]byte, error) { - if catalog == nil { - return nil, fmt.Errorf("cannot read nil catalog") - } - if catalog.Status.URLs == nil { - return nil, fmt.Errorf("catalog %q has no catalog urls", catalog.Name) - } - url, err := url.Parse(catalog.Status.URLs.Base) - if err != nil { - return nil, fmt.Errorf("error parsing clustercatalog url %q: %v", catalog.Status.URLs.Base, err) - } - // url is expected to be in the format of - // http://{service_name}.{namespace}.svc/catalogs/{catalog_name}/ - // so to get the namespace and name of the service we grab only - // the hostname and split it on the '.' character - ns := strings.Split(url.Hostname(), ".")[1] - name := strings.Split(url.Hostname(), ".")[0] - port := url.Port() - // the ProxyGet() call below needs an explicit port value, so if - // value from url.Port() is empty, we assume port 443. - if port == "" { - if url.Scheme == "https" { - port = "443" - } else { - port = "80" - } - } - resp := kubeClient.CoreV1().Services(ns).ProxyGet(url.Scheme, name, port, url.JoinPath("api", "v1", "all").Path, map[string]string{}) - rc, err := resp.Stream(ctx) - if err != nil { - return nil, err - } - defer rc.Close() - - return io.ReadAll(rc) -} diff --git a/vendor/carvel.dev/kapp/LICENSE b/vendor/carvel.dev/kapp/LICENSE deleted file mode 100644 index 1a9893b43..000000000 --- a/vendor/carvel.dev/kapp/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/carvel.dev/kapp/NOTICE b/vendor/carvel.dev/kapp/NOTICE deleted file mode 100644 index fc991529f..000000000 --- a/vendor/carvel.dev/kapp/NOTICE +++ /dev/null @@ -1,15 +0,0 @@ -kapp - -Copyright (c) 2019-Present Pivotal Software, Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/age_value.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/age_value.go deleted file mode 100644 index cb249344a..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/age_value.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "time" - - uitable "github.com/cppforlife/go-cli-ui/ui/table" - "k8s.io/apimachinery/pkg/util/duration" -) - -type ValueAge struct { - T time.Time -} - -var _ uitable.Value = ValueAge{} - -func NewValueAge(t time.Time) ValueAge { return ValueAge{T: t} } - -func (t ValueAge) String() string { - if t.T.IsZero() { - return "" - } - return duration.ShortHumanDuration(time.Now().Sub(t.T)) -} - -func (t ValueAge) Value() uitable.Value { return t } - -func (t ValueAge) Compare(other uitable.Value) int { - otherT := other.(ValueAge).T - switch { - case t.T.Equal(otherT): - return 0 - case t.T.Before(otherT): - return -1 - default: - return 1 - } -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/cancel_signals.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/cancel_signals.go deleted file mode 100644 index 781b0bd9a..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/cancel_signals.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "os" - "os/signal" - "syscall" -) - -type CancelSignals struct{} - -func (CancelSignals) Watch(stopFunc func()) { - signalCh := make(chan os.Signal, 1) - signal.Notify(signalCh, syscall.SIGINT, syscall.SIGHUP) - go func() { - defer signal.Stop(signalCh) - select { - case <-signalCh: - stopFunc() - } - }() -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/config_factory.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/config_factory.go deleted file mode 100644 index 621b93f75..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/config_factory.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "fmt" - "net" - "os" - "strings" - - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" -) - -type ConfigFactory interface { - ConfigurePathResolver(func() (string, error)) - ConfigureContextResolver(func() (string, error)) - ConfigureYAMLResolver(func() (string, error)) - ConfigureClient(float32, int) - RESTConfig() (*rest.Config, error) - DefaultNamespace() (string, error) -} - -type ConfigFactoryImpl struct { - pathResolverFunc func() (string, error) - contextResolverFunc func() (string, error) - yamlResolverFunc func() (string, error) - - qps float32 - burst int -} - -var _ ConfigFactory = &ConfigFactoryImpl{} - -func NewConfigFactoryImpl() *ConfigFactoryImpl { - return &ConfigFactoryImpl{} -} - -func (f *ConfigFactoryImpl) ConfigurePathResolver(resolverFunc func() (string, error)) { - f.pathResolverFunc = resolverFunc -} - -func (f *ConfigFactoryImpl) ConfigureContextResolver(resolverFunc func() (string, error)) { - f.contextResolverFunc = resolverFunc -} - -func (f *ConfigFactoryImpl) ConfigureYAMLResolver(resolverFunc func() (string, error)) { - f.yamlResolverFunc = resolverFunc -} - -func (f *ConfigFactoryImpl) ConfigureClient(qps float32, burst int) { - f.qps = qps - f.burst = burst -} - -func (f *ConfigFactoryImpl) RESTConfig() (*rest.Config, error) { - isExplicitYAMLConfig, config, err := f.clientConfig() - if err != nil { - return nil, err - } - - restConfig, err := config.ClientConfig() - if err != nil { - prefixMsg := "" - if isExplicitYAMLConfig { - prefixMsg = " (explicit config given)" - } - - hintMsg := "" - if strings.Contains(err.Error(), "no configuration has been provided") { - hintMsg = "Ensure cluster name is specified correctly in context configuration" - } - if len(hintMsg) > 0 { - hintMsg = " (hint: " + hintMsg + ")" - } - - return nil, fmt.Errorf("Building Kubernetes config%s: %w%s", prefixMsg, err, hintMsg) - } - - if f.qps > 0.0 { - restConfig.QPS = f.qps - restConfig.Burst = f.burst - } - - return restConfig, nil -} - -func (f *ConfigFactoryImpl) DefaultNamespace() (string, error) { - _, config, err := f.clientConfig() - if err != nil { - return "", err - } - - name, _, err := config.Namespace() - return name, err -} - -func (f *ConfigFactoryImpl) clientConfig() (bool, clientcmd.ClientConfig, error) { - path, err := f.pathResolverFunc() - if err != nil { - return false, nil, fmt.Errorf("Resolving config path: %w", err) - } - - context, err := f.contextResolverFunc() - if err != nil { - return false, nil, fmt.Errorf("Resolving config context: %w", err) - } - - configYAML, err := f.yamlResolverFunc() - if err != nil { - return false, nil, fmt.Errorf("Resolving config YAML: %w", err) - } - - if len(configYAML) > 0 { - kubernetesHost := os.Getenv("KUBERNETES_SERVICE_HOST") - kubernetesServicePort 
:= os.Getenv("KUBERNETES_SERVICE_PORT") - envHostPort := net.JoinHostPort(kubernetesHost, kubernetesServicePort) - if kubernetesServicePort == "" { - // client-go will manually add the port based on http/https - envHostPort = kubernetesHost - } - configYAML = strings.ReplaceAll(configYAML, "${KAPP_KUBERNETES_SERVICE_HOST_PORT}", envHostPort) - config, err := clientcmd.NewClientConfigFromBytes([]byte(configYAML)) - return true, config, err - } - - // Based on https://github.com/kubernetes/kubernetes/blob/30c7df5cd822067016640aa267714204ac089172/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go#L124 - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - overrides := &clientcmd.ConfigOverrides{} - - if len(path) > 0 { - loadingRules.ExplicitPath = path - } - if len(context) > 0 { - overrides.CurrentContext = context - } - - return false, clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides), nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/deduping_messages_ui.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/deduping_messages_ui.go deleted file mode 100644 index 1b4a46406..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/deduping_messages_ui.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "fmt" - "strings" - "sync" - "time" -) - -const ( - defaultForgetAfterDuration time.Duration = 1 * time.Minute -) - -type DedupingMessagesUI struct { - ui MessagesUI - lastSeen map[string]time.Time - lastSeenLock sync.Mutex - forgetAfter time.Duration -} - -var _ MessagesUI = &DedupingMessagesUI{} - -func NewDedupingMessagesUI(ui MessagesUI) *DedupingMessagesUI { - return &DedupingMessagesUI{ - ui: ui, - lastSeen: map[string]time.Time{}, - forgetAfter: defaultForgetAfterDuration, - } -} - -func (ui *DedupingMessagesUI) NotifySection(msg string, args ...interface{}) { - msg = fmt.Sprintf(msg, args...) - id := msg - - if ui.shouldSeeAndMark(id) { - ui.ui.NotifySection(msg) - } -} - -func (ui *DedupingMessagesUI) Notify(msgs []string) { - id := strings.Join(msgs, "\n") - - if ui.shouldSeeAndMark(id) { - ui.ui.Notify(msgs) - } -} - -func (ui *DedupingMessagesUI) shouldSeeAndMark(id string) bool { - ui.lastSeenLock.Lock() - defer ui.lastSeenLock.Unlock() - - when, found := ui.lastSeen[id] - if !found || time.Now().Sub(when) > ui.forgetAfter { - ui.lastSeen[id] = time.Now() - return true - } - - return false -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/deps_factory.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/deps_factory.go deleted file mode 100644 index 7482e7eaf..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/deps_factory.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "context" - "fmt" - "sync" - - "github.com/cppforlife/go-cli-ui/ui" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/discovery" - "k8s.io/client-go/discovery/cached/memory" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" -) - -type DepsFactory interface { - DynamicClient(opts DynamicClientOpts) (dynamic.Interface, error) - CoreClient() (kubernetes.Interface, error) - RESTMapper() (meta.RESTMapper, error) - ConfigureWarnings(warnings bool) -} - -type DepsFactoryImpl struct { - configFactory ConfigFactory - ui ui.UI - printTargetOnce *sync.Once - - Warnings bool -} - -var _ DepsFactory = &DepsFactoryImpl{} - -func NewDepsFactoryImpl(configFactory ConfigFactory, ui ui.UI) *DepsFactoryImpl { - return &DepsFactoryImpl{ - configFactory: configFactory, - ui: ui, - printTargetOnce: &sync.Once{}} -} - -type DynamicClientOpts struct { - Warnings bool -} - -func (f *DepsFactoryImpl) DynamicClient(opts DynamicClientOpts) (dynamic.Interface, error) { - config, err := f.configFactory.RESTConfig() - if err != nil { - return nil, err - } - - // copy to avoid mutating the passed-in config - cpConfig := rest.CopyConfig(config) - - if opts.Warnings { - cpConfig.WarningHandler = f.newWarningHandler() - } else { - cpConfig.WarningHandler = rest.NoWarnings{} - } - - clientset, err := dynamic.NewForConfig(cpConfig) - if err != nil { - return nil, fmt.Errorf("Building Dynamic clientset: %w", err) - } - - f.printTarget(config) - - return clientset, nil -} - -func (f *DepsFactoryImpl) CoreClient() (kubernetes.Interface, error) { - config, err := f.configFactory.RESTConfig() - if err != nil { - return nil, err - } - - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("Building Core clientset: %w", err) - } - - f.printTarget(config) - - return clientset, nil -} - -func (f *DepsFactoryImpl) RESTMapper() (meta.RESTMapper, error) { - config, err := f.configFactory.RESTConfig() - if err != nil { - return nil, err - } - - disc, err := discovery.NewDiscoveryClientForConfig(config) - if err != nil { - return nil, err - } - - cachedDisc := memory.NewMemCacheClient(disc) - mapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDisc) - - f.printTarget(config) - - return mapper, nil -} - -func (f *DepsFactoryImpl) ConfigureWarnings(warnings bool) { - f.Warnings = warnings -} - -func (f *DepsFactoryImpl) printTarget(config *rest.Config) { - f.printTargetOnce.Do(func() { - nodesDesc := f.summarizeNodes(config) - if len(nodesDesc) > 0 { - nodesDesc = fmt.Sprintf(" (nodes: %s)", nodesDesc) - } - f.ui.PrintLinef("Target cluster '%s'%s", config.Host, nodesDesc) - }) -} - -func (f *DepsFactoryImpl) summarizeNodes(config *rest.Config) string { - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return "" - } - - nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return "" - } - - switch len(nodes.Items) { - case 0: - return "" - - case 1: - return nodes.Items[0].Name - - default: - oldestNode := nodes.Items[0] - for _, node := range nodes.Items { - if node.CreationTimestamp.Before(&oldestNode.CreationTimestamp) { - oldestNode = node - } - } - return fmt.Sprintf("%s, %d+", oldestNode.Name, len(nodes.Items)-1) - } -} - -func (f *DepsFactoryImpl) newWarningHandler() rest.WarningHandler { - if !f.Warnings { - return 
rest.NoWarnings{} - } - options := rest.WarningWriterOptions{ - Deduplicate: true, - Color: false, - } - warningWriter := rest.NewWarningWriter(uiWriter{ui: f.ui}, options) - return warningWriter -} - -type uiWriter struct { - ui ui.UI -} - -func (w uiWriter) Write(data []byte) (int, error) { - w.ui.BeginLinef("%s", data) - return len(data), nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/flags_factory.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/flags_factory.go deleted file mode 100644 index e975ac1b4..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/flags_factory.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package core - -type FlagsFactory struct { - configFactory ConfigFactory - depsFactory DepsFactory -} - -func NewFlagsFactory(configFactory ConfigFactory, depsFactory DepsFactory) FlagsFactory { - return FlagsFactory{configFactory, depsFactory} -} - -func (f FlagsFactory) NewNamespaceNameFlag(str *string) *NamespaceNameFlag { - return NewNamespaceNameFlag(str, f.configFactory) -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/generate_name_flags.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/generate_name_flags.go deleted file mode 100644 index 25ef47409..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/generate_name_flags.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "github.com/spf13/cobra" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type GenerateNameFlags struct { - GenerateName bool -} - -func (s *GenerateNameFlags) Set(cmd *cobra.Command, _ FlagsFactory) { - cmd.Flags().BoolVar(&s.GenerateName, "generate-name", false, "Set to generate name") -} - -func (s *GenerateNameFlags) Apply(meta metav1.ObjectMeta) metav1.ObjectMeta { - if s.GenerateName { - meta.GenerateName = meta.Name + "-" - meta.Name = "" - } - return meta -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/help_sections.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/help_sections.go deleted file mode 100644 index 31b73081e..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/help_sections.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "github.com/cppforlife/cobrautil" -) - -const ( - cmdGroupKey = "kapp-group" -) - -var ( - AppHelpGroup = cobrautil.HelpSection{ - Key: cmdGroupKey, - Value: "app", - Title: "App Commands:", - } - AppSupportHelpGroup = cobrautil.HelpSection{ - Key: cmdGroupKey, - Value: "app-support", - Title: "App Support Commands:", - } - MiscHelpGroup = cobrautil.HelpSection{ - Key: cmdGroupKey, - Value: "misc", - Title: "Misc Commands:", - } - RestOfCommandsHelpGroup = cobrautil.HelpSection{ - Key: cmdGroupKey, - Value: "", // default - Title: "Available/Other Commands:", - } -) diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/kube_api_flags.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/kube_api_flags.go deleted file mode 100644 index d275a9d02..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/kube_api_flags.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "github.com/spf13/cobra" -) - -type KubeAPIFlags struct { - QPS float32 - Burst int -} - -func (f *KubeAPIFlags) Set(cmd *cobra.Command, _ FlagsFactory) { - // Similar names are used by kubelet and other controllers - cmd.PersistentFlags().Float32Var(&f.QPS, "kube-api-qps", 1000, "Set Kubernetes API client QPS limit") - cmd.PersistentFlags().IntVar(&f.Burst, "kube-api-burst", 1000, "Set Kubernetes API client burst limit") -} - -func (f *KubeAPIFlags) Configure(config ConfigFactory) { - config.ConfigureClient(f.QPS, f.Burst) -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/kubeconfig_flags.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/kubeconfig_flags.go deleted file mode 100644 index 1390483f4..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/kubeconfig_flags.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "os" - - "github.com/cppforlife/cobrautil" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -type KubeconfigFlags struct { - Path *KubeconfigPathFlag - Context *KubeconfigContextFlag - YAML *KubeconfigYAMLFlag -} - -func (f *KubeconfigFlags) Set(cmd *cobra.Command, _ FlagsFactory) { - f.Path = NewKubeconfigPathFlag() - cmd.PersistentFlags().Var(f.Path, "kubeconfig", "Path to the kubeconfig file ($KAPP_KUBECONFIG)") - - f.Context = NewKubeconfigContextFlag() - cmd.PersistentFlags().Var(f.Context, "kubeconfig-context", "Kubeconfig context override ($KAPP_KUBECONFIG_CONTEXT)") - - f.YAML = NewKubeconfigYAMLFlag() - cmd.PersistentFlags().Var(f.YAML, "kubeconfig-yaml", "Kubeconfig contents as YAML ($KAPP_KUBECONFIG_YAML)") -} - -type KubeconfigPathFlag struct { - value string -} - -var _ pflag.Value = &KubeconfigPathFlag{} -var _ cobrautil.ResolvableFlag = &KubeconfigPathFlag{} - -func NewKubeconfigPathFlag() *KubeconfigPathFlag { - return &KubeconfigPathFlag{} -} - -func (s *KubeconfigPathFlag) Set(val string) error { - s.value = val - return nil -} - -func (s *KubeconfigPathFlag) Type() string { return "string" } -func (s *KubeconfigPathFlag) String() string { return "" } // default for usage - -func (s *KubeconfigPathFlag) Value() (string, error) { - err := s.Resolve() - if err != nil { - return "", err - } - - return s.value, nil -} - -func (s *KubeconfigPathFlag) Resolve() error { - if len(s.value) > 0 { - return nil - } - - s.value = s.resolveValue() - - return nil -} - -func (s *KubeconfigPathFlag) resolveValue() string { - path := os.Getenv("KAPP_KUBECONFIG") - if len(path) > 0 { - return path - } - - return "" -} - -type KubeconfigContextFlag struct { - value string -} - -var _ pflag.Value = &KubeconfigContextFlag{} -var _ cobrautil.ResolvableFlag = &KubeconfigPathFlag{} - -func NewKubeconfigContextFlag() *KubeconfigContextFlag { - return &KubeconfigContextFlag{} -} - -func (s *KubeconfigContextFlag) Set(val string) error { - s.value = val - return nil -} - -func (s *KubeconfigContextFlag) Type() string { return "string" } -func (s *KubeconfigContextFlag) String() string { return "" } // default for usage - -func (s *KubeconfigContextFlag) Value() (string, error) { - err := s.Resolve() - if err != nil { - return "", err - } - - return s.value, nil -} - -func (s *KubeconfigContextFlag) Resolve() error { - if len(s.value) > 0 { - return nil - } - - s.value = os.Getenv("KAPP_KUBECONFIG_CONTEXT") - - return nil -} - -type KubeconfigYAMLFlag struct { - value string -} - -var _ pflag.Value = 
&KubeconfigYAMLFlag{} -var _ cobrautil.ResolvableFlag = &KubeconfigPathFlag{} - -func NewKubeconfigYAMLFlag() *KubeconfigYAMLFlag { - return &KubeconfigYAMLFlag{} -} - -func (s *KubeconfigYAMLFlag) Set(val string) error { - s.value = val - return nil -} - -func (s *KubeconfigYAMLFlag) Type() string { return "string" } -func (s *KubeconfigYAMLFlag) String() string { return "" } // default for usage - -func (s *KubeconfigYAMLFlag) Value() (string, error) { - err := s.Resolve() - if err != nil { - return "", err - } - - return s.value, nil -} - -func (s *KubeconfigYAMLFlag) Resolve() error { - if len(s.value) > 0 { - return nil - } - - s.value = os.Getenv("KAPP_KUBECONFIG_YAML") - - return nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/messages_ui.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/messages_ui.go deleted file mode 100644 index aad3a7a6a..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/messages_ui.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "sync" - "time" - - "github.com/cppforlife/go-cli-ui/ui" -) - -type MessagesUI interface { - NotifySection(msg string, args ...interface{}) - Notify(msgs []string) -} - -type PlainMessagesUI struct { - ui ui.UI - uiLock sync.RWMutex -} - -var _ MessagesUI = &PlainMessagesUI{} - -func NewPlainMessagesUI(ui ui.UI) *PlainMessagesUI { - return &PlainMessagesUI{ui: ui} -} - -func (ui *PlainMessagesUI) NotifySection(msg string, args ...interface{}) { - ui.notify("---- "+msg+" ----", args...) -} - -func (ui *PlainMessagesUI) Notify(msgs []string) { - for _, msg := range msgs { - ui.notify("%s", msg) - } -} - -func (ui *PlainMessagesUI) notify(msg string, args ...interface{}) { - ui.uiLock.Lock() - defer ui.uiLock.Unlock() - - ui.ui.BeginLinef(time.Now().Format("3:04:05PM")+": "+msg+"\n", args...) -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/namespace_flags.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/namespace_flags.go deleted file mode 100644 index 72f9e8bf6..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/namespace_flags.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "fmt" - "os" - - "github.com/cppforlife/cobrautil" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -type NamespaceFlags struct { - Name string -} - -func (s *NamespaceFlags) Set(cmd *cobra.Command, flagsFactory FlagsFactory) { - name := flagsFactory.NewNamespaceNameFlag(&s.Name) - cmd.Flags().VarP(name, "namespace", "n", "Specified namespace ($KAPP_NAMESPACE or default from kubeconfig)") -} - -type NamespaceNameFlag struct { - value *string - configFactory ConfigFactory -} - -var _ pflag.Value = &NamespaceNameFlag{} -var _ cobrautil.ResolvableFlag = &NamespaceNameFlag{} - -func NewNamespaceNameFlag(value *string, configFactory ConfigFactory) *NamespaceNameFlag { - return &NamespaceNameFlag{value, configFactory} -} - -func (s *NamespaceNameFlag) Set(val string) error { - *s.value = val - return nil -} - -func (s *NamespaceNameFlag) Type() string { return "string" } -func (s *NamespaceNameFlag) String() string { return "" } // default for usage - -func (s *NamespaceNameFlag) Resolve() error { - value, err := s.resolveValue() - if err != nil { - return err - } - - *s.value = value - - return nil -} - -func (s *NamespaceNameFlag) resolveValue() (string, error) { - if s.value != nil && len(*s.value) > 0 { - return *s.value, nil - } - - envVal := os.Getenv("KAPP_NAMESPACE") - if len(envVal) > 0 { - return envVal, nil - } - - configVal, err := s.configFactory.DefaultNamespace() - if err != nil { - return configVal, nil - } - - if len(configVal) > 0 { - return configVal, nil - } - - return "", fmt.Errorf("Expected to non-empty namespace name") -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/namespace_value.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/namespace_value.go deleted file mode 100644 index 20e8a7d02..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/namespace_value.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - uitable "github.com/cppforlife/go-cli-ui/ui/table" -) - -func NewValueNamespace(ns string) uitable.ValueString { - if len(ns) > 0 { - return uitable.NewValueString(ns) - } - return uitable.NewValueString("(cluster)") -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/strings_single_line_value.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/strings_single_line_value.go deleted file mode 100644 index 5b835a4c9..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/strings_single_line_value.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "strings" - - uitable "github.com/cppforlife/go-cli-ui/ui/table" -) - -type ValueStringsSingleLine struct { - S []string -} - -func NewValueStringsSingleLine(s []string) ValueStringsSingleLine { - return ValueStringsSingleLine{S: s} -} - -func (t ValueStringsSingleLine) String() string { return strings.Join(t.S, ", ") } -func (t ValueStringsSingleLine) Value() uitable.Value { return t } - -func (t ValueStringsSingleLine) Compare(other uitable.Value) int { - otherS := other.(ValueStringsSingleLine).S - switch { - case len(t.S) == len(otherS): - return 0 - case len(t.S) < len(otherS): - return -1 - default: - return 1 - } -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/unknown_bool_value.go b/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/unknown_bool_value.go deleted file mode 100644 index bfedc6b22..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/cmd/core/unknown_bool_value.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "fmt" - - uitable "github.com/cppforlife/go-cli-ui/ui/table" -) - -type ValueUnknownBool struct { - B *bool -} - -var _ uitable.Value = ValueUnknownBool{} - -func NewValueUnknownBool(b *bool) ValueUnknownBool { return ValueUnknownBool{B: b} } - -func (t ValueUnknownBool) String() string { - if t.B != nil { - return fmt.Sprintf("%t", *t.B) - } - return "" -} - -func (t ValueUnknownBool) Value() uitable.Value { return t } -func (t ValueUnknownBool) Compare(_ uitable.Value) int { panic("Never called") } diff --git a/vendor/carvel.dev/kapp/pkg/kapp/config/conf.go b/vendor/carvel.dev/kapp/pkg/kapp/config/conf.go deleted file mode 100644 index 3a3c2486e..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/config/conf.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - corev1 "k8s.io/api/core/v1" -) - -const ( - configLabelKey = "kapp.k14s.io/config" - configMapConfigKey = "config.yml" -) - -type Conf struct { - configs []Config -} - -func NewConfFromResources(resources []ctlres.Resource) ([]ctlres.Resource, Conf, error) { - var rsWithoutConfigs []ctlres.Resource - var configs []Config - - for _, res := range resources { - _, isLabeledAsConfig := res.Labels()[configLabelKey] - - switch { - case res.APIVersion() == configAPIVersion: - config, err := NewConfigFromResource(res) - if err != nil { - return nil, Conf{}, fmt.Errorf( - "Parsing resource '%s' as kapp config: %w", res.Description(), err) - } - configs = append(configs, config) - - case isLabeledAsConfig: - config, err := newConfigFromConfigMapRes(res) - if err != nil { - return nil, Conf{}, fmt.Errorf( - "Parsing resource '%s' labeled as kapp config: %w", res.Description(), err) - } - // Make sure to add ConfigMap resource to regular resources list - // (our goal of allowing kapp config in ConfigMaps is to allow - // both kubectl and kapp to work against exactly same configuration; - // hence want to preserve same behaviour) - rsWithoutConfigs = append(rsWithoutConfigs, res) - configs = append(configs, config) - - default: - rsWithoutConfigs = append(rsWithoutConfigs, res) - } - } - - return rsWithoutConfigs, Conf{configs}, nil -} - -func newConfigFromConfigMapRes(res ctlres.Resource) (Config, error) { - if res.APIVersion() != "v1" || res.Kind() != "ConfigMap" { - errMsg := "Expected kapp config to be within v1/ConfigMap but apiVersion or kind do not match" - return Config{}, fmt.Errorf(errMsg, res.Description()) - } - - configCM := corev1.ConfigMap{} - - err := res.AsTypedObj(&configCM) - if err != nil { - return Config{}, fmt.Errorf("Converting resource to ConfigMap: %w", err) - } - - configStr, found := configCM.Data[configMapConfigKey] - if !found { - return Config{}, fmt.Errorf("Expected to find field 'data.\"%s\"', but did not", configMapConfigKey) - } - - configRes, err := ctlres.NewResourceFromBytes([]byte(configStr)) - if err != nil { - return Config{}, fmt.Errorf("Parsing kapp config as resource: %w", err) - } - - return NewConfigFromResource(configRes) -} - -func (c Conf) RebaseMods() []ctlres.ResourceModWithMultiple { - var mods []ctlres.ResourceModWithMultiple - for _, config := range c.configs { - for _, rule := range config.RebaseRules { - mods = append(mods, rule.AsMods()...) 
- } - } - return mods -} - -func (c Conf) DiffAgainstLastAppliedFieldExclusionMods() []ctlres.FieldRemoveMod { - var mods []ctlres.FieldRemoveMod - for _, config := range c.configs { - for _, rule := range config.DiffAgainstLastAppliedFieldExclusionRules { - mods = append(mods, rule.AsMod()) - } - } - return mods -} - -func (c Conf) DiffAgainstExistingFieldExclusionMods() []ctlres.FieldRemoveMod { - var mods []ctlres.FieldRemoveMod - for _, config := range c.configs { - for _, rule := range config.DiffAgainstExistingFieldExclusionRules { - mods = append(mods, rule.AsMod()) - } - } - return mods -} - -func (c Conf) OwnershipLabelMods() func(kvs map[string]string) []ctlres.StringMapAppendMod { - return func(kvs map[string]string) []ctlres.StringMapAppendMod { - var mods []ctlres.StringMapAppendMod - for _, config := range c.configs { - for _, rule := range config.OwnershipLabelRules { - mods = append(mods, rule.AsMod(kvs)) - } - } - return mods - } -} - -func (c Conf) WaitRules() []WaitRule { - var rules []WaitRule - for _, config := range c.configs { - rules = append(rules, config.WaitRules...) - } - return rules -} - -func (c Conf) LabelScopingMods(defaultRules bool) func(kvs map[string]string) []ctlres.StringMapAppendMod { - return func(kvs map[string]string) []ctlres.StringMapAppendMod { - var mods []ctlres.StringMapAppendMod - for _, config := range c.configs { - for _, rule := range config.LabelScopingRules { - if rule.IsDefault && !defaultRules { - continue - } - mods = append(mods, rule.AsMod(kvs)) - } - } - return mods - } -} - -func (c Conf) TemplateRules() []TemplateRule { - var result []TemplateRule - for _, config := range c.configs { - result = append(result, config.TemplateRules...) - } - return result -} - -func (c Conf) PreflightRules() []PreflightRule { - var result []PreflightRule - for _, config := range c.configs { - result = append(result, config.PreflightRules...) - } - return result -} - -func (c Conf) DiffMaskRules() []DiffMaskRule { - var result []DiffMaskRule - for _, config := range c.configs { - result = append(result, config.DiffMaskRules...) - } - return result -} - -func (c Conf) AdditionalLabels() map[string]string { - result := map[string]string{} - for _, config := range c.configs { - for k, v := range config.AdditionalLabels { - result[k] = v - } - } - return result -} - -func (c Conf) ChangeGroupBindings() []ChangeGroupBinding { - var result []ChangeGroupBinding - for _, config := range c.configs { - result = append(result, config.ChangeGroupBindings...) - } - return result -} - -func (c Conf) ChangeRuleBindings() []ChangeRuleBinding { - var result []ChangeRuleBinding - for _, config := range c.configs { - result = append(result, config.ChangeRuleBindings...) - } - return result -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/config/config.go b/vendor/carvel.dev/kapp/pkg/kapp/config/config.go deleted file mode 100644 index 06efb8616..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/config/config.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - "carvel.dev/kapp/pkg/kapp/version" - "carvel.dev/kapp/pkg/kapp/yttresmod" - semver "github.com/hashicorp/go-version" - "sigs.k8s.io/yaml" -) - -const ( - configAPIVersion = "kapp.k14s.io/v1alpha1" - configKind = "Config" -) - -type Config struct { - APIVersion string `json:"apiVersion"` - Kind string - - MinimumRequiredVersion string `json:"minimumRequiredVersion,omitempty"` - - RebaseRules []RebaseRule - WaitRules []WaitRule - OwnershipLabelRules []OwnershipLabelRule - LabelScopingRules []LabelScopingRule - TemplateRules []TemplateRule - DiffMaskRules []DiffMaskRule - PreflightRules []PreflightRule - - AdditionalLabels map[string]string - DiffAgainstLastAppliedFieldExclusionRules []DiffAgainstLastAppliedFieldExclusionRule - DiffAgainstExistingFieldExclusionRules []DiffAgainstExistingFieldExclusionRule - - // TODO additional? - // TODO validations - ChangeGroupBindings []ChangeGroupBinding - ChangeRuleBindings []ChangeRuleBinding -} - -type WaitRule struct { - SupportsObservedGeneration bool - ConditionMatchers []WaitRuleConditionMatcher - ResourceMatchers []ResourceMatcher - Ytt *WaitRuleYtt -} - -type WaitRuleConditionMatcher struct { - Type string - Status string - Failure bool - Success bool - SupportsObservedGeneration bool - UnblockChanges bool - Timeout string -} - -type WaitRuleYtt struct { - // Contracts are named and versioned (eg v1) - // to provide a stable interface to rule authors. - // Multiple contracts will be offered at the same time - // so that existing rules do not not break as we decide to evolve running environment. - FuncContractV1 *FuncContractV1 `json:"funcContractV1"` -} - -type FuncContractV1 struct { - Resource string `json:"resource.star"` -} - -type RebaseRule struct { - ResourceMatchers []ResourceMatcher - - Path ctlres.Path - Paths []ctlres.Path - Type string - Sources []ctlres.FieldCopyModSource - - Ytt *RebaseRuleYtt -} - -type RebaseRuleYtt struct { - // Contracts are named (eg overlay) and versioned (eg v1) - // to provide a stable interface to rule authors. - // Multiple contracts will be offered at the same time - // so that existing rules do not not break as we decide to evolve running environment. - OverlayContractV1 *RebaseRuleYttOverlayContractV1 `json:"overlayContractV1"` -} - -type RebaseRuleYttOverlayContractV1 struct { - OverlayYAML string `json:"overlay.yml"` -} - -type DiffAgainstLastAppliedFieldExclusionRule struct { - ResourceMatchers []ResourceMatcher - Path ctlres.Path -} - -type DiffAgainstExistingFieldExclusionRule struct { - ResourceMatchers []ResourceMatcher - Path ctlres.Path -} - -type OwnershipLabelRule struct { - ResourceMatchers []ResourceMatcher - Path ctlres.Path -} - -type LabelScopingRule struct { - ResourceMatchers []ResourceMatcher - Path ctlres.Path - IsDefault bool `json:"isDefault"` -} - -type TemplateRule struct { - ResourceMatchers []ResourceMatcher - AffectedResources TemplateAffectedResources -} - -type DiffMaskRule struct { - ResourceMatchers []ResourceMatcher - Path ctlres.Path -} - -type TemplateAffectedResources struct { - ObjectReferences []TemplateAffectedObjRef - // TODO support label injections? 
-} - -type TemplateAffectedObjRef struct { - ResourceMatchers []ResourceMatcher - Path ctlres.Path - NameKey string `json:"nameKey"` -} - -type ChangeGroupBinding struct { - Name string - ResourceMatchers []ResourceMatcher -} - -type ChangeRuleBinding struct { - Rules []string - IgnoreIfCyclical bool - ResourceMatchers []ResourceMatcher -} - -type PreflightRule struct { - Name string - Config map[string]any -} - -func NewConfigFromResource(res ctlres.Resource) (Config, error) { - if res.APIVersion() != configAPIVersion { - return Config{}, fmt.Errorf( - "Expected kapp config to have apiVersion '%s', but was '%s'", - configAPIVersion, res.APIVersion()) - } - - if res.Kind() != configKind { - return Config{}, fmt.Errorf( - "Expected kapp config to have kind '%s', but was '%s'", - configKind, res.Kind()) - } - - bs, err := res.AsYAMLBytes() - if err != nil { - return Config{}, err - } - - return newConfigFromYAMLBytes(bs, res.Description()) -} - -func newConfigFromYAMLBytes(bs []byte, description string) (Config, error) { - var config Config - err := yaml.Unmarshal(bs, &config) - if err != nil { - return Config{}, fmt.Errorf("Unmarshaling %s: %w", description, err) - } - - err = config.Validate() - if err != nil { - return Config{}, fmt.Errorf("Validating config: %w", err) - } - - return config, nil -} - -func (c Config) Validate() error { - if c.APIVersion != configAPIVersion { - return fmt.Errorf("Validating apiVersion: Unknown version (known: %s)", configAPIVersion) - } - if c.Kind != configKind { - return fmt.Errorf("Validating kind: Unknown kind (known: %s)", configKind) - } - - if len(c.MinimumRequiredVersion) > 0 { - if c.MinimumRequiredVersion[0] == 'v' { - return fmt.Errorf("Validating minimum version: Must not have prefix 'v' (e.g. '0.8.0')") - } - - userConstraint, err := semver.NewConstraint(">=" + c.MinimumRequiredVersion) - if err != nil { - return fmt.Errorf("Parsing minimum version constraint: %w", err) - } - - kappVersion, err := semver.NewVersion(version.Version) - if err != nil { - return fmt.Errorf("Parsing version constraint: %w", err) - } - - if !userConstraint.Check(kappVersion) { - return fmt.Errorf("kapp version '%s' does "+ - "not meet the minimum required version '%s'", version.Version, c.MinimumRequiredVersion) - } - } - - for i, rule := range c.RebaseRules { - err := rule.Validate() - if err != nil { - return fmt.Errorf("Validating rebase rule %d: %w", i, err) - } - } - - return nil -} - -func (r RebaseRule) Validate() error { - if r.Ytt != nil { - if len(r.Path) > 0 || len(r.Paths) > 0 || len(r.Type) > 0 || len(r.Sources) > 0 { - return fmt.Errorf("Expected only resourceMatchers specified with ytt configuration") - } - return nil - } - if len(r.Path) > 0 && len(r.Paths) > 0 { - return fmt.Errorf("Expected only one of path or paths specified") - } - if len(r.Path) == 0 && len(r.Paths) == 0 { - return fmt.Errorf("Expected either path or paths to be specified") - } - return nil -} - -func (r RebaseRule) AsMods() []ctlres.ResourceModWithMultiple { - if r.Ytt != nil { - switch { - case r.Ytt.OverlayContractV1 != nil: - return []ctlres.ResourceModWithMultiple{yttresmod.OverlayContractV1Mod{ - ResourceMatcher: ctlres.AnyMatcher{ - Matchers: ResourceMatchers(r.ResourceMatchers).AsResourceMatchers(), - }, - OverlayYAML: r.Ytt.OverlayContractV1.OverlayYAML, - }} - - default: - panic("Unknown rebase rule ytt contract (supported: overlayContractV1)") - } - } - - var mods []ctlres.ResourceModWithMultiple - var paths []ctlres.Path - - if len(r.Paths) == 0 { - paths = 
append(paths, r.Path) - } else { - paths = r.Paths - } - - for _, path := range paths { - switch r.Type { - case "copy": - mods = append(mods, ctlres.FieldCopyMod{ - ResourceMatcher: ctlres.AnyMatcher{ - Matchers: ResourceMatchers(r.ResourceMatchers).AsResourceMatchers(), - }, - Path: path, - Sources: r.Sources, - }) - - case "remove": - mods = append(mods, ctlres.FieldRemoveMod{ - ResourceMatcher: ctlres.AnyMatcher{ - Matchers: ResourceMatchers(r.ResourceMatchers).AsResourceMatchers(), - }, - Path: path, - }) - - default: - panic(fmt.Sprintf("Unknown rebase rule type: %s (supported: copy, remove)", r.Type)) // TODO - } - } - - return mods -} - -func (r DiffAgainstLastAppliedFieldExclusionRule) AsMod() ctlres.FieldRemoveMod { - return ctlres.FieldRemoveMod{ - ResourceMatcher: ctlres.AnyMatcher{ - Matchers: ResourceMatchers(r.ResourceMatchers).AsResourceMatchers(), - }, - Path: r.Path, - } -} -func (r DiffAgainstExistingFieldExclusionRule) AsMod() ctlres.FieldRemoveMod { - return ctlres.FieldRemoveMod{ - ResourceMatcher: ctlres.AnyMatcher{ - Matchers: ResourceMatchers(r.ResourceMatchers).AsResourceMatchers(), - }, - Path: r.Path, - } -} - -func (r OwnershipLabelRule) AsMod(kvs map[string]string) ctlres.StringMapAppendMod { - return ctlres.StringMapAppendMod{ - ResourceMatcher: ctlres.AnyMatcher{ - Matchers: ResourceMatchers(r.ResourceMatchers).AsResourceMatchers(), - }, - Path: r.Path, - KVs: kvs, - } -} - -func (r LabelScopingRule) AsMod(kvs map[string]string) ctlres.StringMapAppendMod { - return ctlres.StringMapAppendMod{ - ResourceMatcher: ctlres.AnyMatcher{ - Matchers: ResourceMatchers(r.ResourceMatchers).AsResourceMatchers(), - }, - Path: r.Path, - SkipIfNotFound: true, - KVs: kvs, - } -} - -func (r WaitRule) ResourceMatcher() ctlres.ResourceMatcher { - return ctlres.AnyMatcher{ - Matchers: ResourceMatchers(r.ResourceMatchers).AsResourceMatchers(), - } -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/config/default.go b/vendor/carvel.dev/kapp/pkg/kapp/config/default.go deleted file mode 100644 index dc0edca58..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/config/default.go +++ /dev/null @@ -1,732 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - ctlres "carvel.dev/kapp/pkg/kapp/resources" -) - -const defaultConfigYAML = `--- -apiVersion: kapp.k14s.io/v1alpha1 -kind: Config - -rebaseRules: -# Copy over all metadata (with resourceVersion, etc.) 
-- path: [metadata] - type: copy - sources: [existing] - resourceMatchers: - - allMatcher: {} - -# Be specific about labels to be applied -- path: [metadata, labels] - type: remove - resourceMatchers: - - allMatcher: {} -- path: [metadata, labels] - type: copy - sources: [new] - resourceMatchers: - - allMatcher: {} - -# Be specific about annotations to be applied -- path: [metadata, annotations] - type: remove - resourceMatchers: - - allMatcher: {} -- path: [metadata, annotations] - type: copy - sources: [new] - resourceMatchers: - - allMatcher: {} - -# Prefer user provided, but allow cluster set -- paths: - - [spec, clusterIP] - - [spec, healthCheckNodePort] - type: copy - sources: [new, existing] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: Service} - -# Prefer user provided, but allow cluster set -- path: [spec, finalizers] - type: copy - sources: [new, existing] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: Namespace} - -# Openshift adds some annotations and labels to namespaces -- paths: - - [metadata, annotations, openshift.io/sa.scc.mcs] - - [metadata, annotations, openshift.io/sa.scc.supplemental-groups] - - [metadata, annotations, openshift.io/sa.scc.uid-range] - type: copy - sources: [new, existing] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: Namespace} - -# PVC -- paths: - - [metadata, annotations, pv.kubernetes.io/bind-completed] - - [metadata, annotations, pv.kubernetes.io/bound-by-controller] - - [metadata, annotations, pv.kubernetes.io/migrated-to] - - [metadata, annotations, volume.beta.kubernetes.io/storage-provisioner] - - [spec, storageClassName] - - [spec, volumeMode] - - [spec, volumeName] - type: copy - sources: [new, existing] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: PersistentVolumeClaim} - -- path: [metadata, annotations, "deployment.kubernetes.io/revision"] - type: copy - sources: [new, existing] - resourceMatchers: &appsV1DeploymentWithRevAnnKey - - apiVersionKindMatcher: {apiVersion: apps/v1, kind: Deployment} - - apiVersionKindMatcher: {apiVersion: apps/v1beta1, kind: Deployment} - - apiVersionKindMatcher: {apiVersion: apps/v1beta2, kind: Deployment} - - apiVersionKindMatcher: {apiVersion: extensions/v1beta1, kind: Deployment} - -- path: [webhooks, {allIndexes: true}, clientConfig, caBundle] - type: copy - sources: [new, existing] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: admissionregistration.k8s.io/v1beta1, kind: MutatingWebhookConfiguration} - - apiVersionKindMatcher: {apiVersion: admissionregistration.k8s.io/v1, kind: MutatingWebhookConfiguration} - - apiVersionKindMatcher: {apiVersion: admissionregistration.k8s.io/v1beta1, kind: ValidatingWebhookConfiguration} - - apiVersionKindMatcher: {apiVersion: admissionregistration.k8s.io/v1, kind: ValidatingWebhookConfiguration} - -- path: [spec, caBundle] - type: copy - sources: [new, existing] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: apiregistration.k8s.io/v1beta1, kind: APIService} - - apiVersionKindMatcher: {apiVersion: apiregistration.k8s.io/v1, kind: APIService} - -- path: [spec, conversion, webhookClientConfig, caBundle] - type: copy - sources: [new, existing] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: apiextensions.k8s.io/v1beta1, kind: CustomResourceDefinition} - -- path: [spec, conversion, webhook, clientConfig, caBundle] - type: copy - sources: [new, existing] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: apiextensions.k8s.io/v1, kind: 
CustomResourceDefinition} - -- path: [spec, nodeName] - type: copy - sources: [new, existing] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: Pod} - -# ServiceAccount controller appends secret named '${metadata.name}-token-${rand}' after the save -# Openshift adds a secret and an imagePullSecret named '${metadata.name}-dockercfg-${rand}' after the save -- ytt: - overlayContractV1: - overlay.yml: | - #@ load("@ytt:data", "data") - #@ load("@ytt:overlay", "overlay") - - #@ res_name = data.values.existing.metadata.name - - #! service account may be created with empty secrets - #@ secrets = [] - #@ if hasattr(data.values.existing, "secrets"): - #@ secrets = data.values.existing.secrets - #@ end - - #@ imagePullSecrets = [] - #@ if hasattr(data.values.existing, "imagePullSecrets"): - #@ imagePullSecrets = data.values.existing.imagePullSecrets - #@ end - - #@ token_secret_name = None - #@ token_secret_name_docker = None - #@ for k in secrets: - #@ if k.name.startswith(res_name+"-token-"): - #@ token_secret_name = k.name - #@ end - #@ if k.name.startswith(res_name+"-dockercfg-"): - #@ token_secret_name_docker = k.name - #@ end - #@ end - - #@ image_pull_secret_name = None - #@ for k in imagePullSecrets: - #@ if k.name.startswith(res_name+"-dockercfg-"): - #@ image_pull_secret_name = k.name - #@ end - #@ end - - #! in case token secret name is not included, do not modify anything - - #@ if/end image_pull_secret_name: - #@overlay/match by=overlay.all - --- - #@overlay/match missing_ok=True - imagePullSecrets: - #@overlay/match by=overlay.subset({"name": image_pull_secret_name}),when=0 - - name: #@ image_pull_secret_name - - #@ if/end token_secret_name: - #@overlay/match by=overlay.all - --- - #@overlay/match missing_ok=True - secrets: - #@overlay/match by=overlay.subset({"name": token_secret_name}),when=0 - - name: #@ token_secret_name - - #@ if/end token_secret_name_docker: - #@overlay/match by=overlay.all - --- - #@overlay/match missing_ok=True - secrets: - #@overlay/match by=overlay.subset({"name": token_secret_name_docker}),when=0 - - name: #@ token_secret_name_docker - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: ServiceAccount} - -# Secretgen populates secret data for annotated secrets -- paths: - - [data, .dockerconfigjson] - - [metadata, annotations, secretgen.carvel.dev/status] - type: copy - sources: [existing, new] - resourceMatchers: - - andMatcher: - matchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: Secret} - - hasAnnotationMatcher: - keys: [secretgen.carvel.dev/image-pull-secret] - - notMatcher: - matcher: - hasAnnotationMatcher: - keys: [kapp.k14s.io/disable-default-secretgen-rebase-rules] - -# aggregated ClusterRole rules are filled in by the control plane at runtime -# refs https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles -- paths: - - [rules] - type: copy - sources: [existing] - resourceMatchers: - - andMatcher: - matchers: - - anyMatcher: - matchers: - - apiVersionKindMatcher: {kind: ClusterRole, apiVersion: rbac.authorization.k8s.io/v1} - - apiVersionKindMatcher: {kind: ClusterRole, apiVersion: rbac.authorization.k8s.io/v1alpha1} - - apiVersionKindMatcher: {kind: ClusterRole, apiVersion: rbac.authorization.k8s.io/v1beta1} - - notMatcher: - matcher: - emptyFieldMatcher: - path: [aggregationRule] - -diffAgainstLastAppliedFieldExclusionRules: -- path: [metadata, annotations, "deployment.kubernetes.io/revision"] - resourceMatchers: *appsV1DeploymentWithRevAnnKey - 
-diffAgainstExistingFieldExclusionRules: -- path: [status] - resourceMatchers: - - allMatcher: {} - -diffMaskRules: -- path: [data] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: Secret} -- path: [stringData] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: Secret} - -ownershipLabelRules: -- path: [metadata, labels] - resourceMatchers: - - allMatcher: {} - -- path: [spec, template, metadata, labels] - resourceMatchers: - - andMatcher: - matchers: - - notMatcher: - matcher: &disableDefaultOwnershipLabelRulesAnnMatcher - hasAnnotationMatcher: - keys: [kapp.k14s.io/disable-default-ownership-label-rules] - - anyMatcher: - matchers: &withPodTemplate - # Deployment - - apiVersionKindMatcher: {apiVersion: apps/v1, kind: Deployment} - - apiVersionKindMatcher: {apiVersion: apps/v1beta2, kind: Deployment} - - apiVersionKindMatcher: {apiVersion: apps/v1beta1, kind: Deployment} - - apiVersionKindMatcher: {apiVersion: extensions/v1beta1, kind: Deployment} - # ReplicaSet - - apiVersionKindMatcher: {apiVersion: apps/v1, kind: ReplicaSet} - - apiVersionKindMatcher: {apiVersion: apps/v1beta2, kind: ReplicaSet} - - apiVersionKindMatcher: {apiVersion: apps/v1beta1, kind: ReplicaSet} - - apiVersionKindMatcher: {apiVersion: extensions/v1beta1, kind: ReplicaSet} - # StatefulSet - - apiVersionKindMatcher: {apiVersion: apps/v1, kind: StatefulSet} - - apiVersionKindMatcher: {apiVersion: apps/v1beta2, kind: StatefulSet} - - apiVersionKindMatcher: {apiVersion: apps/v1beta1, kind: StatefulSet} - - apiVersionKindMatcher: {apiVersion: extensions/v1beta1, kind: StatefulSet} - # DaemonSet - - apiVersionKindMatcher: {apiVersion: apps/v1, kind: DaemonSet} - - apiVersionKindMatcher: {apiVersion: apps/v1beta2, kind: DaemonSet} - - apiVersionKindMatcher: {apiVersion: apps/v1beta1, kind: DaemonSet} - - apiVersionKindMatcher: {apiVersion: extensions/v1beta1, kind: DaemonSet} - # Job - - apiVersionKindMatcher: {apiVersion: batch/v1, kind: Job} - -# TODO It seems that these labels are being ignored -# https://github.com/kubernetes/kubernetes/issues/74916 -- path: [spec, volumeClaimTemplates, {allIndexes: true}, metadata, labels] - resourceMatchers: - - andMatcher: - matchers: - - notMatcher: - matcher: *disableDefaultOwnershipLabelRulesAnnMatcher - - anyMatcher: - matchers: - # StatefulSet - - apiVersionKindMatcher: {apiVersion: apps/v1, kind: StatefulSet} - - apiVersionKindMatcher: {apiVersion: apps/v1beta1, kind: StatefulSet} - - apiVersionKindMatcher: {apiVersion: extensions/v1beta1, kind: StatefulSet} - -- path: [spec, template, metadata, labels] - resourceMatchers: - - andMatcher: - matchers: - - notMatcher: - matcher: *disableDefaultOwnershipLabelRulesAnnMatcher - - anyMatcher: - matchers: - - apiVersionKindMatcher: {apiVersion: batch/v1, kind: Job} - - apiVersionKindMatcher: {apiVersion: batch/v1beta1, kind: Job} - - apiVersionKindMatcher: {apiVersion: batch/v2alpha1, kind: Job} - -- path: [spec, jobTemplate, spec, template, metadata, labels] - resourceMatchers: - - andMatcher: - matchers: - - notMatcher: - matcher: *disableDefaultOwnershipLabelRulesAnnMatcher - - anyMatcher: - matchers: &cronJob - - apiVersionKindMatcher: {apiVersion: batch/v1, kind: CronJob} - - apiVersionKindMatcher: {apiVersion: batch/v1beta1, kind: CronJob} - - apiVersionKindMatcher: {apiVersion: batch/v2alpha1, kind: CronJob} - -labelScopingRules: -- path: [spec, selector] - isDefault: true - resourceMatchers: - - andMatcher: - matchers: - - notMatcher: - # Keep older annotation for backwards compatibility 
- matcher: &disableLabelScopingAnnMatcher - hasAnnotationMatcher: - keys: [kapp.k14s.io/disable-label-scoping] - - notMatcher: - matcher: &disableDefaultLabelScopingRulesAnnMatcher - hasAnnotationMatcher: - keys: [kapp.k14s.io/disable-default-label-scoping-rules] - - apiVersionKindMatcher: {apiVersion: v1, kind: Service} - -- path: [spec, selector, matchLabels] - isDefault: true - resourceMatchers: - - andMatcher: - matchers: - - notMatcher: - matcher: *disableLabelScopingAnnMatcher - - notMatcher: - matcher: *disableDefaultLabelScopingRulesAnnMatcher - - anyMatcher: - matchers: *withPodTemplate - -- path: [spec, selector, matchLabels] - isDefault: true - resourceMatchers: - - andMatcher: - matchers: - - notMatcher: - matcher: *disableLabelScopingAnnMatcher - - notMatcher: - matcher: *disableDefaultLabelScopingRulesAnnMatcher - - apiVersionKindMatcher: {apiVersion: policy/v1beta1, kind: PodDisruptionBudget} - -templateRules: -- resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: ConfigMap} - affectedResources: - objectReferences: - - path: [spec, template, spec, containers, {allIndexes: true}, env, {allIndexes: true}, valueFrom, configMapKeyRef] - resourceMatchers: *withPodTemplate - - path: [spec, template, spec, containers, {allIndexes: true}, envFrom, {allIndexes: true}, configMapRef] - resourceMatchers: *withPodTemplate - - path: [spec, template, spec, initContainers, {allIndexes: true}, env, {allIndexes: true}, valueFrom, configMapKeyRef] - resourceMatchers: *withPodTemplate - - path: [spec, template, spec, initContainers, {allIndexes: true}, envFrom, {allIndexes: true}, configMapRef] - resourceMatchers: *withPodTemplate - - path: [spec, template, spec, volumes, {allIndexes: true}, projected, sources, {allIndexes: true}, configMap] - resourceMatchers: *withPodTemplate - - path: [spec, template, spec, volumes, {allIndexes: true}, configMap] - resourceMatchers: *withPodTemplate - - - path: [spec, jobTemplate, spec, template, spec, containers, {allIndexes: true}, env, {allIndexes: true}, valueFrom, configMapKeyRef] - resourceMatchers: *cronJob - - path: [spec, jobTemplate, spec, template, spec, containers, {allIndexes: true}, envFrom, {allIndexes: true}, configMapRef] - resourceMatchers: *cronJob - - path: [spec, jobTemplate, spec, template, spec, initContainers, {allIndexes: true}, env, {allIndexes: true}, valueFrom, configMapKeyRef] - resourceMatchers: *cronJob - - path: [spec, jobTemplate, spec, template, spec, initContainers, {allIndexes: true}, envFrom, {allIndexes: true}, configMapRef] - resourceMatchers: *cronJob - - path: [spec, jobTemplate, spec, template, spec, volumes, {allIndexes: true}, projected, sources, {allIndexes: true}, configMap] - resourceMatchers: *cronJob - - path: [spec, jobTemplate, spec, template, spec, volumes, {allIndexes: true}, configMap] - resourceMatchers: *cronJob - - - path: [spec, volumes, {allIndexes: true}, configMap] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: Pod} - - - path: [spec, fetch, {allIndexes: true}, inline, pathsFrom, {allIndexes: true}, configMapRef] - resourceMatchers: &appMatchers - - apiVersionKindMatcher: {apiVersion: kappctrl.k14s.io/v1alpha1, kind: App} - - path: [spec, template, {allIndexes: true}, ytt, inline, pathsFrom, {allIndexes: true}, configMapRef] - resourceMatchers: *appMatchers - - path: [spec, template, {allIndexes: true}, ytt, valuesFrom, {allIndexes: true}, configMapRef] - resourceMatchers: *appMatchers - - path: [spec, template, {allIndexes: true}, helmTemplate, valuesFrom, 
{allIndexes: true}, configMapRef] - resourceMatchers: *appMatchers - - path: [spec, template, {allIndexes: true}, cue, valuesFrom, {allIndexes: true}, configMapRef] - resourceMatchers: *appMatchers - - - path: [spec, fetch, inline, pathsFrom, {allIndexes: true}, configMapRef] - resourceMatchers: &packageRepositoryMatchers - - apiVersionKindMatcher: {apiVersion: packaging.carvel.dev/v1alpha1, kind: PackageRepository} - -- resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: Secret} - affectedResources: - objectReferences: - - path: [spec, template, spec, containers, {allIndexes: true}, env, {allIndexes: true}, valueFrom, secretKeyRef] - resourceMatchers: *withPodTemplate - - path: [spec, template, spec, containers, {allIndexes: true}, envFrom, {allIndexes: true}, secretRef] - resourceMatchers: *withPodTemplate - - path: [spec, template, spec, initContainers, {allIndexes: true}, env, {allIndexes: true}, valueFrom, secretKeyRef] - resourceMatchers: *withPodTemplate - - path: [spec, template, spec, initContainers, {allIndexes: true}, envFrom, {allIndexes: true}, secretRef] - resourceMatchers: *withPodTemplate - - path: [spec, template, spec, imagePullSecrets, {allIndexes: true}] - resourceMatchers: *withPodTemplate - - path: [spec, template, spec, volumes, {allIndexes: true}, secret] - resourceMatchers: *withPodTemplate - nameKey: secretName - - path: [spec, template, spec, volumes, {allIndexes: true}, projected, sources, {allIndexes: true}, secret] - resourceMatchers: *withPodTemplate - - - path: [spec, jobTemplate, spec, template, spec, containers, {allIndexes: true}, env, {allIndexes: true}, valueFrom, secretKeyRef] - resourceMatchers: *cronJob - - path: [spec, jobTemplate, spec, template, spec, containers, {allIndexes: true}, envFrom, {allIndexes: true}, secretRef] - resourceMatchers: *cronJob - - path: [spec, jobTemplate, spec, template, spec, initContainers, {allIndexes: true}, env, {allIndexes: true}, valueFrom, secretKeyRef] - resourceMatchers: *cronJob - - path: [spec, jobTemplate, spec, template, spec, initContainers, {allIndexes: true}, envFrom, {allIndexes: true}, secretRef] - resourceMatchers: *cronJob - - path: [spec, jobTemplate, spec, template, spec, imagePullSecrets, {allIndexes: true}] - resourceMatchers: *cronJob - - path: [spec, jobTemplate, spec, template, spec, volumes, {allIndexes: true}, secret] - resourceMatchers: *cronJob - nameKey: secretName - - path: [spec, jobTemplate, spec, template, spec, volumes, {allIndexes: true}, projected, sources, {allIndexes: true}, secret] - resourceMatchers: *cronJob - - - path: [spec, volumes, {allIndexes: true}, secret] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: Pod} - nameKey: secretName - - path: [spec, imagePullSecrets, {allIndexes: true}] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: Pod} - - path: [imagePullSecrets, {allIndexes: true}] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: ServiceAccount} - - path: [secrets, {allIndexes: true}] - resourceMatchers: - - apiVersionKindMatcher: {apiVersion: v1, kind: ServiceAccount} - - - path: [spec, cluster, kubeconfigSecretRef] - resourceMatchers: *appMatchers - - path: [spec, fetch, {allIndexes: true}, inline, pathsFrom, {allIndexes: true}, secretRef] - resourceMatchers: *appMatchers - - path: [spec, fetch, {allIndexes: true}, imgpkgBundle, secretRef] - resourceMatchers: *appMatchers - - path: [spec, fetch, {allIndexes: true}, http, secretRef] - resourceMatchers: *appMatchers - - path: [spec, fetch, 
{allIndexes: true}, git, secretRef] - resourceMatchers: *appMatchers - - path: [spec, fetch, {allIndexes: true}, helmChart, repository, secretRef] - resourceMatchers: *appMatchers - - path: [spec, template, {allIndexes: true}, ytt, inline, pathsFrom, {allIndexes: true}, secretRef] - resourceMatchers: *appMatchers - - path: [spec, template, {allIndexes: true}, ytt, valuesFrom, {allIndexes: true}, secretRef] - resourceMatchers: *appMatchers - - path: [spec, template, {allIndexes: true}, helmTemplate, valuesFrom, {allIndexes: true}, secretRef] - resourceMatchers: *appMatchers - - path: [spec, template, {allIndexes: true}, cue, valuesFrom, {allIndexes: true}, secretRef] - resourceMatchers: *appMatchers - - path: [spec, template, {allIndexes: true}, sops, pgp, privateKeySecretRef] - resourceMatchers: *appMatchers - - - path: [spec, values, {allIndexes: true}, secretRef] - resourceMatchers: &packageInstallMatchers - - apiVersionKindMatcher: {apiVersion: packaging.carvel.dev/v1alpha1, kind: PackageInstall} - - path: [spec, cluster, kubeconfigSecretRef] - resourceMatchers: *packageInstallMatchers - - - path: [spec, fetch, inline, pathsFrom, {allIndexes: true}, secretRef] - resourceMatchers: *packageRepositoryMatchers - -changeGroupBindings: -- name: change-groups.kapp.k14s.io/crds - resourceMatchers: &crdMatchers - - apiGroupKindMatcher: {kind: CustomResourceDefinition, apiGroup: apiextensions.k8s.io} - -- name: change-groups.kapp.k14s.io/crds-{crd-group}-{crd-kind} - resourceMatchers: *crdMatchers - -- name: change-groups.kapp.k14s.io/namespaces - resourceMatchers: &namespaceMatchers - - apiGroupKindMatcher: {kind: Namespace, apiGroup: ""} - -- name: change-groups.kapp.k14s.io/namespaces-{name} - resourceMatchers: *namespaceMatchers - -- name: change-groups.kapp.k14s.io/storage-class - resourceMatchers: &storageClassMatchers - - apiVersionKindMatcher: {kind: StorageClass, apiVersion: storage/v1} - - apiVersionKindMatcher: {kind: StorageClass, apiVersion: storage/v1beta1} - -- name: change-groups.kapp.k14s.io/storage - resourceMatchers: &storageMatchers - - apiVersionKindMatcher: {kind: PersistentVolume, apiVersion: v1} - - apiVersionKindMatcher: {kind: PersistentVolumeClaim, apiVersion: v1} - -- name: change-groups.kapp.k14s.io/rbac-roles - resourceMatchers: &rbacRoleMatchers - - apiVersionKindMatcher: {kind: ClusterRole, apiVersion: rbac.authorization.k8s.io/v1} - - apiVersionKindMatcher: {kind: ClusterRole, apiVersion: rbac.authorization.k8s.io/v1alpha1} - - apiVersionKindMatcher: {kind: ClusterRole, apiVersion: rbac.authorization.k8s.io/v1beta1} - - apiVersionKindMatcher: {kind: Role, apiVersion: rbac.authorization.k8s.io/v1} - - apiVersionKindMatcher: {kind: Role, apiVersion: rbac.authorization.k8s.io/v1alpha1} - - apiVersionKindMatcher: {kind: Role, apiVersion: rbac.authorization.k8s.io/v1beta1} - -- name: change-groups.kapp.k14s.io/rbac-role-bindings - resourceMatchers: &rbacRoleBindingMatchers - - apiVersionKindMatcher: {kind: ClusterRoleBinding, apiVersion: rbac.authorization.k8s.io/v1} - - apiVersionKindMatcher: {kind: ClusterRoleBinding, apiVersion: rbac.authorization.k8s.io/v1alpha1} - - apiVersionKindMatcher: {kind: ClusterRoleBinding, apiVersion: rbac.authorization.k8s.io/v1beta1} - - apiVersionKindMatcher: {kind: RoleBinding, apiVersion: rbac.authorization.k8s.io/v1} - - apiVersionKindMatcher: {kind: RoleBinding, apiVersion: rbac.authorization.k8s.io/v1alpha1} - - apiVersionKindMatcher: {kind: RoleBinding, apiVersion: rbac.authorization.k8s.io/v1beta1} - -- name: 
change-groups.kapp.k14s.io/rbac - resourceMatchers: &rbacMatchers - - anyMatcher: - matchers: - - anyMatcher: {matchers: *rbacRoleMatchers} - - anyMatcher: {matchers: *rbacRoleBindingMatchers} - -- name: change-groups.kapp.k14s.io/pod-related - resourceMatchers: &podRelatedMatchers - - apiVersionKindMatcher: {kind: NetworkPolicy, apiVersion: extensions/v1beta1} - - apiVersionKindMatcher: {kind: NetworkPolicy, apiVersion: networking.k8s.io/v1} - - apiVersionKindMatcher: {kind: ResourceQuota, apiVersion: v1} - - apiVersionKindMatcher: {kind: LimitRange, apiVersion: v1} - - apiVersionKindMatcher: {kind: PodSecurityPolicy, apiVersion: extensions/v1beta1} - - apiVersionKindMatcher: {kind: PodSecurityPolicy, apiVersion: policy/v1beta1} - - apiVersionKindMatcher: {kind: PodDisruptionBudget, apiVersion: policy/v1beta1} - - apiVersionKindMatcher: {kind: PriorityClass, apiVersion: scheduling.k8s.io/v1alpha1} - - apiVersionKindMatcher: {kind: PriorityClass, apiVersion: scheduling.k8s.io/v1beta1} - - apiVersionKindMatcher: {kind: PriorityClass, apiVersion: scheduling.k8s.io/v1} - - apiVersionKindMatcher: {kind: RuntimeClass, apiVersion: node.k8s.io/v1alpha1} - - apiVersionKindMatcher: {kind: RuntimeClass, apiVersion: node.k8s.io/v1beta1} - - apiVersionKindMatcher: {kind: ServiceAccount, apiVersion: v1} - - apiVersionKindMatcher: {kind: Secret, apiVersion: v1} - - apiVersionKindMatcher: {kind: ConfigMap, apiVersion: v1} - # [Note]: Do not add Service into this group as it may - # delay other resources with load balancer provisioning - # - apiVersionKindMatcher: {kind: Service, apiVersion: v1} - -- name: change-groups.kapp.k14s.io/serviceaccount - resourceMatchers: &serviceAccountMatchers - - apiVersionKindMatcher: {kind: ServiceAccount, apiVersion: v1} - -- name: change-groups.kapp.k14s.io/secret-associated-with-sa - resourceMatchers: - - andMatcher: - matchers: - - apiVersionKindMatcher: {kind: Secret, apiVersion: v1} - - hasAnnotationMatcher: - keys: [kubernetes.io/service-account.name] - -- name: change-groups.kapp.k14s.io/kapp-controller-app - resourceMatchers: *appMatchers - -- name: change-groups.kapp.k14s.io/kapp-controller-packageinstall - resourceMatchers: *packageInstallMatchers - -changeRuleBindings: -# Insert CRDs before all CRs -- rules: - - "upsert after upserting change-groups.kapp.k14s.io/crds-{api-group}-{kind}" - resourceMatchers: - - andMatcher: - matchers: - - customResourceMatcher: {} - - notMatcher: - matcher: &disableDefaultChangeGroupAnnMatcher - hasAnnotationMatcher: - keys: [kapp.k14s.io/disable-default-change-group-and-rules] - -# Create SA before creating secret associated with SA -- rules: - - "upsert before upserting change-groups.kapp.k14s.io/secret-associated-with-sa" - resourceMatchers: - - apiVersionKindMatcher: {kind: ServiceAccount, apiVersion: v1} - -# Delete CRs before CRDs to retain detailed observability -# instead of having CRD deletion trigger all CR deletion -- rules: - - "delete before deleting change-groups.kapp.k14s.io/crds" - ignoreIfCyclical: true - resourceMatchers: - - andMatcher: - matchers: - - customResourceMatcher: {} - - notMatcher: - matcher: *disableDefaultChangeGroupAnnMatcher - -# Delete non-CRs after deleting CRDs so that if CRDs use conversion -# webhooks it's more likely that backing webhook resources are still -# available during deletion of CRs -- rules: - - "delete after deleting change-groups.kapp.k14s.io/crds" - ignoreIfCyclical: true - resourceMatchers: - - andMatcher: - matchers: - - notMatcher: - matcher: - customResourceMatcher: {} 
- - notMatcher: - matcher: - anyMatcher: - matchers: *crdMatchers - - notMatcher: - matcher: *disableDefaultChangeGroupAnnMatcher - -# Insert namespaces before all namespaced resources -- rules: - - "upsert after upserting change-groups.kapp.k14s.io/namespaces-{namespace}" - resourceMatchers: - - andMatcher: - matchers: - - hasNamespaceMatcher: {} - - notMatcher: - matcher: *disableDefaultChangeGroupAnnMatcher - -# Delete namespaces after deleting namespaced SAs so that resources like -# kapp-controller PackageInstalls can be deleted gracefully. -- rules: - - "delete before deleting change-groups.kapp.k14s.io/namespaces-{namespace}" - ignoreIfCyclical: true - resourceMatchers: - - andMatcher: - matchers: - - notMatcher: {matcher: *disableDefaultChangeGroupAnnMatcher} - - anyMatcher: {matchers: *serviceAccountMatchers} - -# Insert roles/ClusterRoles before inserting any roleBinding/ClusterRoleBinding -# Sometimes Binding Creation fail as corresponding Role is not created. -# https://github.com/carvel-dev/kapp/issues/145 -- rules: - - "upsert after upserting change-groups.kapp.k14s.io/rbac-roles" - ignoreIfCyclical: true - resourceMatchers: - - andMatcher: - matchers: - - anyMatcher: {matchers: *rbacRoleBindingMatchers} - - notMatcher: - matcher: *disableDefaultChangeGroupAnnMatcher - -- rules: - - "upsert before upserting change-groups.kapp.k14s.io/kapp-controller-packageinstall" - - "upsert before upserting change-groups.kapp.k14s.io/kapp-controller-app" - - "delete after deleting change-groups.kapp.k14s.io/kapp-controller-packageinstall" - - "delete after deleting change-groups.kapp.k14s.io/kapp-controller-app" - ignoreIfCyclical: true - resourceMatchers: - - andMatcher: - matchers: - - notMatcher: {matcher: *disableDefaultChangeGroupAnnMatcher} - - anyMatcher: - matchers: - - anyMatcher: {matchers: *serviceAccountMatchers} - - anyMatcher: {matchers: *rbacMatchers} - -- rules: - - "upsert after upserting change-groups.kapp.k14s.io/storage-class" - ignoreIfCyclical: true - resourceMatchers: - - apiVersionKindMatcher: {kind: PersistentVolume, apiVersion: v1} - - apiVersionKindMatcher: {kind: PersistentVolumeClaim, apiVersion: v1} - -- rules: - # [Note]: prefer to apply pod related changes first to - # work better with applications that do not reload changes - - "upsert after upserting change-groups.kapp.k14s.io/pod-related" - # [Note]: prefer to apply rbac changes first to potentially - # avoid restarts of Pods that rely on correct permissions - - "upsert after upserting change-groups.kapp.k14s.io/rbac" - - "upsert after upserting change-groups.kapp.k14s.io/storage-class" - - "upsert after upserting change-groups.kapp.k14s.io/storage" - ignoreIfCyclical: true - resourceMatchers: - # [Note]: Apply all resources after pod-related change group as it's - # common for other resources to rely on ConfigMaps, Secrets, etc. 
- - andMatcher: - matchers: - - notMatcher: - matcher: - anyMatcher: - matchers: - - anyMatcher: {matchers: *storageClassMatchers} - - anyMatcher: {matchers: *storageMatchers} - - anyMatcher: {matchers: *rbacMatchers} - - anyMatcher: {matchers: *podRelatedMatchers} - - hasNamespaceMatcher: {} -` - -func NewDefaultConfigString() string { return defaultConfigYAML } - -func NewConfFromResourcesWithDefaults(resources []ctlres.Resource) ([]ctlres.Resource, Conf, error) { - resources, conf, err := NewConfFromResources(resources) - if err != nil { - return nil, Conf{}, err - } - - defaultConfig, err := newConfigFromYAMLBytes([]byte(defaultConfigYAML), "config/default (kapp.k14s.io/v1alpha1)") - if err != nil { - return nil, Conf{}, err - } - - return resources, Conf{append([]Config{defaultConfig}, conf.configs...)}, err -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/config/resource_matchers.go b/vendor/carvel.dev/kapp/pkg/kapp/config/resource_matchers.go deleted file mode 100644 index e893f6677..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/config/resource_matchers.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" -) - -type ResourceMatchers []ResourceMatcher - -type ResourceMatcher struct { - AllMatcher *AllMatcher // default - AnyMatcher *AnyMatcher - NotMatcher *NotMatcher - AndMatcher *AndMatcher - APIGroupKindMatcher *APIGroupKindMatcher - APIVersionKindMatcher *APIVersionKindMatcher `json:"apiVersionKindMatcher"` - KindNamespaceNameMatcher *KindNamespaceNameMatcher - HasAnnotationMatcher *HasAnnotationMatcher - HasNamespaceMatcher *HasNamespaceMatcher - CustomResourceMatcher *CustomResourceMatcher - EmptyFieldMatcher *EmptyFieldMatcher -} - -type AllMatcher struct{} - -type AnyMatcher struct { - Matchers []ResourceMatcher -} - -type NotMatcher struct { - Matcher ResourceMatcher -} - -type AndMatcher struct { - Matchers []ResourceMatcher -} - -type APIGroupKindMatcher struct { - APIGroup string `json:"apiGroup"` - Kind string -} - -type APIVersionKindMatcher struct { - APIVersion string `json:"apiVersion"` - Kind string -} - -type KindNamespaceNameMatcher struct { - Kind string - Namespace string - Name string -} - -type HasAnnotationMatcher struct { - Keys []string -} - -type HasNamespaceMatcher struct { - Names []string -} - -type CustomResourceMatcher struct{} - -type EmptyFieldMatcher struct { - Path ctlres.Path -} - -func (ms ResourceMatchers) AsResourceMatchers() []ctlres.ResourceMatcher { - var result []ctlres.ResourceMatcher - for _, matcher := range ms { - result = append(result, matcher.AsResourceMatcher()) - } - return result -} - -func (m ResourceMatcher) AsResourceMatcher() ctlres.ResourceMatcher { - switch { - case m.AllMatcher != nil: - return ctlres.AllMatcher{} - - case m.AnyMatcher != nil: - return ctlres.AnyMatcher{ - Matchers: ResourceMatchers(m.AnyMatcher.Matchers).AsResourceMatchers(), - } - - case m.AndMatcher != nil: - return ctlres.AndMatcher{ - Matchers: ResourceMatchers(m.AndMatcher.Matchers).AsResourceMatchers(), - } - - case m.NotMatcher != nil: - return ctlres.NotMatcher{ - Matcher: m.NotMatcher.Matcher.AsResourceMatcher(), - } - - case m.KindNamespaceNameMatcher != nil: - return ctlres.KindNamespaceNameMatcher{ - Kind: m.KindNamespaceNameMatcher.Kind, - Namespace: m.KindNamespaceNameMatcher.Namespace, - Name: m.KindNamespaceNameMatcher.Name, - } - - case m.APIGroupKindMatcher != nil: - return 
ctlres.APIGroupKindMatcher{ - APIGroup: m.APIGroupKindMatcher.APIGroup, - Kind: m.APIGroupKindMatcher.Kind, - } - - case m.APIVersionKindMatcher != nil: - return ctlres.APIVersionKindMatcher{ - APIVersion: m.APIVersionKindMatcher.APIVersion, - Kind: m.APIVersionKindMatcher.Kind, - } - - case m.HasAnnotationMatcher != nil: - return ctlres.HasAnnotationMatcher{ - Keys: m.HasAnnotationMatcher.Keys, - } - - case m.HasNamespaceMatcher != nil: - return ctlres.HasNamespaceMatcher{ - Names: m.HasNamespaceMatcher.Names, - } - - case m.CustomResourceMatcher != nil: - return ctlres.CustomResourceMatcher{} - - case m.EmptyFieldMatcher != nil: - return ctlres.EmptyFieldMatcher{Path: m.EmptyFieldMatcher.Path} - - default: - panic(fmt.Sprintf("Unknown resource matcher specified: %#v", m)) - } -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/crdupgradesafety/change_validator.go b/vendor/carvel.dev/kapp/pkg/kapp/crdupgradesafety/change_validator.go deleted file mode 100644 index f69e67410..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/crdupgradesafety/change_validator.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package crdupgradesafety - -import ( - "bytes" - "errors" - "fmt" - "reflect" - - "github.com/openshift/crd-schema-checker/pkg/manifestcomparators" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation/field" -) - -// ChangeValidation is a function that accepts a FieldDiff -// as a parameter and should return: -// - a boolean representation of whether or not the change -// - an error if the change would be unsafe -// has been fully handled (i.e no additional changes exist) -type ChangeValidation func(diff FieldDiff) (bool, error) - -// EnumChangeValidation ensures that: -// - No enums are added to a field that did not previously have -// enum restrictions -// - No enums are removed from a field -// This function returns: -// - A boolean representation of whether or not the change -// has been fully handled (i.e the only change was to enum values) -// - An error if either of the above validations are not satisfied -func EnumChangeValidation(diff FieldDiff) (bool, error) { - // This function resets the enum values for the - // old and new field and compares them to determine - // if there are any additional changes that should be - // handled. Reseting the enum values allows for chained - // evaluations to check if they have handled all the changes - // without having to account for fields other than the ones - // they are designed to handle. This function should only be called when - // returning from this function to prevent unnecessary overwrites of - // these fields. 
- handled := func() bool { - diff.Old.Enum = []v1.JSON{} - diff.New.Enum = []v1.JSON{} - return reflect.DeepEqual(diff.Old, diff.New) - } - - if len(diff.Old.Enum) == 0 && len(diff.New.Enum) > 0 { - return handled(), fmt.Errorf("enums added when there were no enum restrictions previously") - } - - oldSet := sets.NewString() - for _, enum := range diff.Old.Enum { - if !oldSet.Has(string(enum.Raw)) { - oldSet.Insert(string(enum.Raw)) - } - } - - newSet := sets.NewString() - for _, enum := range diff.New.Enum { - if !newSet.Has(string(enum.Raw)) { - newSet.Insert(string(enum.Raw)) - } - } - - diffSet := oldSet.Difference(newSet) - if diffSet.Len() > 0 { - return handled(), fmt.Errorf("enum values removed: %+v", diffSet.UnsortedList()) - } - - return handled(), nil -} - -// RequiredFieldChangeValidation adds a validation check to ensure that -// existing required fields can be marked as optional in a CRD schema: -// - No new values can be added as required that did not previously have -// any required fields present -// - Existing values can be removed from the required field -// This function returns: -// - A boolean representation of whether or not the change -// has been fully handled (i.e. the only change was to required field values) -// - An error if either of the above criteria are not met -func RequiredFieldChangeValidation(diff FieldDiff) (bool, error) { - handled := func() bool { - diff.Old.Required = []string{} - diff.New.Required = []string{} - return reflect.DeepEqual(diff.Old, diff.New) - } - - if len(diff.Old.Required) == 0 && len(diff.New.Required) > 0 { - return handled(), fmt.Errorf("new values added as required when previously no required fields existed: %+v", diff.New.Required) - } - - oldSet := sets.NewString() - for _, requiredField := range diff.Old.Required { - if !oldSet.Has(requiredField) { - oldSet.Insert(requiredField) - } - } - - newSet := sets.NewString() - for _, requiredField := range diff.New.Required { - if !newSet.Has(requiredField) { - newSet.Insert(requiredField) - } - } - - diffSet := newSet.Difference(oldSet) - if diffSet.Len() > 0 { - return handled(), fmt.Errorf("new required fields added: %+v", diffSet.UnsortedList()) - } - - return handled(), nil -} - -// MinimumChangeValidation adds a validation check to ensure that -// existing fields can have their minimum constraints updated in a CRD schema -// based on the following: -// - No minimum constraint can be added if one did not exist previously -// - Minimum constraints can not increase in value -// This function returns: -// - A boolean representation of whether or not the change -// has been fully handled (i.e. 
the only change was to minimum constraints) -// - An error if either of the above criteria are not met -func MinimumChangeValidation(diff FieldDiff) (bool, error) { - handled := func() bool { - diff.Old.Minimum = nil - diff.New.Minimum = nil - return reflect.DeepEqual(diff.Old, diff.New) - } - - switch { - case diff.Old.Minimum == nil && diff.New.Minimum != nil: - m := *diff.New.Minimum - return handled(), fmt.Errorf("minimum constraint added when one did not exist previously: %+v", m) - case diff.Old.Minimum != nil && diff.New.Minimum != nil: - oldMin := *diff.Old.Minimum - newMin := *diff.New.Minimum - if oldMin < newMin { - return handled(), fmt.Errorf("minimum constraint increased from %+v to %+v", oldMin, newMin) - } - fallthrough - default: - return handled(), nil - } -} - -// MinimumLengthChangeValidation adds a validation check to ensure that -// existing fields can have their minimum length constraints updated in a CRD schema -// based on the following: -// - No minimum length constraint can be added if one did not exist previously -// - Minimum length constraints can not increase in value -// This function returns: -// - A boolean representation of whether or not the change -// has been fully handled (i.e. the only change was to minimum length constraints) -// - An error if either of the above criteria are not met -func MinimumLengthChangeValidation(diff FieldDiff) (bool, error) { - handled := func() bool { - diff.Old.MinLength = nil - diff.New.MinLength = nil - return reflect.DeepEqual(diff.Old, diff.New) - } - - switch { - case diff.Old.MinLength == nil && diff.New.MinLength != nil: - m := *diff.New.MinLength - return handled(), fmt.Errorf("minimum length constraint added when one did not exist previously: %+v", m) - case diff.Old.MinLength != nil && diff.New.MinLength != nil: - oldMin := *diff.Old.MinLength - newMin := *diff.New.MinLength - if oldMin < newMin { - return handled(), fmt.Errorf("minimum length constraint increased from %+v to %+v", oldMin, newMin) - } - fallthrough - default: - return handled(), nil - } -} - -// MinimumItemsChangeValidation adds a validation check to ensure that -// existing fields can have their minimum item constraints updated in a CRD schema -// based on the following: -// - No minimum item constraint can be added if one did not exist previously -// - Minimum item constraints can not increase in value -// This function returns: -// - A boolean representation of whether or not the change -// has been fully handled (i.e. 
the only change was to minimum item constraints) -// - An error if either of the above criteria are not met -func MinimumItemsChangeValidation(diff FieldDiff) (bool, error) { - handled := func() bool { - diff.Old.MinItems = nil - diff.New.MinItems = nil - return reflect.DeepEqual(diff.Old, diff.New) - } - - switch { - case diff.Old.MinItems == nil && diff.New.MinItems != nil: - m := *diff.New.MinItems - return handled(), fmt.Errorf("minimum items constraint added when one did not exist previously: %+v", m) - case diff.Old.MinItems != nil && diff.New.MinItems != nil: - oldMin := *diff.Old.MinItems - newMin := *diff.New.MinItems - if oldMin < newMin { - return handled(), fmt.Errorf("minimum items constraint increased from %+v to %+v", oldMin, newMin) - } - fallthrough - default: - return handled(), nil - } -} - -// MinimumPropertiesChangeValidation adds a validation check to ensure that -// existing fields can have their minimum properties constraints updated in a CRD schema -// based on the following: -// - No minimum properties constraint can be added if one did not exist previously -// - Minimum properties constraints can not increase in value -// This function returns: -// - A boolean representation of whether or not the change -// has been fully handled (i.e. the only change was to minimum properties constraints) -// - An error if either of the above criteria are not met -func MinimumPropertiesChangeValidation(diff FieldDiff) (bool, error) { - handled := func() bool { - diff.Old.MinProperties = nil - diff.New.MinProperties = nil - return reflect.DeepEqual(diff.Old, diff.New) - } - - switch { - case diff.Old.MinProperties == nil && diff.New.MinProperties != nil: - m := *diff.New.MinProperties - return handled(), fmt.Errorf("minimum properties constraint added when one did not exist previously: %+v", m) - case diff.Old.MinProperties != nil && diff.New.MinProperties != nil: - oldMin := *diff.Old.MinProperties - newMin := *diff.New.MinProperties - if oldMin < newMin { - return handled(), fmt.Errorf("minimum properties constraint increased from %+v to %+v", oldMin, newMin) - } - fallthrough - default: - return handled(), nil - } -} - -// MaximumChangeValidation adds a validation check to ensure that -// existing fields can have their maximum constraints updated in a CRD schema -// based on the following: -// - No maximum constraint can be added if one did not exist previously -// - Maximum constraints can not decrease in value -// This function returns: -// - A boolean representation of whether or not the change -// has been fully handled (i.e. 
the only change was to maximum constraints) -// - An error if either of the above criteria are not met -func MaximumChangeValidation(diff FieldDiff) (bool, error) { - handled := func() bool { - diff.Old.Maximum = nil - diff.New.Maximum = nil - return reflect.DeepEqual(diff.Old, diff.New) - } - - switch { - case diff.Old.Maximum == nil && diff.New.Maximum != nil: - m := *diff.New.Maximum - return handled(), fmt.Errorf("maximum constraint added when one did not exist previously: %+v", m) - case diff.Old.Maximum != nil && diff.New.Maximum != nil: - oldMax := *diff.Old.Maximum - newMax := *diff.New.Maximum - if newMax < oldMax { - return handled(), fmt.Errorf("maximum constraint decreased from %+v to %+v", oldMax, newMax) - } - fallthrough - default: - return handled(), nil - } -} - -// MaximumLengthChangeValidation adds a validation check to ensure that -// existing fields can have their maximum length constraints updated in a CRD schema -// based on the following: -// - No maximum length constraint can be added if one did not exist previously -// - Maximum length constraints can not decrease in value -// This function returns: -// - A boolean representation of whether or not the change -// has been fully handled (i.e. the only change was to maximum length constraints) -// - An error if either of the above criteria are not met -func MaximumLengthChangeValidation(diff FieldDiff) (bool, error) { - handled := func() bool { - diff.Old.MaxLength = nil - diff.New.MaxLength = nil - return reflect.DeepEqual(diff.Old, diff.New) - } - - switch { - case diff.Old.MaxLength == nil && diff.New.MaxLength != nil: - m := *diff.New.MaxLength - return handled(), fmt.Errorf("maximum length constraint added when one did not exist previously: %+v", m) - case diff.Old.MaxLength != nil && diff.New.MaxLength != nil: - oldMax := *diff.Old.MaxLength - newMax := *diff.New.MaxLength - if newMax < oldMax { - return handled(), fmt.Errorf("maximum length constraint decreased from %+v to %+v", oldMax, newMax) - } - fallthrough - default: - return handled(), nil - } -} - -// MaximumItemsChangeValidation adds a validation check to ensure that -// existing fields can have their maximum item constraints updated in a CRD schema -// based on the following: -// - No maximum item constraint can be added if one did not exist previously -// - Maximum item constraints can not decrease in value -// This function returns: -// - A boolean representation of whether or not the change -// has been fully handled (i.e. 
the only change was to maximum item constraints) -// - An error if either of the above criteria are not met -func MaximumItemsChangeValidation(diff FieldDiff) (bool, error) { - handled := func() bool { - diff.Old.MaxItems = nil - diff.New.MaxItems = nil - return reflect.DeepEqual(diff.Old, diff.New) - } - - switch { - case diff.Old.MaxItems == nil && diff.New.MaxItems != nil: - m := *diff.New.MaxItems - return handled(), fmt.Errorf("maximum items constraint added when one did not exist previously: %+v", m) - case diff.Old.MaxItems != nil && diff.New.MaxItems != nil: - oldMax := *diff.Old.MaxItems - newMax := *diff.New.MaxItems - if newMax < oldMax { - return handled(), fmt.Errorf("maximum items constraint decreased from %+v to %+v", oldMax, newMax) - } - fallthrough - default: - return handled(), nil - } -} - -// MaximumPropertiesChangeValidation adds a validation check to ensure that -// existing fields can have their maximum properties constraints updated in a CRD schema -// based on the following: -// - No maximum properties constraint can be added if one did not exist previously -// - Maximum properties constraints can not increase in value -// This function returns: -// - A boolean representation of whether or not the change -// has been fully handled (i.e. the only change was to maximum properties constraints) -// - An error if either of the above criteria are not met -func MaximumPropertiesChangeValidation(diff FieldDiff) (bool, error) { - handled := func() bool { - diff.Old.MaxProperties = nil - diff.New.MaxProperties = nil - return reflect.DeepEqual(diff.Old, diff.New) - } - - switch { - case diff.Old.MaxProperties == nil && diff.New.MaxProperties != nil: - m := *diff.New.MaxProperties - return handled(), fmt.Errorf("maximum properties constraint added when one did not exist previously: %+v", m) - case diff.Old.MaxProperties != nil && diff.New.MaxProperties != nil: - oldMax := *diff.Old.MaxProperties - newMax := *diff.New.MaxProperties - if newMax < oldMax { - return handled(), fmt.Errorf("maximum properties constraint decreased from %+v to %+v", oldMax, newMax) - } - fallthrough - default: - return handled(), nil - } -} - -// DefaultValueChangeValidation adds a validation check to ensure that -// default values are not changed in a CRD schema: -// - No new value can be added as default that did not previously have a -// default value present -// - Default value of a field cannot be changed -// - Existing default value for a field cannot be removed -// This function returns: -// - A boolean representation of whether or not the change -// has been fully handled (i.e. 
the only change was to a field's default value) -// - An error if either of the above criteria are not met -func DefaultValueChangeValidation(diff FieldDiff) (bool, error) { - handled := func() bool { - diff.Old.Default = &v1.JSON{} - diff.New.Default = &v1.JSON{} - return reflect.DeepEqual(diff.Old, diff.New) - } - - switch { - case diff.Old.Default == nil && diff.New.Default != nil: - newDefault := diff.New.Default - return handled(), fmt.Errorf("new value added as default when previously no default value existed: %+v", newDefault) - - case diff.Old.Default != nil && diff.New.Default == nil: - oldDefault := diff.Old.Default.Raw - return handled(), fmt.Errorf("default value has been removed when previously a default value existed: %+v", oldDefault) - - case diff.Old.Default != nil && diff.New.Default != nil: - oldDefault := diff.Old.Default.Raw - newDefault := diff.New.Default.Raw - if !bytes.Equal(diff.Old.Default.Raw, diff.New.Default.Raw) { - return handled(), fmt.Errorf("default value has been changed from %+v to %+v", oldDefault, newDefault) - } - fallthrough - default: - return handled(), nil - } -} - -// ChangeValidator is a Validation implementation focused on -// handling updates to existing fields in a CRD -type ChangeValidator struct { - // Validations is a slice of ChangeValidations - // to run against each changed field - Validations []ChangeValidation -} - -func (cv *ChangeValidator) Name() string { - return "ChangeValidator" -} - -// Validate will compare each version in the provided existing and new CRDs. -// Since the ChangeValidator is tailored to handling updates to existing fields in -// each version of a CRD. As such the following is assumed: -// - Validating the removal of versions during an update is handled outside of this -// validator. If a version in the existing version of the CRD does not exist in the new -// version that version of the CRD is skipped in this validator. -// - Removal of existing fields is unsafe. Regardless of whether or not this is handled -// by a validator outside this one, if a field is present in a version provided by the existing CRD -// but not present in the same version provided by the new CRD this validation will fail. -// -// Additionally, any changes that are not validated and handled by the known ChangeValidations -// are deemed as unsafe and returns an error. -func (cv *ChangeValidator) Validate(old, new v1.CustomResourceDefinition) error { - errs := []error{} - for _, version := range old.Spec.Versions { - newVersion := manifestcomparators.GetVersionByName(&new, version.Name) - if newVersion == nil { - // if the new version doesn't exist skip this version - continue - } - flatOld := FlattenSchema(version.Schema.OpenAPIV3Schema) - flatNew := FlattenSchema(newVersion.Schema.OpenAPIV3Schema) - - diffs, err := CalculateFlatSchemaDiff(flatOld, flatNew) - if err != nil { - errs = append(errs, fmt.Errorf("calculating schema diff for CRD version %q", version.Name)) - continue - } - - for field, diff := range diffs { - handled := false - for _, validation := range cv.Validations { - ok, err := validation(diff) - if err != nil { - errs = append(errs, fmt.Errorf("version %q, field %q: %w", version.Name, field, err)) - } - if ok { - handled = true - break - } - } - - if !handled { - errs = append(errs, fmt.Errorf("version %q, field %q has unknown change, refusing to determine that change is safe", version.Name, field)) - } - } - } - - if len(errs) > 0 { - return errors.Join(errs...) 
- } - return nil -} - -type FieldDiff struct { - Old *v1.JSONSchemaProps - New *v1.JSONSchemaProps -} - -// FlatSchema is a flat representation of a CRD schema. -type FlatSchema map[string]*v1.JSONSchemaProps - -// FlattenSchema takes in a CRD version OpenAPIV3Schema and returns -// a flattened representation of it. For example, a CRD with a schema of: -// ```yaml -// -// ... -// spec: -// type: object -// properties: -// foo: -// type: string -// bar: -// type: string -// ... -// -// ``` -// would be represented as: -// -// map[string]*v1.JSONSchemaProps{ -// "^": {}, -// "^.spec": {}, -// "^.spec.foo": {}, -// "^.spec.bar": {}, -// } -// -// where "^" represents the "root" schema -func FlattenSchema(schema *v1.JSONSchemaProps) FlatSchema { - fieldMap := map[string]*v1.JSONSchemaProps{} - - manifestcomparators.SchemaHas(schema, - field.NewPath("^"), - field.NewPath("^"), - nil, - func(s *v1.JSONSchemaProps, _, simpleLocation *field.Path, _ []*v1.JSONSchemaProps) bool { - fieldMap[simpleLocation.String()] = s.DeepCopy() - return false - }) - - return fieldMap -} - -// CalculateFlatSchemaDiff finds fields in a FlatSchema that are different -// and returns a mapping of field --> old and new field schemas. If a field -// exists in the old FlatSchema but not the new an empty diff mapping and an error is returned. -func CalculateFlatSchemaDiff(o, n FlatSchema) (map[string]FieldDiff, error) { - diffMap := map[string]FieldDiff{} - for field, schema := range o { - if _, ok := n[field]; !ok { - return diffMap, fmt.Errorf("field %q in existing not found in new", field) - } - newSchema := n[field] - - // Copy the schemas and remove any child properties for comparison. - // In theory this will focus in on detecting changes for only the - // field we are looking at and ignore changes in the children fields. - // Since we are iterating through the map that should have all fields - // we should still detect changes in the children fields. - oldCopy := schema.DeepCopy() - newCopy := newSchema.DeepCopy() - oldCopy.Properties = nil - newCopy.Properties = nil - if !reflect.DeepEqual(oldCopy, newCopy) { - diffMap[field] = FieldDiff{ - Old: oldCopy, - New: newCopy, - } - } - } - return diffMap, nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/crdupgradesafety/preflight.go b/vendor/carvel.dev/kapp/pkg/kapp/crdupgradesafety/preflight.go deleted file mode 100644 index 482ef5cfc..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/crdupgradesafety/preflight.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package crdupgradesafety - -import ( - "context" - "errors" - "fmt" - - cmdcore "carvel.dev/kapp/pkg/kapp/cmd/core" - ctldgraph "carvel.dev/kapp/pkg/kapp/diffgraph" - "carvel.dev/kapp/pkg/kapp/preflight" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -var _ preflight.Check = (*Preflight)(nil) - -// Preflight is an implementation of preflight.Check -// to make it easier to add crd upgrade validation -// as a preflight check -type Preflight struct { - depsFactory cmdcore.DepsFactory - enabled bool - validator *Validator -} - -func NewPreflight(df cmdcore.DepsFactory, enabled bool) *Preflight { - return &Preflight{ - depsFactory: df, - enabled: enabled, - validator: &Validator{ - Validations: []Validation{ - NewValidationFunc("NoScopeChange", NoScopeChange), - NewValidationFunc("NoStoredVersionRemoved", NoStoredVersionRemoved), - NewValidationFunc("NoExistingFieldRemoved", NoExistingFieldRemoved), - &ChangeValidator{ - Validations: []ChangeValidation{ - EnumChangeValidation, - RequiredFieldChangeValidation, - MinimumChangeValidation, - MinimumItemsChangeValidation, - MinimumLengthChangeValidation, - MinimumPropertiesChangeValidation, - MaximumChangeValidation, - MaximumLengthChangeValidation, - MaximumItemsChangeValidation, - MaximumPropertiesChangeValidation, - DefaultValueChangeValidation, - }, - }, - }, - }, - } -} - -func (p *Preflight) Enabled() bool { - return p.enabled -} - -func (p *Preflight) SetEnabled(enabled bool) { - p.enabled = enabled -} - -func (p *Preflight) SetConfig(_ preflight.CheckConfig) error { - return nil -} - -func (p *Preflight) Run(ctx context.Context, changeGraph *ctldgraph.ChangeGraph) error { - dCli, err := p.depsFactory.DynamicClient(cmdcore.DynamicClientOpts{}) - if err != nil { - return fmt.Errorf("getting dynamic client: %w", err) - } - crdCli := dCli.Resource(v1.SchemeGroupVersion.WithResource("customresourcedefinitions")) - - validateErrs := []error{} - for _, change := range changeGraph.All() { - // Loop through all the changes looking for "upsert" operations on - // a CRD. "upsert" is used for create + update operations - if change.Change.Op() != ctldgraph.ActualChangeOpUpsert { - continue - } - res := change.Change.Resource() - if res.GroupVersion().WithKind(res.Kind()) != v1.SchemeGroupVersion.WithKind("CustomResourceDefinition") { - continue - } - - // to properly determine if this is an update operation, attempt to fetch - // the "old" CRD from the cluster - uOldCRD, err := crdCli.Get(ctx, res.Name(), metav1.GetOptions{}) - if err != nil { - // if the resource is not found, this "upsert" operation - // translates to a "create" request being made. 
Skip this change - if apierrors.IsNotFound(err) { - continue - } - - return fmt.Errorf("checking for existing CRD resource: %w", err) - } - - oldCRD := &v1.CustomResourceDefinition{} - s := runtime.NewScheme() - if err := v1.AddToScheme(s); err != nil { - return fmt.Errorf("adding apiextension apis to scheme: %w", err) - } - if err := s.Convert(uOldCRD, oldCRD, nil); err != nil { - return fmt.Errorf("couldn't convert old CRD resource to a CRD object: %w", err) - } - - newCRD := &v1.CustomResourceDefinition{} - if err := res.AsUncheckedTypedObj(newCRD); err != nil { - return fmt.Errorf("couldn't convert new CRD resource to a CRD object: %w", err) - } - - if err = p.validator.Validate(*oldCRD, *newCRD); err != nil { - validateErrs = append(validateErrs, err) - } - } - - if len(validateErrs) > 0 { - baseErr := errors.New("validation for safe CRD upgrades failed") - return errors.Join(append([]error{baseErr}, validateErrs...)...) - } - - return nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/blocked_changes.go b/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/blocked_changes.go deleted file mode 100644 index d6b9f026a..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/blocked_changes.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package diffgraph - -import ( - "fmt" -) - -type BlockedChanges struct { - graph *ChangeGraph - unblocked map[*Change]struct{} -} - -func NewBlockedChanges(graph *ChangeGraph) *BlockedChanges { - return &BlockedChanges{graph, map[*Change]struct{}{}} -} - -func (c *BlockedChanges) Unblocked() []*Change { - return c.graph.AllMatching(c.isUnblocked) -} - -func (c *BlockedChanges) Blocked() []*Change { - return c.graph.AllMatching(func(change *Change) bool { return !c.isUnblocked(change) }) -} - -func (c *BlockedChanges) WhyBlocked(changes []*Change) string { - var result string - for _, change := range changes { - result += fmt.Sprintf("%s\n", change.Change.Resource().Description()) - for _, childChange := range change.WaitingFor { - if c.isBlocked(childChange) { - result += fmt.Sprintf(" [blocked] %s\n", childChange.Change.Resource().Description()) - } - } - } - return result -} - -func (c *BlockedChanges) Unblock(change *Change) { - c.unblocked[change] = struct{}{} -} - -func (c *BlockedChanges) isUnblocked(change *Change) bool { - for _, childChange := range change.WaitingFor { - if c.isBlocked(childChange) { - return false - } - } - return true -} - -func (c *BlockedChanges) isBlocked(change *Change) bool { - _, found := c.unblocked[change] - return !found -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change.go b/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change.go deleted file mode 100644 index 47914a277..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package diffgraph - -import ( - "fmt" - "strings" - - ctlconf "carvel.dev/kapp/pkg/kapp/config" - ctlres "carvel.dev/kapp/pkg/kapp/resources" -) - -const ( - changeGroupAnnKey = "kapp.k14s.io/change-group" - changeGroupAnnPrefixKey = "kapp.k14s.io/change-group." - - changeRuleAnnKey = "kapp.k14s.io/change-rule" - changeRuleAnnPrefixKey = "kapp.k14s.io/change-rule." 
-) - -type ActualChange interface { - Resource() ctlres.Resource - Op() ActualChangeOp -} - -type ActualChangeOp string - -const ( - ActualChangeOpUpsert ActualChangeOp = "upsert" - ActualChangeOpDelete ActualChangeOp = "delete" - ActualChangeOpNoop ActualChangeOp = "noop" -) - -type Change struct { - Change ActualChange - WaitingFor []*Change - - changeGroupBindings []ctlconf.ChangeGroupBinding - changeRuleBindings []ctlconf.ChangeRuleBinding - - groups *[]ChangeGroup - rules *[]ChangeRule -} - -type Changes []*Change - -func (c *Change) Description() string { - return fmt.Sprintf("(%s) %s", c.Change.Op(), c.Change.Resource().Description()) -} - -func (c *Change) IsDirectlyWaitingFor(changeToFind *Change) bool { - for _, change := range c.WaitingFor { - if change == changeToFind { - return true - } - } - return false -} - -func (c *Change) IsTransitivelyWaitingFor(changeToFind *Change) bool { - alreadyChecked := map[*Change]struct{}{} - alreadyVisited := map[*Change]struct{}{} - return c.isTransitivelyWaitingFor(changeToFind, alreadyChecked, alreadyVisited) -} - -func (c *Change) isTransitivelyWaitingFor(changeToFind *Change, - alreadyChecked map[*Change]struct{}, alreadyVisited map[*Change]struct{}) bool { - - if c.IsDirectlyWaitingFor(changeToFind) { - return true - } - - for _, change := range c.WaitingFor { - if _, checked := alreadyChecked[change]; checked { - continue - } - alreadyChecked[change] = struct{}{} - - // Should not happen, but let's double check to avoid infinite loops - if _, visited := alreadyVisited[change]; visited { - panic(fmt.Sprintf("Change: Internal error: cycle detected: %s", - change.Change.Resource().Description())) - } - alreadyVisited[change] = struct{}{} - - if change.isTransitivelyWaitingFor(changeToFind, alreadyChecked, alreadyVisited) { - return true - } - - delete(alreadyVisited, change) - } - - return false -} - -func (c *Change) Groups() ([]ChangeGroup, error) { - if c.groups != nil { - return *c.groups, nil - } - - var groups []ChangeGroup - res := c.Change.Resource() - - for k, v := range res.Annotations() { - if k == changeGroupAnnKey || strings.HasPrefix(k, changeGroupAnnPrefixKey) { - name, err := NewChangeGroupNameForResource(v, c.Change.Resource()).AsString() - if err != nil { - return nil, err - } - groupKey, err := NewChangeGroupFromAnnString(name) - if err != nil { - return nil, err - } - groups = append(groups, groupKey) - } - } - - for _, groupConfig := range c.changeGroupBindings { - rms := ctlconf.ResourceMatchers(groupConfig.ResourceMatchers).AsResourceMatchers() - - if (ctlres.AnyMatcher{rms}).Matches(res) { - name, err := NewChangeGroupNameForResource(groupConfig.Name, c.Change.Resource()).AsString() - if err != nil { - return nil, err - } - groupKey, err := NewChangeGroupFromAnnString(name) - if err != nil { - return nil, err - } - groups = append(groups, groupKey) - } - } - - c.groups = &groups - - return groups, nil -} - -func (c *Change) AllRules() ([]ChangeRule, error) { - if c.rules != nil { - return *c.rules, nil - } - - var rules []ChangeRule - res := c.Change.Resource() - - for k, v := range res.Annotations() { - if k == changeRuleAnnKey || strings.HasPrefix(k, changeRuleAnnPrefixKey) { - ruleStr, err := NewChangeGroupNameForResource(v, c.Change.Resource()).AsString() - if err != nil { - return nil, err - } - rule, err := NewChangeRuleFromAnnString(ruleStr) - if err != nil { - return nil, fmt.Errorf("Resource %s: %w", res.Description(), err) - } - rules = append(rules, rule) - } - } - - for i, ruleConfig := range 
c.changeRuleBindings { - rms := ctlconf.ResourceMatchers(ruleConfig.ResourceMatchers).AsResourceMatchers() - - if (ctlres.AnyMatcher{rms}).Matches(res) { - for _, ruleStr := range ruleConfig.Rules { - ruleStr, err := NewChangeGroupNameForResource(ruleStr, c.Change.Resource()).AsString() - if err != nil { - return nil, err - } - rule, err := NewChangeRuleFromAnnString(ruleStr) - if err != nil { - return nil, fmt.Errorf("Resource %s: %w", res.Description(), err) - } - rule.IgnoreIfCyclical = ruleConfig.IgnoreIfCyclical - rule.weight = 100 + i // start at 100 - rules = append(rules, rule) - } - } - } - - c.rules = &rules - - return rules, nil -} - -func (c *Change) ApplicableRules() ([]ChangeRule, error) { - var isUpsert, isDelete bool - - op := c.Change.Op() - - switch op { - case ActualChangeOpUpsert: - isUpsert = true - case ActualChangeOpDelete: - isDelete = true - case ActualChangeOpNoop: - default: - return nil, fmt.Errorf("Unknown change operation: %s", op) - } - - rules, err := c.AllRules() - if err != nil { - return nil, err - } - - var applicableRules []ChangeRule - for _, rule := range rules { - if (isUpsert && rule.Action == ChangeRuleActionUpsert) || - (isDelete && rule.Action == ChangeRuleActionDelete) { - applicableRules = append(applicableRules, rule) - } - } - return applicableRules, nil -} - -func (cs Changes) MatchesRule(rule ChangeRule, _ *Change) ([]*Change, error) { - var result []*Change - - for _, change := range cs { - groups, err := change.Groups() - if err != nil { - return nil, err - } - - for _, group := range groups { - if !group.IsEqual(rule.TargetGroup) { - continue - } - - op := change.Change.Op() - - switch op { - case ActualChangeOpUpsert: - if rule.TargetAction == ChangeRuleTargetActionUpserting { - result = append(result, change) - } - case ActualChangeOpDelete: - if rule.TargetAction == ChangeRuleTargetActionDeleting { - result = append(result, change) - } - case ActualChangeOpNoop: - default: - panic(fmt.Sprintf("Unknown change operation: %s", op)) - } - } - } - - return result, nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_graph.go b/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_graph.go deleted file mode 100644 index 375437ca1..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_graph.go +++ /dev/null @@ -1,316 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package diffgraph - -import ( - "fmt" - "sort" - "strings" - - ctlconf "carvel.dev/kapp/pkg/kapp/config" - "carvel.dev/kapp/pkg/kapp/logger" -) - -type ChangeGraph struct { - changes []*Change - logger logger.Logger -} - -func NewChangeGraph(changes []ActualChange, - changeGroupBindings []ctlconf.ChangeGroupBinding, - changeRuleBindings []ctlconf.ChangeRuleBinding, - logger logger.Logger) (*ChangeGraph, error) { - - logger = logger.NewPrefixed("ChangeGraph") - defer logger.DebugFunc("NewChangeGraph").Finish() - - graphChanges := []*Change{} - - for _, change := range changes { - graphChanges = append(graphChanges, &Change{ - Change: change, - changeGroupBindings: changeGroupBindings, - changeRuleBindings: changeRuleBindings, - }) - } - - graph := &ChangeGraph{graphChanges, logger} - - err := graph.buildEdges( - func(rule ChangeRule) bool { return !rule.IgnoreIfCyclical }, - func(_, _ *Change) bool { return true }, - ) - if err != nil { - return graph, fmt.Errorf("Change graph: Calculating required deps: %w", err) - } - - err = graph.checkCycles() - if err != nil { - // Return graph for inspection - return graph, err - } - - // At this point it's guranteed that there are no cycles - // Start adding optional rules but only if they do not introduce - // new cycles. For example, given - // A -> B -> C - // if we try to add - // C -> A - // cycle will be formed. To check that quickly it's only necessary - // to check if one can get to C from A, hence, C -> A is rejected. - - err = graph.buildEdges( - func(rule ChangeRule) bool { return rule.IgnoreIfCyclical }, - func(graphChange, matchedChange *Change) bool { - return graphChange != matchedChange && - !graphChange.IsDirectlyWaitingFor(matchedChange) && - !matchedChange.IsTransitivelyWaitingFor(graphChange) - }, - ) - if err != nil { - return graph, fmt.Errorf("Change graph: Calculating optional deps: %w", err) - } - - graph.dedup() - - // Double check cycles again - return graph, graph.checkCycles() -} - -type sortedRule struct { - *Change - ChangeRule -} - -func (g *ChangeGraph) buildEdges(allowRule func(ChangeRule) bool, - allowChange func(*Change, *Change) bool) error { - - defer g.logger.DebugFunc("buildEdges").Finish() - - var sortedRules []sortedRule - - for _, graphChange := range g.changes { - rules, err := graphChange.ApplicableRules() - if err != nil { - return err - } - - for _, rule := range rules { - if allowRule(rule) { - sortedRules = append(sortedRules, sortedRule{graphChange, rule}) - } - } - } - - // Since some rules may conflict with other rules (cause cycles) - // we need to order rules so that they are added deterministically - sort.SliceStable(sortedRules, func(i, j int) bool { - // Higher weighted rules come first - return sortedRules[i].ChangeRule.weight > sortedRules[j].ChangeRule.weight - }) - - for _, sr := range sortedRules { - matchedChanges, err := Changes(g.changes).MatchesRule(sr.ChangeRule, sr.Change) - if err != nil { - return err - } - - switch { - case sr.ChangeRule.Order == ChangeRuleOrderAfter: - for _, matchedChange := range matchedChanges { - if allowChange(sr.Change, matchedChange) { - sr.Change.WaitingFor = append(sr.Change.WaitingFor, matchedChange) - } - } - - case sr.ChangeRule.Order == ChangeRuleOrderBefore: - for _, matchedChange := range matchedChanges { - if allowChange(matchedChange, sr.Change) { - matchedChange.WaitingFor = append(matchedChange.WaitingFor, sr.Change) - } - } - - default: - panic("Unknown change rule order") - } - } - - return nil -} - 
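The removed NewChangeGraph above wires the ignoreIfCyclical rules in a second pass and only accepts an edge when the matched change cannot already (transitively) reach the change being wired up, which is exactly why a C -> A rule is rejected once an A -> B -> C chain exists. Below is a minimal standalone Go sketch of that reachability guard; the node type and the reaches/addOptionalEdge helpers are illustrative stand-ins, not part of the vendored kapp API.

```go
package main

import "fmt"

// node is a stand-in for the vendored Change type: waitingFor lists the
// nodes this node must wait on (its outgoing dependency edges).
type node struct {
	name       string
	waitingFor []*node
}

// reaches reports whether dst is reachable from src by following waitingFor
// edges (a plain depth-first search with a visited set).
func reaches(src, dst *node, seen map[*node]bool) bool {
	if src == dst {
		return true
	}
	if seen[src] {
		return false
	}
	seen[src] = true
	for _, next := range src.waitingFor {
		if reaches(next, dst, seen) {
			return true
		}
	}
	return false
}

// addOptionalEdge makes `from` wait on `to`, but only when that would not
// close a cycle: if `to` can already reach `from`, the edge is skipped,
// mirroring how the optional (ignoreIfCyclical) rules are applied only
// after the required edges have been proven acyclic.
func addOptionalEdge(from, to *node) bool {
	if from == to || reaches(to, from, map[*node]bool{}) {
		return false
	}
	from.waitingFor = append(from.waitingFor, to)
	return true
}

func main() {
	a, b, c := &node{name: "A"}, &node{name: "B"}, &node{name: "C"}
	a.waitingFor = []*node{b} // A waits for B
	b.waitingFor = []*node{c} // B waits for C

	fmt.Println(addOptionalEdge(c, a)) // false: A already reaches C, so C -> A is rejected
	fmt.Println(addOptionalEdge(a, c)) // true: shortcutting an existing path stays acyclic
}
```

Because each rejected edge leaves the graph acyclic, the per-edge DFS stays cheap and the later full cycle check is only a safety net.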
-func (g *ChangeGraph) All() []*Change { - return g.AllMatching(func(_ *Change) bool { return true }) -} - -func (g *ChangeGraph) Linearized() ([][]*Change, []*Change) { - var resultLinearized [][]*Change - var resultBlocked []*Change - - recordedChanges := map[*Change]struct{}{} - blockedChanges := NewBlockedChanges(g) - lastBlockedChanges := 0 - - for { - unblocked := blockedChanges.Unblocked() - blocked := blockedChanges.Blocked() - - var sectionLinearized []*Change - for _, unblockedChange := range unblocked { - if _, found := recordedChanges[unblockedChange]; !found { - recordedChanges[unblockedChange] = struct{}{} - blockedChanges.Unblock(unblockedChange) - sectionLinearized = append(sectionLinearized, unblockedChange) - } - } - resultLinearized = append(resultLinearized, sectionLinearized) - - if len(blocked) == 0 || len(blocked) == lastBlockedChanges { - for _, blockedChange := range blocked { - resultBlocked = append(resultBlocked, blockedChange) - } - return resultLinearized, resultBlocked - } - - lastBlockedChanges = len(blocked) - } -} - -func (g *ChangeGraph) AllMatching(matchFunc func(*Change) bool) []*Change { - var result []*Change - // Need to do this _only_ at the first level since - // all changes are included at the top level - for _, change := range g.changes { - if matchFunc(change) { - result = append(result, change) - } - } - return result -} - -func (g *ChangeGraph) RemoveMatching(matchFunc func(*Change) bool) { - var result []*Change - // Need to do this _only_ at the first level since - // all changes are included at the top level - for _, change := range g.changes { - if !matchFunc(change) { - result = append(result, change) - } - } - g.changes = result -} - -func (g *ChangeGraph) Print() { - fmt.Printf("%s", g.PrintStr()) -} - -func (g *ChangeGraph) PrintStr() string { - return g.printChanges(g.changes, map[*Change]bool{}, "") -} - -func (g *ChangeGraph) PrintLinearizedStr() string { - linearizedChangeSections, blockedChanges := g.Linearized() - - var result []string - - for _, changes := range linearizedChangeSections { - var section []string - for _, change := range changes { - section = append(section, change.Description()) - } - result = append(result, strings.Join(section, "\n")) - } - - if len(blockedChanges) > 0 { - var section []string - for _, change := range blockedChanges { - section = append(section, change.Description()) - } - result = append(result, "...more blocked...\n"+strings.Join(section, "\n")) - } - - return strings.Join(result, "\n---\n") -} - -func (g *ChangeGraph) printChanges(changes []*Change, - visitedChanges map[*Change]bool, indent string) string { - - var result string - - for _, change := range changes { - result += fmt.Sprintf("%s%s\n", indent, change.Description()) - - if _, found := visitedChanges[change]; !found { - visitedChanges[change] = true - result += g.printChanges(change.WaitingFor, visitedChanges, indent+" ") - delete(visitedChanges, change) - } else { - result += indent + "cycle found\n" - } - } - - return result -} - -func (g *ChangeGraph) dedup() { - for _, rootChange := range g.changes { - seenWaitingFor := map[*Change]struct{}{} - newWaitingFor := []*Change{} - - for _, change := range rootChange.WaitingFor { - if _, ok := seenWaitingFor[change]; !ok { - seenWaitingFor[change] = struct{}{} - newWaitingFor = append(newWaitingFor, change) - } - } - - rootChange.WaitingFor = newWaitingFor - } -} - -// Implements depth-first search: -// https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search -func (g 
*ChangeGraph) checkCycles() error { - defer g.logger.DebugFunc("checkCycles").Finish() - - markedTemp := map[*Change]struct{}{} - markedPerm := map[*Change]struct{}{} - unmarked := []*Change{} - - for _, change := range g.changes { - unmarked = append(unmarked, change) - } - - for len(unmarked) > 0 { - nodeN := unmarked[0] - unmarked = unmarked[1:] - err := g.checkCyclesVisit(nodeN, markedTemp, markedPerm) - if err != nil { - return fmt.Errorf("Detected cycle while ordering changes: [%s] %w", - nodeN.Change.Resource().Description(), err) - } - } - - return nil -} - -func (g *ChangeGraph) checkCyclesVisit(nodeN *Change, markedTemp, markedPerm map[*Change]struct{}) error { - if _, found := markedPerm[nodeN]; found { - return nil - } - if _, found := markedTemp[nodeN]; found { - return fmt.Errorf("(found repeated: %s)", nodeN.Change.Resource().Description()) - } - markedTemp[nodeN] = struct{}{} - - for _, nodeM := range nodeN.WaitingFor { - err := g.checkCyclesVisit(nodeM, markedTemp, markedPerm) - if err != nil { - return fmt.Errorf("-> [%s] %w", nodeM.Change.Resource().Description(), err) - } - } - - delete(markedTemp, nodeN) - markedPerm[nodeN] = struct{}{} - return nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_group.go b/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_group.go deleted file mode 100644 index 12150f40a..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_group.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package diffgraph - -import ( - "fmt" - "strings" - - k8sval "k8s.io/apimachinery/pkg/util/validation" -) - -type ChangeGroup struct { - Name string -} - -func MustNewChangeGroupFromAnnString(ann string) ChangeGroup { - key, err := NewChangeGroupFromAnnString(ann) - if err != nil { - panic(err.Error()) - } - return key -} - -func NewChangeGroupFromAnnString(ann string) (ChangeGroup, error) { - key := ChangeGroup{ann} - - err := key.Validate() - if err != nil { - return ChangeGroup{}, err - } - - return key, nil -} - -func (r ChangeGroup) IsEqual(other ChangeGroup) bool { - return r.Name == other.Name -} - -func (r ChangeGroup) Validate() error { - if len(r.Name) == 0 { - return fmt.Errorf("Expected non-empty group name") - } - errStrs := r.isQualifiedNameWithoutLen(r.Name) - if len(errStrs) > 0 { - return fmt.Errorf("Expected change group name %q to be a qualified name: %s", r.Name, strings.Join(errStrs, "; ")) - } - return nil -} - -func (r ChangeGroup) isQualifiedNameWithoutLen(name string) []string { - errStrs := k8sval.IsQualifiedName(name) - var updatedErrStrs []string - for _, err := range errStrs { - // Allow change group names to have more characters than the default maxLength - if !strings.Contains(err, k8sval.MaxLenError(k8sval.DNS1035LabelMaxLength)) { - updatedErrStrs = append(updatedErrStrs, err) - } - } - return updatedErrStrs -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_group_name.go b/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_group_name.go deleted file mode 100644 index 16bdd716b..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_group_name.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package diffgraph - -import ( - "fmt" - "regexp" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - ctlcrd "carvel.dev/kapp/pkg/kapp/resourcesmisc" -) - -type ChangeGroupName struct { - name string - resource ctlres.Resource -} - -func NewChangeGroupNameForResource(name string, resource ctlres.Resource) ChangeGroupName { - return ChangeGroupName{name, resource} -} - -var ( - placeholderMatcher = regexp.MustCompile("{.+?}") -) - -// Placeholders have the format {placeholder-name} -// Other patterns like ${placeholder-name} are commonly used by other operators/tools -func (c ChangeGroupName) AsString() (string, error) { - var crdKind, crdGroup string - var err error - crd := ctlcrd.NewAPIExtensionsVxCRD(c.resource) - if crd != nil { - crdKind, err = crd.Kind() - if err != nil { - return c.name, err - } - crdGroup, err = crd.Group() - if err != nil { - return c.name, err - } - } - - values := map[string]string{ - "{api-group}": c.resource.APIGroup(), - "{kind}": c.resource.Kind(), - "{name}": c.resource.Name(), - "{namespace}": c.resource.Namespace(), - "{crd-kind}": crdKind, - "{crd-group}": crdGroup, - } - - replaced := placeholderMatcher.ReplaceAllStringFunc(c.name, func(placeholder string) string { - value, found := values[placeholder] - if !found { - err = fmt.Errorf("Expected placeholder to be one of these: %s but was %s", c.placeholders(values), placeholder) - } - if value == "" { - err = fmt.Errorf("Placeholder %s does not have a value for target resource (hint: placeholders with the 'crd-' prefix can only be used with CRDs)", placeholder) - } - return value - }) - - return replaced, err -} - -func (c ChangeGroupName) placeholders(values map[string]string) (placeholders []string) { - for k := range values { - placeholders = append(placeholders, k) - } - return placeholders -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_rule.go b/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_rule.go deleted file mode 100644 index c8c4b2dfb..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/diffgraph/change_rule.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
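The placeholder substitution above can be tried in isolation. A small sketch using the same regexp; the change-group name template and the resolved values are illustrative, not taken from kapp:

package main

import (
	"fmt"
	"regexp"
)

var placeholderMatcher = regexp.MustCompile("{.+?}")

func main() {
	// Values for a hypothetical Deployment; the crd-* placeholders are left out
	// because they only resolve for CRDs.
	values := map[string]string{
		"{api-group}": "apps",
		"{kind}":      "Deployment",
		"{name}":      "my-app",
		"{namespace}": "default",
	}

	// Purely illustrative change-group name template.
	groupName := "groups.example.com/{kind}.{name}.{api-group}"

	resolved := placeholderMatcher.ReplaceAllStringFunc(groupName, func(ph string) string {
		return values[ph] // a real implementation also errors on unknown or empty placeholders
	})
	fmt.Println(resolved) // groups.example.com/Deployment.my-app.apps
}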
-// SPDX-License-Identifier: Apache-2.0 - -package diffgraph - -import ( - "fmt" - "strings" -) - -type ChangeRuleAction string -type ChangeRuleOrder string -type ChangeRuleTargetAction string - -const ( - ChangeRuleActionUpsert ChangeRuleAction = "upsert" - ChangeRuleActionDelete ChangeRuleAction = "delete" - - ChangeRuleOrderBefore ChangeRuleOrder = "before" - ChangeRuleOrderAfter ChangeRuleOrder = "after" - - ChangeRuleTargetActionUpserting ChangeRuleTargetAction = "upserting" - ChangeRuleTargetActionDeleting ChangeRuleTargetAction = "deleting" -) - -// Example: upsert before deleting apps.big.co/etcd -type ChangeRule struct { - Action ChangeRuleAction - Order ChangeRuleOrder - TargetAction ChangeRuleTargetAction - TargetGroup ChangeGroup - IgnoreIfCyclical bool - - weight int -} - -func NewChangeRuleFromAnnString(ann string) (ChangeRule, error) { - pieces := strings.Split(ann, " ") - if len(pieces) != 4 { - return ChangeRule{}, fmt.Errorf( - "Expected change rule annotation value to have format '(upsert|delete) (before|after) (upserting|deleting) (change-group)', but was '%s'", ann) - } - - rule := ChangeRule{ - Action: ChangeRuleAction(pieces[0]), - Order: ChangeRuleOrder(pieces[1]), - TargetAction: ChangeRuleTargetAction(pieces[2]), - } - - var err error - - rule.TargetGroup, err = NewChangeGroupFromAnnString(pieces[3]) - if err != nil { - return ChangeRule{}, err - } - - err = rule.Validate() - if err != nil { - return ChangeRule{}, err - } - - return rule, nil -} - -func (r ChangeRule) Validate() error { - if r.Action != ChangeRuleActionUpsert && r.Action != ChangeRuleActionDelete { - return fmt.Errorf("Unknown change rule Action") - } - if r.Order != ChangeRuleOrderBefore && r.Order != ChangeRuleOrderAfter { - return fmt.Errorf("Unknown change rule Order") - } - if r.TargetAction != ChangeRuleTargetActionUpserting && r.TargetAction != ChangeRuleTargetActionDeleting { - return fmt.Errorf("Unknown change rule TargetAction") - } - return nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/logger/interface.go b/vendor/carvel.dev/kapp/pkg/kapp/logger/interface.go deleted file mode 100644 index b7fc83b79..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/logger/interface.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package logger - -type Logger interface { - DebugFunc(name string) FuncLogger - NewPrefixed(name string) Logger - - Error(msg string, args ...interface{}) - Info(msg string, args ...interface{}) - Debug(msg string, args ...interface{}) -} - -type FuncLogger interface { - Finish() -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/logger/noop.go b/vendor/carvel.dev/kapp/pkg/kapp/logger/noop.go deleted file mode 100644 index cb7e4a09f..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/logger/noop.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
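The annotation format parsed by NewChangeRuleFromAnnString can be illustrated with a standalone sketch; the four-piece split mirrors the parsing shown above, and the annotation key named in the comment is the usual kapp change-rule key:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Value of a change-rule annotation (typically kapp.k14s.io/change-rule),
	// matching the format documented above.
	ann := "upsert before deleting apps.big.co/etcd"

	pieces := strings.Split(ann, " ")
	if len(pieces) != 4 {
		fmt.Println("expected '(upsert|delete) (before|after) (upserting|deleting) (change-group)'")
		return
	}

	action, order, targetAction, targetGroup := pieces[0], pieces[1], pieces[2], pieces[3]
	fmt.Printf("action=%s order=%s targetAction=%s targetGroup=%s\n",
		action, order, targetAction, targetGroup)
}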
-// SPDX-License-Identifier: Apache-2.0 - -package logger - -type NoopLogger struct{} - -var _ Logger = NoopLogger{} - -func NewNoopLogger() NoopLogger { return NoopLogger{} } -func NewTODOLogger() NoopLogger { return NewNoopLogger() } - -func (l NoopLogger) Error(_ string, _ ...interface{}) {} -func (l NoopLogger) Info(_ string, _ ...interface{}) {} -func (l NoopLogger) Debug(_ string, _ ...interface{}) {} -func (l NoopLogger) DebugFunc(_ string) FuncLogger { return NoopFuncLogger{} } -func (l NoopLogger) NewPrefixed(_ string) Logger { return l } - -type NoopFuncLogger struct{} - -var _ FuncLogger = NoopFuncLogger{} - -func (l NoopFuncLogger) Finish() {} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/logger/ui.go b/vendor/carvel.dev/kapp/pkg/kapp/logger/ui.go deleted file mode 100644 index e952c92aa..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/logger/ui.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package logger - -import ( - "fmt" - "time" - - "github.com/cppforlife/go-cli-ui/ui" -) - -const ( - loggerLevelError = "error" - loggerLevelInfo = "info" - loggerLevelDebug = "debug" -) - -type UILogger struct { - prefix string - ui ui.UI - debug bool -} - -var _ Logger = &UILogger{} - -func NewUILogger(ui ui.UI) *UILogger { return &UILogger{"", ui, false} } - -func (l *UILogger) SetDebug(debug bool) { l.debug = debug } - -func (l *UILogger) Error(msg string, args ...interface{}) { - l.ui.BeginLinef(l.msg(loggerLevelError, msg), args...) -} - -func (l *UILogger) Info(msg string, args ...interface{}) { - l.ui.BeginLinef(l.msg(loggerLevelInfo, msg), args...) -} - -func (l *UILogger) Debug(msg string, args ...interface{}) { - if l.debug { - l.ui.BeginLinef(l.msg(loggerLevelDebug, msg), args...) - } -} - -func (l *UILogger) DebugFunc(name string) FuncLogger { - funcLogger := &UIFuncLogger{name, time.Now(), l.NewPrefixed(name)} - funcLogger.Start() - return funcLogger -} - -func (l *UILogger) NewPrefixed(name string) Logger { - if len(l.prefix) > 0 { - name = l.prefix + name - } - name += ": " - return &UILogger{name, l.ui, l.debug} -} - -func (l *UILogger) msg(level, msg string) string { - ts := time.Now().Format("03:04:05PM") - return fmt.Sprintf("%s: %s: %s%s\n", ts, level, l.prefix, msg) -} - -type UIFuncLogger struct { - name string - startTime time.Time - logger Logger -} - -var _ FuncLogger = &UIFuncLogger{} - -func (l *UIFuncLogger) Start() { l.logger.Debug("start") } -func (l *UIFuncLogger) Finish() { l.logger.Debug("end (%s)", time.Now().Sub(l.startTime)) } diff --git a/vendor/carvel.dev/kapp/pkg/kapp/matcher/string.go b/vendor/carvel.dev/kapp/pkg/kapp/matcher/string.go deleted file mode 100644 index e357f3576..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/matcher/string.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package matcher - -import ( - "regexp" - "strings" -) - -const ( - stringMatcherGlob1 = '*' - stringMatcherGlob2 = '%' // not special char in Bash -) - -type StringMatcher struct { - expected string -} - -func NewStringMatcher(expected string) StringMatcher { - return StringMatcher{expected} -} - -func (f StringMatcher) Matches(actual string) bool { - firstChar := f.expected[0] - lastChar := f.expected[len(f.expected)-1] - - prefixGlob := firstChar == stringMatcherGlob1 || firstChar == stringMatcherGlob2 - suffixGlob := lastChar == stringMatcherGlob1 || lastChar == stringMatcherGlob2 - - switch { - case prefixGlob && suffixGlob: - return regexp.MustCompile(regexp.QuoteMeta(f.expected[1 : len(f.expected)-1])).MatchString(actual) - - case prefixGlob: - return strings.HasSuffix(actual, f.expected[1:]) - - case suffixGlob: - return strings.HasPrefix(actual, f.expected[:len(f.expected)-1]) - - default: - return actual == f.expected - } -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/preflight/check.go b/vendor/carvel.dev/kapp/pkg/kapp/preflight/check.go deleted file mode 100644 index feabe590b..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/preflight/check.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package preflight - -import ( - "context" - - ctldgraph "carvel.dev/kapp/pkg/kapp/diffgraph" -) - -type CheckFunc func(context.Context, *ctldgraph.ChangeGraph, CheckConfig) error -type CheckConfig map[string]any -type ConfigFunc func(CheckConfig) error - -type Check interface { - Enabled() bool - SetEnabled(bool) - SetConfig(CheckConfig) error - Run(context.Context, *ctldgraph.ChangeGraph) error -} - -type checkImpl struct { - enabled bool - checkFunc CheckFunc - - config CheckConfig - configFunc ConfigFunc -} - -func NewCheck(cf CheckFunc, sf ConfigFunc, enabled bool) Check { - return &checkImpl{ - enabled: enabled, - checkFunc: cf, - configFunc: sf, - } -} - -func (cf *checkImpl) Enabled() bool { - return cf.enabled -} - -func (cf *checkImpl) SetEnabled(enabled bool) { - cf.enabled = enabled -} - -func (cf *checkImpl) SetConfig(config CheckConfig) error { - cf.config = config - if cf.configFunc != nil { - return cf.configFunc(config) - } - return nil -} - -func (cf *checkImpl) Run(ctx context.Context, changeGraph *ctldgraph.ChangeGraph) error { - return cf.checkFunc(ctx, changeGraph, cf.config) -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/preflight/registry.go b/vendor/carvel.dev/kapp/pkg/kapp/preflight/registry.go deleted file mode 100644 index 1d812800c..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/preflight/registry.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
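The glob semantics of StringMatcher can be reproduced in a few lines. This sketch swaps the quoted-regexp "contains" check for strings.Contains, which is equivalent for literal patterns:

package main

import (
	"fmt"
	"strings"
)

// matches mirrors StringMatcher's rules: a leading '*' or '%' anchors on the
// suffix, a trailing one on the prefix, both means "contains", and otherwise
// the comparison is exact.
func matches(expected, actual string) bool {
	first, last := expected[0], expected[len(expected)-1]
	prefixGlob := first == '*' || first == '%'
	suffixGlob := last == '*' || last == '%'

	switch {
	case prefixGlob && suffixGlob:
		return strings.Contains(actual, expected[1:len(expected)-1])
	case prefixGlob:
		return strings.HasSuffix(actual, expected[1:])
	case suffixGlob:
		return strings.HasPrefix(actual, expected[:len(expected)-1])
	default:
		return actual == expected
	}
}

func main() {
	fmt.Println(matches("*-controller", "operator-controller")) // true
	fmt.Println(matches("kube-%", "kube-system"))               // true
	fmt.Println(matches("*carvel*", "dev.carvel.kapp"))         // true
	fmt.Println(matches("exact", "exactly"))                    // false
}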
-// SPDX-License-Identifier: Apache-2.0 - -package preflight - -import ( - "context" - "fmt" - "strings" - - "carvel.dev/kapp/pkg/kapp/config" - ctldgraph "carvel.dev/kapp/pkg/kapp/diffgraph" - "github.com/spf13/pflag" -) - -const preflightFlag = "preflight" - -// Registry is a collection of preflight checks -type Registry struct { - known map[string]Check - // Stores the enabled values from the command line - enabledFlag map[string]bool -} - -// NewRegistry will return a new *Registry with the -// provided set of preflight checks added to the registry -func NewRegistry(checks map[string]Check) *Registry { - registry := &Registry{} - for name, check := range checks { - registry.AddCheck(name, check) - } - return registry -} - -// String returns a string representation of the -// enabled preflight checks. It follows the format: -// CheckName,... -// This method is needed so Registry implements -// the pflag.Value interface -func (c *Registry) String() string { - enabled := []string{} - for k, v := range c.known { - if v.Enabled() { - enabled = append(enabled, k) - } - } - return strings.Join(enabled, ",") -} - -// Type returns a string representing the type -// of the Registry. It is needed to implement the -// pflag.Value interface -func (c *Registry) Type() string { - return fmt.Sprintf("%T", c) -} - -// Set takes in a string in the format of -// CheckName,... -// and sets the specified preflight check -// as enabled if listed, otherwise, sets as -// disabled if not listed. -// Returns an error if there is a problem -// parsing the preflight checks -func (c *Registry) Set(s string) error { - if c.known == nil || c.enabledFlag == nil { - return nil - } - - // Using enabledFlag allows multiple --preflight check flags to be specified - mappings := strings.Split(s, ",") - for _, key := range mappings { - if _, ok := c.known[key]; !ok { - return fmt.Errorf("unknown preflight check %q specified", key) - } - c.enabledFlag[key] = true - } - - // enable/disabled based on validators specified - for key := range c.known { - enabled, ok := c.enabledFlag[key] - c.known[key].SetEnabled(ok && enabled) - } - return nil -} - -// AddFlags adds the --preflight flag to a -// pflag.FlagSet and configures the preflight -// checks in the registry based on the user provided -// values. If no values are provided by a user the -// default values are used. -func (c *Registry) AddFlags(flags *pflag.FlagSet) { - knownChecks := []string{} - for name := range c.known { - knownChecks = append(knownChecks, name) - } - flags.Var(c, preflightFlag, fmt.Sprintf("preflight checks to run. Available preflight checks are [%s]", strings.Join(knownChecks, ","))) -} - -// AddCheck adds a new preflight check to the registry. -// The name provided will map to the provided Check. -func (c *Registry) AddCheck(name string, check Check) { - if c.known == nil { - c.known = make(map[string]Check) - } - if c.enabledFlag == nil { - c.enabledFlag = make(map[string]bool) - } - c.known[name] = check -} - -// Validate the configuration provided; the rules are: -// 1. Unknown validator = error -// 2. 
Duplicate validator = error -func (c *Registry) validateConfig(conf []config.PreflightRule) error { - haveConfig := map[string]bool{} - for _, rule := range conf { - if _, ok := c.known[rule.Name]; !ok { - return fmt.Errorf("unknown preflight check in configuration: %q", rule.Name) - } - if _, ok := haveConfig[rule.Name]; ok { - return fmt.Errorf("duplicate preflight check in configuration: %q", rule.Name) - } - haveConfig[rule.Name] = true - } - return nil -} - -func (c *Registry) SetConfig(conf []config.PreflightRule) error { - // We get the --preflight cmdline flag _before_ the configuration from the file. - // So, we need to evaluate the config that we've gotten in light of the enabledFlag - if err := c.validateConfig(conf); err != nil { - return err - } - // map the configuration by name - config := map[string]map[string]any{} - for _, rule := range conf { - config[rule.Name] = rule.Config - } - if len(c.enabledFlag) == 0 { - // no --preflight flag, so enable validators according to their presence in the config - for name, check := range c.known { - _, ok := config[name] - check.SetEnabled(ok) - } - } - for name, check := range c.known { - err := check.SetConfig(config[name]) - if err != nil { - return fmt.Errorf("setting preflight config %q: %w", name, err) - } - } - return nil -} - -// Run will execute any enabled preflight checks. The provided -// Context and ChangeGraph will be passed to the preflight checks -// that are being executed. -func (c *Registry) Run(ctx context.Context, cg *ctldgraph.ChangeGraph) error { - for name, check := range c.known { - if check.Enabled() { - err := check.Run(ctx, cg) - if err != nil { - return fmt.Errorf("running preflight check %q: %w", name, err) - } - } - } - return nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/association_label.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/association_label.go deleted file mode 100644 index 4a3bebc15..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/association_label.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "crypto/md5" - "fmt" - - "k8s.io/apimachinery/pkg/labels" -) - -const ( - kappAssociationLabelKey = "kapp.k14s.io/association" - kappAssociationLabelV1 = "v1" -) - -type AssociationLabel struct { - resource Resource -} - -func NewAssociationLabel(resource Resource) AssociationLabel { - return AssociationLabel{resource} -} - -func (a AssociationLabel) v1Value() string { - // max 63 char for label values - key := fmt.Sprintf("%x", md5.Sum([]byte(NewUniqueResourceKey(a.resource).String()))) - return kappAssociationLabelV1 + "." + key -} - -func (a AssociationLabel) Key() string { return kappAssociationLabelKey } -func (a AssociationLabel) Value() string { return a.v1Value() } - -func (a AssociationLabel) AsSelector() labels.Selector { - return labels.Set(map[string]string{kappAssociationLabelKey: a.v1Value()}).AsSelector() -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/file_resources.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/file_resources.go deleted file mode 100644 index 35ba1b39e..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/file_resources.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
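Wiring a preflight Check into the Registry and the --preflight flag looks roughly like this. It assumes the kapp preflight and diffgraph packages shown above (as vendored prior to this removal) are importable; the check name noopCheck is made up for the example:

package main

import (
	"context"
	"fmt"

	ctldgraph "carvel.dev/kapp/pkg/kapp/diffgraph"
	"carvel.dev/kapp/pkg/kapp/preflight"
	"github.com/spf13/pflag"
)

func main() {
	// A check that always passes; real checks inspect the ChangeGraph.
	noop := preflight.NewCheck(
		func(_ context.Context, _ *ctldgraph.ChangeGraph, _ preflight.CheckConfig) error { return nil },
		nil,   // no config validation
		false, // disabled unless requested
	)

	registry := preflight.NewRegistry(map[string]preflight.Check{"noopCheck": noop})

	// Expose --preflight and enable the check from the command line.
	flags := pflag.NewFlagSet("kapp", pflag.ContinueOnError)
	registry.AddFlags(flags)
	if err := flags.Parse([]string{"--preflight=noopCheck"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}

	fmt.Println("enabled checks:", registry.String())
}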
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" - "io/fs" - "os" - "path/filepath" - "sort" - "strings" -) - -var ( - fileResourcesAllowedExts = []string{".json", ".yaml", ".yml"} // matches kubectl -) - -type FileResource struct { - fileSrc FileSource -} - -// NewFileResources inspects file and returns a slice of FileResource objects. If file is "-", a FileResource for STDIN -// is returned. If it is prefixed with either http:// or https://, a FileResource that supports an HTTP transport is -// returned. If file is a directory, one FileResource object is returned for each file in the directory with an allowed -// extension (.json, .yml, .yaml). If file is not a directory, a FileResource object is returned for that one file. If -// fsys is nil, NewFileResources uses the OS's file system. Otherwise, it uses the passed in file system. -func NewFileResources(fsys fs.FS, file string) ([]FileResource, error) { - var fileRs []FileResource - - switch { - case file == "-": - fileRs = append(fileRs, NewFileResource(NewStdinSource())) - - case strings.HasPrefix(file, "http://") || strings.HasPrefix(file, "https://"): - fileRs = append(fileRs, NewFileResource(NewHTTPFileSource(file))) - - default: - dir, err := isDir(fsys, file) - if err != nil { - return nil, err - } - - if dir { - // The typical command line invocation won't set fsys. If it comes in nil, create a new DirFS rooted at - // file, then set file to '.' (current working directory) so the fs.WalkDir call below works correctly. - if fsys == nil { - fsys = os.DirFS(file) - file = "." - } - - var paths []string - err := fs.WalkDir(fsys, file, func(path string, d fs.DirEntry, err error) error { - if err != nil || d.IsDir() { - return err - } - ext := filepath.Ext(path) - for _, allowedExt := range fileResourcesAllowedExts { - if allowedExt == ext { - paths = append(paths, path) - } - } - return nil - }) - if err != nil { - return nil, fmt.Errorf("error listing file %q", file) - } - - sort.Strings(paths) - - for _, path := range paths { - fileRs = append(fileRs, NewFileResource(NewLocalFileSource(fsys, path))) - } - } else { - fileRs = append(fileRs, NewFileResource(NewLocalFileSource(fsys, file))) - } - } - - return fileRs, nil -} - -func NewFileResource(fileSrc FileSource) FileResource { return FileResource{fileSrc} } - -func (r FileResource) Description() string { return r.fileSrc.Description() } - -func (r FileResource) Resources() ([]Resource, error) { - docs, err := NewYAMLFile(r.fileSrc).Docs() - if err != nil { - return nil, err - } - - var resources []Resource - - for i, doc := range docs { - rs, err := NewResourcesFromBytes(doc) - if err != nil { - return nil, err - } - - for _, res := range rs { - res.SetOrigin(fmt.Sprintf("%s doc %d", r.fileSrc.Description(), i+1)) - } - - resources = append(resources, rs...) - } - - return resources, nil -} - -// isDir returns if path is a directory. If fsys is nil, isDir calls os.Stat(path); otherwise, it checks path inside -// fsys. 
-func isDir(fsys fs.FS, path string) (bool, error) { - if fsys == nil { - fileInfo, err := os.Stat(path) - if err != nil { - return false, err - } - return fileInfo.IsDir(), nil - } - - switch t := fsys.(type) { - case fs.StatFS: - fileInfo, err := t.Stat(path) - if err != nil { - return false, err - } - return fileInfo.IsDir(), nil - case fs.FS: - f, err := t.Open(path) - if err != nil { - return false, fmt.Errorf("error opening file %q: %v", path, err) - } - defer f.Close() - - fileInfo, err := f.Stat() - if err != nil { - return false, err - } - return fileInfo.IsDir(), nil - default: - return false, fmt.Errorf("error determining if %q is a directory: unexpected FS type %T", path, fsys) - } -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/file_sources.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/file_sources.go deleted file mode 100644 index f57860b8b..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/file_sources.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" - "io" - "io/fs" - "net/http" - "os" -) - -type FileSource interface { - Description() string - Bytes() ([]byte, error) -} - -type BytesSource struct { - bytes []byte -} - -var _ FileSource = BytesSource{} - -func NewBytesSource(bytes []byte) BytesSource { return BytesSource{bytes} } -func (s BytesSource) Description() string { return "bytes" } -func (s BytesSource) Bytes() ([]byte, error) { return s.bytes, nil } - -type StdinSource struct{} - -var _ FileSource = StdinSource{} - -func NewStdinSource() StdinSource { return StdinSource{} } -func (s StdinSource) Description() string { return "stdin" } -func (s StdinSource) Bytes() ([]byte, error) { return io.ReadAll(os.Stdin) } - -type LocalFileSource struct { - fsys fs.FS - path string -} - -var _ FileSource = LocalFileSource{} - -func NewLocalFileSource(fsys fs.FS, path string) LocalFileSource { - return LocalFileSource{fsys: fsys, path: path} -} -func (s LocalFileSource) Description() string { return fmt.Sprintf("file '%s'", s.path) } -func (s LocalFileSource) Bytes() ([]byte, error) { - switch t := s.fsys.(type) { - case fs.ReadFileFS: - return t.ReadFile(s.path) - case fs.FS: - f, err := t.Open(s.path) - if err != nil { - return nil, err - } - defer f.Close() - return fs.ReadFile(s.fsys, s.path) - default: - return os.ReadFile(s.path) - } -} - -type HTTPFileSource struct { - url string - Client *http.Client -} - -var _ FileSource = HTTPFileSource{} - -func NewHTTPFileSource(path string) HTTPFileSource { return HTTPFileSource{path, &http.Client{}} } - -func (s HTTPFileSource) Description() string { - return fmt.Sprintf("HTTP URL '%s'", s.url) -} - -func (s HTTPFileSource) Bytes() ([]byte, error) { - resp, err := s.Client.Get(s.url) - if err != nil { - return nil, fmt.Errorf("Requesting URL '%s': %w", s.url, err) - } - - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode > 299 { - return nil, fmt.Errorf("Requesting URL '%s': %s", s.url, resp.Status) - } - - result, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("Reading URL '%s': %w", s.url, err) - } - - return result, nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resource_config_maps.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resource_config_maps.go deleted file mode 100644 index 35f6f7a57..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resource_config_maps.go +++ /dev/null @@ -1,29 +0,0 @@ -// 
Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" -) - -func (r IdentifiedResources) ConfigMapResources(labelSelector labels.Selector) ([]corev1.ConfigMap, error) { - listOpts := metav1.ListOptions{LabelSelector: labelSelector.String()} - - mapList, err := r.coreClient.CoreV1().ConfigMaps("").List(context.TODO(), listOpts) - if err != nil { - return nil, err - } - - var maps []corev1.ConfigMap - - for _, m := range mapList.Items { - maps = append(maps, m) - } - - return maps, nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resources.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resources.go deleted file mode 100644 index f8c9df350..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resources.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" - - "carvel.dev/kapp/pkg/kapp/logger" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" -) - -type IdentifiedResources struct { - coreClient kubernetes.Interface - resourceTypes ResourceTypes - resources Resources - fallbackAllowedNamespaces []string - logger logger.Logger -} - -func NewIdentifiedResources(coreClient kubernetes.Interface, resourceTypes ResourceTypes, - resources Resources, fallbackAllowedNamespaces []string, logger logger.Logger) IdentifiedResources { - - return IdentifiedResources{coreClient, resourceTypes, resources, - fallbackAllowedNamespaces, logger.NewPrefixed("IdentifiedResources")} -} - -func (r IdentifiedResources) Create(resource Resource) (Resource, error) { - defer r.logger.DebugFunc(fmt.Sprintf("Create(%s)", resource.Description())).Finish() - - resource = resource.DeepCopy() - - err := NewIdentityAnnotation(resource).AddMod().Apply(resource) - if err != nil { - return nil, err - } - - resource, err = r.resources.Create(resource) - if err != nil { - return nil, err - } - - err = NewIdentityAnnotation(resource).RemoveMod().Apply(resource) - if err != nil { - return nil, err - } - - return resource, nil -} - -func (r IdentifiedResources) Update(resource Resource) (Resource, error) { - defer r.logger.DebugFunc(fmt.Sprintf("Update(%s)", resource.Description())).Finish() - - resource = resource.DeepCopy() - - err := NewIdentityAnnotation(resource).AddMod().Apply(resource) - if err != nil { - return nil, err - } - - resource, err = r.resources.Update(resource) - if err != nil { - return nil, err - } - - err = NewIdentityAnnotation(resource).RemoveMod().Apply(resource) - if err != nil { - return nil, err - } - - return resource, nil -} - -func (r IdentifiedResources) Patch(resource Resource, patchType types.PatchType, data []byte) (Resource, error) { - defer r.logger.DebugFunc(fmt.Sprintf("Patch(%s)", resource.Description())).Finish() - return r.resources.Patch(resource, patchType, data) -} - -func (r IdentifiedResources) Delete(resource Resource) error { - defer r.logger.DebugFunc(fmt.Sprintf("Delete(%s)", resource.Description())).Finish() - return r.resources.Delete(resource) -} - -func (r IdentifiedResources) Get(resource Resource) (Resource, error) { - defer r.logger.DebugFunc(fmt.Sprintf("Get(%s)", resource.Description())).Finish() - - resource, err := r.resources.Get(resource) - if err != nil { - return nil, err - } - - err = 
NewIdentityAnnotation(resource).RemoveMod().Apply(resource) - if err != nil { - return nil, err - } - - return resource, nil -} - -func (r IdentifiedResources) Exists(resource Resource, existsOpts ExistsOpts) (Resource, bool, error) { - defer r.logger.DebugFunc(fmt.Sprintf("Exists(%s)", resource.Description())).Finish() - return r.resources.Exists(resource, existsOpts) -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resources_list.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resources_list.go deleted file mode 100644 index 6a1a83c35..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resources_list.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "sort" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type IdentifiedResourcesListOpts struct { - IgnoreCachedResTypes bool - GKsScope []schema.GroupKind - ResourceNamespaces []string -} - -func (r IdentifiedResources) List(labelSelector labels.Selector, resRefs []ResourceRef, opts IdentifiedResourcesListOpts) ([]Resource, error) { - defer r.logger.DebugFunc("List").Finish() - - resTypes, err := r.resourceTypes.All(opts.IgnoreCachedResTypes) - if err != nil { - return nil, err - } - - // TODO non-listable types - resTypes = Listable(resTypes) - - // TODO eliminating events - resTypes = NonMatching(resTypes, ResourceRef{ - schema.GroupVersionResource{Version: "v1", Resource: "events"}, - }) - - // TODO eliminating component statuses - resTypes = NonMatching(resTypes, ResourceRef{ - schema.GroupVersionResource{Version: "v1", Resource: "componentstatuses"}, - }) - - // https://github.com/carvel-dev/kapp/issues/748 - // TODO provide a way to exclude resource via configuration - resTypes = NonMatchingGK(resTypes, schema.GroupKind{Group: "cilium.io", Kind: "CiliumIdentity"}) - - if len(opts.GKsScope) > 0 { - resTypes = MatchingAnyGK(resTypes, opts.GKsScope) - } - - if len(resRefs) > 0 { - resTypes = MatchingAny(resTypes, resRefs) - } - - allOpts := AllOpts{ - ListOpts: &metav1.ListOptions{ - LabelSelector: labelSelector.String(), - }, - ResourceNamespaces: opts.ResourceNamespaces, - } - - resources, err := r.resources.All(resTypes, allOpts) - if err != nil { - return nil, err - } - - // Check returned resources against label selector - // in case of Kubernetes APIs returned resources that do not match. - // This can happen if custom aggregated APIs did not implement label selector filtering. 
- // (https://github.com/carvel-dev/kapp/issues/160) - var filteredResources []Resource - for _, res := range resources { - if labelSelector.Matches(labels.Set(res.Labels())) { - filteredResources = append(filteredResources, res) - } - } - resources = filteredResources - - // Mark resources that were not created by kapp as transient - for i, res := range resources { - if !NewIdentityAnnotation(res).Valid() { - res.MarkTransient(true) - resources[i] = res - } - } - - return r.pickPreferredVersions(resources) -} - -func (r IdentifiedResources) pickPreferredVersions(resources []Resource) ([]Resource, error) { - var result []Resource - - uniqueByID := map[string][]Resource{} - - for _, res := range resources { - uniqueByID[res.UID()] = append(uniqueByID[res.UID()], res) - } - - for _, rs := range uniqueByID { - var matched bool - - for _, res := range rs { - idAnn := NewIdentityAnnotation(res) - - if idAnn.MatchesVersion() { - err := idAnn.RemoveMod().Apply(res) - if err != nil { - return nil, err - } - - result = append(result, res) - matched = true - break - } - } - - if !matched { - // Sort to have some stability - sort.Slice(rs, func(i, j int) bool { return rs[i].APIVersion() < rs[j].APIVersion() }) - // TODO use preferred version from the api - result = append(result, rs[0]) - } - } - - return result, nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resources_pods.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resources_pods.go deleted file mode 100644 index 5a349bf31..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/identified_resources_pods.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" - "strings" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" -) - -func (r IdentifiedResources) PodResources(labelSelector labels.Selector, resourceNamespaces []string) UniquePodWatcher { - return UniquePodWatcher{labelSelector, uniqAndValidNamespaces(append(r.fallbackAllowedNamespaces, resourceNamespaces...)), r.coreClient} -} - -type PodWatcherI interface { - Watch(podsToWatchCh chan corev1.Pod, cancelCh chan struct{}) error -} - -type UniquePodWatcher struct { - labelSelector labels.Selector - fallbackAllowedNamespaces []string - coreClient kubernetes.Interface -} - -var _ PodWatcherI = UniquePodWatcher{} - -func (w UniquePodWatcher) Watch(podsToWatchCh chan corev1.Pod, cancelCh chan struct{}) error { - nonUniquePodsToWatchCh := make(chan corev1.Pod) - - go func() { - // Watch Pods in all namespaces first and fallback to the - // fallbackAllowedNamespaces if lack of permission - namespaces := []string{""} - namespaces = append(namespaces, w.fallbackAllowedNamespaces...) 
- var forbiddenNamespaces []string - - for _, namespace := range namespaces { - podWatcher := NewPodWatcher( - w.coreClient.CoreV1().Pods(namespace), - metav1.ListOptions{LabelSelector: w.labelSelector.String()}, - ) - err := podWatcher.Watch(nonUniquePodsToWatchCh, cancelCh) - if err == nil { - if namespace == "" { - break - } - continue - } - if !errors.IsForbidden(err) { - fmt.Printf("Pod watching error: %s\n", err) // TODO - break - } - if namespace != "" { - forbiddenNamespaces = append(forbiddenNamespaces, fmt.Sprintf(`"%s"`, namespace)) - } - } - - if len(forbiddenNamespaces) > 0 { - fmt.Printf(`Pod watching error: pods is forbidden: User cannot list resource "pods" in API group "" in the namespace(s) %s`, strings.Join(forbiddenNamespaces, ", ")) - } - close(nonUniquePodsToWatchCh) - }() - - // Send unique pods to the watcher client - watchedPods := map[string]struct{}{} - - for pod := range nonUniquePodsToWatchCh { - podUID := string(pod.UID) - if _, found := watchedPods[podUID]; found { - continue - } - - watchedPods[podUID] = struct{}{} - podsToWatchCh <- pod - } - - return nil -} - -type FilteringPodWatcher struct { - MatcherFunc func(*corev1.Pod) bool - Watcher PodWatcherI -} - -var _ PodWatcherI = FilteringPodWatcher{} - -func (w FilteringPodWatcher) Watch(podsToWatchCh chan corev1.Pod, cancelCh chan struct{}) error { - filteredCh := make(chan corev1.Pod) - - go func() { - err := w.Watcher.Watch(filteredCh, cancelCh) - if err != nil { - fmt.Printf("Pod watching error: %s\n", err) // TODO - } - - close(filteredCh) - }() - - for pod := range filteredCh { - if w.MatcherFunc(&pod) { - podsToWatchCh <- pod - } - } - - return nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/identity_annotation.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/identity_annotation.go deleted file mode 100644 index b9138ff8e..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/identity_annotation.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "strings" -) - -const ( - // Version annotation is used to indicate deployed resource version, - // but is also used to determine transientiveness (kapp deploy vs some controller) - kappIdentityAnnKey = "kapp.k14s.io/identity" - kappIdentityAnnV1 = "v1" -) - -type IdentityAnnotation struct { - resource Resource -} - -func NewIdentityAnnotation(resource Resource) IdentityAnnotation { - return IdentityAnnotation{resource} -} - -// Valid returns true if signature matches resource itself -func (a IdentityAnnotation) Valid() bool { - pieces := strings.Split(a.resource.Annotations()[kappIdentityAnnKey], ";") - - switch pieces[0] { - case kappIdentityAnnV1: - if len(pieces) != 3 { - return false - } - return NewUniqueResourceKey(a.resource).String() == pieces[1] - - default: - return false - } -} - -// MatchesVersion returns true if annotation is valid and it matches version -func (a IdentityAnnotation) MatchesVersion() bool { - if !a.Valid() { - return false - } - - pieces := strings.Split(a.resource.Annotations()[kappIdentityAnnKey], ";") - - switch pieces[0] { - case kappIdentityAnnV1: - return a.resource.APIVersion() == pieces[2] - - default: - return false - } -} - -func (a IdentityAnnotation) v1Value() string { - return kappIdentityAnnV1 + ";" + NewUniqueResourceKey(a.resource).String() + ";" + a.resource.APIVersion() -} - -func (a IdentityAnnotation) AddMod() StringMapAppendMod { - return StringMapAppendMod{ - ResourceMatcher: AllMatcher{}, - Path: NewPathFromStrings([]string{"metadata", "annotations"}), - KVs: map[string]string{kappIdentityAnnKey: a.v1Value()}, - } -} - -func (a IdentityAnnotation) RemoveMod() FieldRemoveMod { - return FieldRemoveMod{ - ResourceMatcher: AllMatcher{}, - Path: NewPathFromStrings([]string{"metadata", "annotations", kappIdentityAnnKey}), - } -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/label.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/label.go deleted file mode 100644 index 21f0fbc59..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/label.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" -) - -type SimpleLabel struct { - labelSelector labels.Selector -} - -func NewSimpleLabel(labelSelector labels.Selector) SimpleLabel { - return SimpleLabel{labelSelector} -} - -func (a SimpleLabel) KV() (string, string, error) { - reqs, selectable := a.labelSelector.Requirements() - if !selectable { - return "", "", fmt.Errorf("Expected label selector to be selectable") - } - - if len(reqs) != 1 { - return "", "", fmt.Errorf("Expected label selector to have one label KV") - } - - key := reqs[0].Key() - op := reqs[0].Operator() - val, _ := reqs[0].Values().PopAny() - - if op != selection.Equals && op != selection.In { - return "", "", fmt.Errorf("Expected label selector to check for equality") - } - - if reqs[0].Values().Len() != 1 { - return "", "", fmt.Errorf("Expected label selector to check against single label value") - } - - return key, val, nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/labeled_resources.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/labeled_resources.go deleted file mode 100644 index 00c59c1e3..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/labeled_resources.go +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
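The kapp.k14s.io/identity value in its v1 form has the shape "v1;<unique resource key>;<apiVersion>". A standalone sketch of the checks performed by Valid and MatchesVersion; the key string here is illustrative, since the real value comes from NewUniqueResourceKey(resource).String():

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative annotation value and the API version of the resource it sits on.
	ann := "v1;default/Deployment/my-app;apps/v1"
	resourceAPIVersion := "apps/v1"

	pieces := strings.Split(ann, ";")
	valid := len(pieces) == 3 && pieces[0] == "v1" // Valid() additionally compares the key against the resource
	matchesVersion := valid && pieces[2] == resourceAPIVersion

	fmt.Println("valid:", valid, "matches version:", matchesVersion)
}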
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" - "slices" - "strings" - "sync" - - "carvel.dev/kapp/pkg/kapp/logger" - "carvel.dev/kapp/pkg/kapp/util" - "k8s.io/apimachinery/pkg/labels" -) - -const ( - ExistsAnnKey = "kapp.k14s.io/exists" // Value is ignored - NoopAnnKey = "kapp.k14s.io/noop" // value is ignored -) - -type OwnershipLabelModsFunc func(kvs map[string]string) []StringMapAppendMod -type LabelScopingModsFunc func(kvs map[string]string) []StringMapAppendMod - -type LabeledResources struct { - labelSelector labels.Selector - identifiedResources IdentifiedResources - logger logger.Logger -} - -func NewLabeledResources(labelSelector labels.Selector, - identifiedResources IdentifiedResources, logger logger.Logger) *LabeledResources { - - return &LabeledResources{labelSelector, identifiedResources, logger.NewPrefixed("LabeledResources")} -} - -// Modifies passed resources for labels and ownership -func (a *LabeledResources) Prepare(resources []Resource, olmFunc OwnershipLabelModsFunc, - lsmFunc LabelScopingModsFunc, additionalLabels map[string]string) error { - - defer a.logger.DebugFunc("Prepare").Finish() - - labelKey, labelVal, err := NewSimpleLabel(a.labelSelector).KV() - if err != nil { - return err - } - - for _, res := range resources { - assocLabel := NewAssociationLabel(res) - ownershipLabels := map[string]string{ - labelKey: labelVal, - assocLabel.Key(): assocLabel.Value(), - } - - for k, v := range additionalLabels { - ownershipLabels[k] = v - } - - for _, t := range olmFunc(ownershipLabels) { - err := t.Apply(res) - if err != nil { - return err - } - } - - for _, t := range lsmFunc(map[string]string{labelKey: labelVal}) { - err := t.Apply(res) - if err != nil { - return err - } - } - } - - return nil -} - -func (a *LabeledResources) GetAssociated(resource Resource, resRefs []ResourceRef) ([]Resource, error) { - defer a.logger.DebugFunc("GetAssociated").Finish() - return a.identifiedResources.List(NewAssociationLabel(resource).AsSelector(), resRefs, IdentifiedResourcesListOpts{}) -} - -func (a *LabeledResources) All(listOpts IdentifiedResourcesListOpts) ([]Resource, error) { - defer a.logger.DebugFunc("All").Finish() - - resources, err := a.identifiedResources.List(a.labelSelector, nil, listOpts) - if err != nil { - return nil, err - } - - return resources, nil -} - -type AllAndMatchingOpts struct { - ExistingNonLabeledResourcesCheck bool - ExistingNonLabeledResourcesCheckConcurrency int - SkipResourceOwnershipCheck bool - SkipOwnershipCheckAllowedApps []string - IsNewApp bool - - DisallowedResourcesByLabelKeys []string - LabelErrorResolutionFunc func(string, string) string - LabelValAppMapResolverFunc func() map[string]string - - IdentifiedResourcesListOpts IdentifiedResourcesListOpts -} - -// AllAndMatching returns set of all labeled resources -// plus resources that match newResources. -// Returns errors if non-labeled resources were labeled -// with a different value. 
-func (a *LabeledResources) AllAndMatching(newResources []Resource, opts AllAndMatchingOpts) ([]Resource, error) { - defer a.logger.DebugFunc("AllAndMatching").Finish() - - var ( - resources []Resource - err error - ) - - // avoid listing labeled resources for newly created app - if !opts.IsNewApp { - resources, err = a.All(opts.IdentifiedResourcesListOpts) - if err != nil { - return nil, err - } - } - - var nonLabeledResources []Resource - - if opts.ExistingNonLabeledResourcesCheck { - nonLabeledResources, err = a.findNonLabeledResources( - resources, newResources, opts.ExistingNonLabeledResourcesCheckConcurrency) - if err != nil { - return nil, err - } - } - - if len(nonLabeledResources) > 0 && !opts.SkipResourceOwnershipCheck { - resourcesForCheck := a.resourcesForOwnershipCheck(newResources, nonLabeledResources) - if len(resourcesForCheck) > 0 { - err := a.checkResourceOwnership(resourcesForCheck, opts) - if err != nil { - return nil, err - } - } - } - - resources = append(resources, nonLabeledResources...) - - err = a.checkDisallowedLabels(resources, opts.DisallowedResourcesByLabelKeys) - if err != nil { - return nil, err - } - - return resources, nil -} - -func (a *LabeledResources) resourcesForOwnershipCheck(newResources []Resource, nonLabeledResources []Resource) []Resource { - var resources []Resource - - resourcesToBeSkipped := map[string]bool{} - - for _, res := range newResources { - _, hasExistsAnnotation := res.Annotations()[ExistsAnnKey] - _, hasNoopAnnotation := res.Annotations()[NoopAnnKey] - if hasExistsAnnotation || hasNoopAnnotation { - resourcesToBeSkipped[NewUniqueResourceKey(res).String()] = true - } - } - - for _, res := range nonLabeledResources { - if !resourcesToBeSkipped[NewUniqueResourceKey(res).String()] { - resources = append(resources, res) - } - } - - return resources -} - -func (a *LabeledResources) checkResourceOwnership(resources []Resource, opts AllAndMatchingOpts) error { - expectedLabelKey, expectedLabelVal, err := NewSimpleLabel(a.labelSelector).KV() - if err != nil { - return err - } - - var errs []error - labelValAppMap := map[string]string{} - isSelectiveOwnershipOverride := len(opts.SkipOwnershipCheckAllowedApps) > 0 - if isSelectiveOwnershipOverride { - labelValAppMap = opts.LabelValAppMapResolverFunc() - } - - for _, res := range resources { - if val, found := res.Labels()[expectedLabelKey]; found { - ownershipOverrideAllowed := false - - if isSelectiveOwnershipOverride { - ownershipOverrideAllowed = a.ownershipOverrideAllowed(labelValAppMap, res, - expectedLabelKey, opts.SkipOwnershipCheckAllowedApps) - } - if val != expectedLabelVal && !ownershipOverrideAllowed { - ownerMsg := fmt.Sprintf("different label '%s=%s'", expectedLabelKey, val) - if opts.LabelErrorResolutionFunc != nil { - ownerMsgSuggested := opts.LabelErrorResolutionFunc(expectedLabelKey, val) - if len(ownerMsgSuggested) > 0 { - ownerMsg = ownerMsgSuggested - } - } - errMsg := "Resource '%s' is already associated with a %s" - errs = append(errs, fmt.Errorf(errMsg, res.Description(), ownerMsg)) - } - } - } - - if len(errs) > 0 { - var msgs []string - for _, err := range errs { - msgs = append(msgs, "- "+err.Error()) - } - return fmt.Errorf("Ownership errors:\n%s", strings.Join(msgs, "\n")) - } - - return nil -} - -func (a *LabeledResources) ownershipOverrideAllowed(labelValAppMap map[string]string, res Resource, - expectedLabelKey string, overrideAllowedApps []string) bool { - labelVal, found := res.Labels()[expectedLabelKey] - if !found { - return true - } - appName, found := 
labelValAppMap[labelVal] - if !found { - return false - } - return slices.Contains(overrideAllowedApps, appName) -} - -func (a *LabeledResources) checkDisallowedLabels(resources []Resource, disallowedLblKeys []string) error { - var errs []error - - for _, res := range resources { - labels := res.Labels() - for _, disallowedLblKey := range disallowedLblKeys { - if _, found := labels[disallowedLblKey]; found { - errMsg := "Resource '%s' has a disallowed label '%s'" - errs = append(errs, fmt.Errorf(errMsg, res.Description(), disallowedLblKey)) - } - } - } - - if len(errs) > 0 { - var msgs []string - for _, err := range errs { - msgs = append(msgs, "- "+err.Error()) - } - return fmt.Errorf("Disallowed labels errors:\n%s", strings.Join(msgs, "\n")) - } - - return nil -} - -func (a *LabeledResources) findNonLabeledResources(labeledResources, newResources []Resource, concurrency int) ([]Resource, error) { - defer a.logger.DebugFunc("findNonLabeledResources").Finish() - - var foundResources []Resource - rsMap := map[string]struct{}{} - - for _, res := range labeledResources { - rsMap[NewUniqueResourceKey(res).String()] = struct{}{} - } - - var wg sync.WaitGroup - throttle := util.NewThrottle(concurrency) - - errCh := make(chan error, len(newResources)) - resCh := make(chan Resource, len(newResources)) - - for _, res := range newResources { - res := res // copy - - if _, found := rsMap[NewUniqueResourceKey(res).String()]; !found { - wg.Add(1) - go func() { - throttle.Take() - defer throttle.Done() - - defer func() { wg.Done() }() - - clusterRes, exists, err := a.identifiedResources.Exists(res, ExistsOpts{}) - if err != nil { - errCh <- err - return - } - - if exists { - resCh <- clusterRes - } - }() - } - } - - wg.Wait() - close(errCh) - close(resCh) - - for err := range errCh { - return nil, err - } - for res := range resCh { - foundResources = append(foundResources, res) - } - - return foundResources, nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/matcher_empty_field.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/matcher_empty_field.go deleted file mode 100644 index 0cd042c18..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/matcher_empty_field.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
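findNonLabeledResources above bounds its concurrent existence checks with a throttle. A self-contained sketch of that fan-out pattern using a buffered-channel throttle (an assumption for illustration, not necessarily how kapp's util.NewThrottle is implemented):

package main

import (
	"fmt"
	"sync"
)

// throttle bounds how many goroutines run the guarded section at once.
type throttle struct{ slots chan struct{} }

func newThrottle(n int) throttle { return throttle{slots: make(chan struct{}, n)} } // n must be >= 1 here
func (t throttle) Take()         { t.slots <- struct{}{} }
func (t throttle) Done()         { <-t.slots }

func main() {
	resources := []string{"a", "b", "c", "d", "e"}
	found := make(chan string, len(resources))

	var wg sync.WaitGroup
	th := newThrottle(2) // at most 2 concurrent "existence checks"

	for _, res := range resources {
		res := res
		wg.Add(1)
		go func() {
			th.Take()
			defer th.Done()
			defer wg.Done()
			found <- res // stand-in for identifiedResources.Exists(res, ...)
		}()
	}

	wg.Wait()
	close(found)
	for r := range found {
		fmt.Println("exists:", r)
	}
}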
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" -) - -type EmptyFieldMatcher struct { - Path Path -} - -var _ ResourceMatcher = EmptyFieldMatcher{} - -func (m EmptyFieldMatcher) Matches(res Resource) bool { - return m.check(res.unstructured().Object, m.Path) -} - -func (m EmptyFieldMatcher) check(obj interface{}, path Path) bool { - for i, part := range path { - switch { - case part.MapKey != nil: - typedObj, ok := obj.(map[string]interface{}) - if !ok { - return obj == nil - } - - var found bool - obj, found = typedObj[*part.MapKey] - if !found { - // It's not found, so it must be empty - return true - } - - case part.ArrayIndex != nil: - switch { - case part.ArrayIndex.All != nil: - typedObj, ok := obj.([]interface{}) - if !ok { - return obj == nil - } - - for _, obj := range typedObj { - empty := m.check(obj, path[i+1:]) - if !empty { - return false - } - } - - return true - - case part.ArrayIndex.Index != nil: - typedObj, ok := obj.([]interface{}) - if !ok { - return obj == nil - } - - if *part.ArrayIndex.Index < len(typedObj) { - obj = typedObj[*part.ArrayIndex.Index] - } else { - // Index not found, it's empty - return true - } - - default: - panic(fmt.Sprintf("Unknown array index: %#v", part.ArrayIndex)) - } - - default: - panic(fmt.Sprintf("Unexpected path part: %#v", part)) - } - } - - switch typedObj := obj.(type) { - case nil: - return true - case []interface{}: - return len(typedObj) == 0 - case map[string]interface{}: - return len(typedObj) == 0 - default: - return false - } -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/matchers.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/matchers.go deleted file mode 100644 index 7906218db..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/matchers.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -type ResourceMatcher interface { - Matches(Resource) bool -} - -type APIGroupKindMatcher struct { - APIGroup string - Kind string -} - -var _ ResourceMatcher = APIGroupKindMatcher{} - -func (m APIGroupKindMatcher) Matches(res Resource) bool { - return res.APIGroup() == m.APIGroup && res.Kind() == m.Kind -} - -type APIVersionKindMatcher struct { - APIVersion string - Kind string -} - -var _ ResourceMatcher = APIVersionKindMatcher{} - -func (m APIVersionKindMatcher) Matches(res Resource) bool { - return res.APIVersion() == m.APIVersion && res.Kind() == m.Kind -} - -type KindNamespaceNameMatcher struct { - Kind, Namespace, Name string -} - -var _ ResourceMatcher = KindNamespaceNameMatcher{} - -func (m KindNamespaceNameMatcher) Matches(res Resource) bool { - return res.Kind() == m.Kind && res.Namespace() == m.Namespace && res.Name() == m.Name -} - -type AllMatcher struct{} - -var _ ResourceMatcher = AllMatcher{} - -func (AllMatcher) Matches(Resource) bool { return true } - -type AnyMatcher struct { - Matchers []ResourceMatcher -} - -var _ ResourceMatcher = AnyMatcher{} - -func (m AnyMatcher) Matches(res Resource) bool { - for _, m := range m.Matchers { - if m.Matches(res) { - return true - } - } - return false -} - -type NotMatcher struct { - Matcher ResourceMatcher -} - -var _ ResourceMatcher = NotMatcher{} - -func (m NotMatcher) Matches(res Resource) bool { - return !m.Matcher.Matches(res) -} - -type AndMatcher struct { - Matchers []ResourceMatcher -} - -var _ ResourceMatcher = AndMatcher{} - -func (m AndMatcher) Matches(res Resource) bool { - for _, m := range m.Matchers { - if !m.Matches(res) { - return false - } - } - return true -} - -type HasAnnotationMatcher struct { - Keys []string -} - -var _ ResourceMatcher = HasAnnotationMatcher{} - -func (m HasAnnotationMatcher) Matches(res Resource) bool { - anns := res.Annotations() - for _, key := range m.Keys { - if _, found := anns[key]; !found { - return false - } - } - return true -} - -type HasNamespaceMatcher struct { - Names []string -} - -var _ ResourceMatcher = HasNamespaceMatcher{} - -func (m HasNamespaceMatcher) Matches(res Resource) bool { - resNs := res.Namespace() - if len(resNs) == 0 { - return false // cluster resource - } - if len(m.Names) == 0 { - return true // matches any name, but not cluster - } - for _, name := range m.Names { - if name == resNs { - return true - } - } - return false -} - -var ( - // TODO should we just generically match *.k8s.io? 
- // Based on https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#-strong-api-groups-strong- - builtinAPIGroups = map[string]struct{}{ - "": struct{}{}, - "admissionregistration.k8s.io": struct{}{}, - "apiextensions.k8s.io": struct{}{}, - "apiregistration.k8s.io": struct{}{}, - "apps": struct{}{}, - "authentication.k8s.io": struct{}{}, - "authorization.k8s.io": struct{}{}, - "autoscaling": struct{}{}, - "batch": struct{}{}, - "certificates.k8s.io": struct{}{}, - "coordination.k8s.io": struct{}{}, - "discovery.k8s.io": struct{}{}, - "events.k8s.io": struct{}{}, - "extensions": struct{}{}, - "flowcontrol.apiserver.k8s.io": struct{}{}, - "internal.apiserver.k8s.io": struct{}{}, - "metrics.k8s.io": struct{}{}, - "migration.k8s.io": struct{}{}, - "networking.k8s.io": struct{}{}, - "node.k8s.io": struct{}{}, - "policy": struct{}{}, - "rbac.authorization.k8s.io": struct{}{}, - "scheduling.k8s.io": struct{}{}, - "storage.k8s.io": struct{}{}, - } -) - -type CustomResourceMatcher struct{} - -var _ ResourceMatcher = CustomResourceMatcher{} - -func (m CustomResourceMatcher) Matches(res Resource) bool { - _, found := builtinAPIGroups[res.APIGroup()] - return !found -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_field_copy.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_field_copy.go deleted file mode 100644 index b28929ca1..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_field_copy.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" - "regexp" -) - -type FieldCopyModSource string - -const ( - FieldCopyModSourceNew FieldCopyModSource = "new" - FieldCopyModSourceExisting = "existing" -) - -type FieldCopyMod struct { - ResourceMatcher ResourceMatcher - Path Path - Sources []FieldCopyModSource // first preferred -} - -var _ ResourceModWithMultiple = FieldCopyMod{} - -func (t FieldCopyMod) IsResourceMatching(res Resource) bool { - if res == nil || !t.ResourceMatcher.Matches(res) { - return false - } - return true -} - -func (t FieldCopyMod) ApplyFromMultiple(res Resource, srcs map[FieldCopyModSource]Resource) error { - for _, src := range t.Sources { - source, found := srcs[src] - if !found { - continue - } - // Make a copy of resource, to avoid modifications - // that may be done even in case when there is nothing to copy - updatedRes := res.DeepCopy() - source = source.DeepCopy() - updated, err := t.apply(updatedRes.unstructured().Object, source.unstructured().Object, t.Path, Path{}, srcs) - if err != nil { - return fmt.Errorf("FieldCopyMod for path '%s' on resource '%s': %s", t.Path.AsString(), res.Description(), err) - } - if updated { - res.setUnstructured(updatedRes.unstructured()) - } - } - - return nil -} - -func (t FieldCopyMod) apply(obj interface{}, srcObj interface{}, path Path, fullPath Path, srcs map[FieldCopyModSource]Resource) (bool, error) { - for i, part := range path { - isLast := len(path) == i+1 - fullPath = append(fullPath, part) - - switch { - case part.MapKey != nil: - srcTypedObj, ok := srcObj.(map[string]interface{}) - if !ok { - return false, fmt.Errorf("Unexpected non-map found: %T", srcObj) - } - typedObj, ok := obj.(map[string]interface{}) - if !ok { - return false, fmt.Errorf("Unexpected non-map found: %T", obj) - } - - if isLast { - return t.copyIntoMap(typedObj, fullPath, srcs) - } - - var ( - found bool - srcObjFound bool - ) - srcObj, srcObjFound = srcTypedObj[*part.MapKey] - if !srcObjFound || srcObj == nil { - return 
false, nil - } - - obj, found = typedObj[*part.MapKey] - // TODO check strictness? - if !found || obj == nil { - // create empty maps if there are no downstream array indexes; - // if there are, we cannot make them anyway, so just exit - if path.ContainsArrayIndex() { - return false, nil - } - obj = map[string]interface{}{} - typedObj[*part.MapKey] = obj - } - - case part.ArrayIndex != nil: - if isLast { - return false, fmt.Errorf("Expected last part of the path to be map key") - } - - switch { - case part.ArrayIndex.All != nil: - typedObj, ok := obj.([]interface{}) - if !ok { - return false, fmt.Errorf("Unexpected non-array found: %T", obj) - } - - srcTypedObj, ok := srcObj.([]interface{}) - if !ok { - return false, fmt.Errorf("Unexpected non-array found: %T", srcObj) - } - - var anyUpdated bool - - for objI, obj := range typedObj { - objI := objI - - newFullPath := append([]*PathPart{}, fullPath...) - newFullPath[len(newFullPath)-1] = &PathPart{ArrayIndex: &PathPartArrayIndex{Index: &objI}} - - var srcTypeObj map[string]interface{} - if objI < len(srcTypedObj) { - srcTypeObj = srcTypedObj[objI].(map[string]interface{}) - } - updated, err := t.apply(obj, srcTypeObj, path[i+1:], newFullPath, srcs) - if err != nil { - return false, err - } - if updated { - anyUpdated = true - } - } - - return anyUpdated, nil // dealt with children, get out - - case part.ArrayIndex.Index != nil: - typedObj, ok := obj.([]interface{}) - if !ok { - return false, fmt.Errorf("Unexpected non-array found: %T", obj) - } - - srcTypedObj, ok := srcObj.([]interface{}) - if !ok { - return false, fmt.Errorf("Unexpected non-array found: %T", srcObj) - } - - if *part.ArrayIndex.Index < len(typedObj) { - obj = typedObj[*part.ArrayIndex.Index] - srcObj = srcTypedObj[*part.ArrayIndex.Index] - return t.apply(obj, srcObj, path[i+1:], fullPath, srcs) - } - - return false, nil // index not found, nothing to append to - - default: - panic(fmt.Sprintf("Unknown array index: %#v", part.ArrayIndex)) - } - - case part.Regex != nil: - if part.Regex.Regex == nil { - panic("Regex should be non nil") - } - matchedKeys, err := matchRegexWithSrcObj(*part.Regex.Regex, srcObj) - if err != nil { - return false, err - } - var anyUpdated bool - for _, key := range matchedKeys { - newPath := append(Path{&PathPart{MapKey: &key}}, path[i+1:]...) 
- newFullPath := fullPath[:len(fullPath)-1] - updated, err := t.apply(obj, srcObj, newPath, newFullPath, srcs) - if err != nil { - return false, err - } - if updated { - anyUpdated = true - } - } - - return anyUpdated, nil - - default: - panic(fmt.Sprintf("Unexpected path part: %#v", part)) - } - } - - panic("unreachable") -} - -func (t FieldCopyMod) copyIntoMap(obj map[string]interface{}, fullPath Path, srcs map[FieldCopyModSource]Resource) (bool, error) { - lastPartPath := fullPath[len(fullPath)-1] - if lastPartPath.MapKey == nil { - return false, fmt.Errorf("Expected last path part to be map-key") - } - - for _, src := range t.Sources { - srcRes, found := srcs[src] - if !found || srcRes == nil { - continue - } - - val, found, err := t.obtainValue(srcRes.DeepCopy().unstructured().Object, fullPath) - if err != nil { - return false, err - } else if !found { - continue - } - - obj[*lastPartPath.MapKey] = val - return true, nil - } - - return false, nil -} - -func (t FieldCopyMod) obtainValue(obj interface{}, path Path) (interface{}, bool, error) { - for i, part := range path { - isLast := len(path) == i+1 - - switch { - case part.MapKey != nil: - typedObj, ok := obj.(map[string]interface{}) - if !ok && typedObj != nil { - return nil, false, fmt.Errorf("Unexpected non-map found: %T", obj) - } - - var found bool - obj, found = typedObj[*part.MapKey] - if !found { - return nil, false, nil // index is not found return - } - - case part.ArrayIndex != nil: - if isLast { - return nil, false, fmt.Errorf("Expected last part of the path to be map key") - } - - switch { - case part.ArrayIndex.Index != nil: - typedObj, ok := obj.([]interface{}) - if !ok { - return nil, false, fmt.Errorf("Unexpected non-array found: %T", obj) - } - - if *part.ArrayIndex.Index < len(typedObj) { - obj = typedObj[*part.ArrayIndex.Index] - } else { - return nil, false, nil // index not found, return - } - - default: - panic(fmt.Sprintf("Unknown array index: %#v", part.ArrayIndex)) - } - - default: - panic(fmt.Sprintf("Unexpected path part: %#v", part)) - } - } - - return obj, true, nil -} - -func matchRegexWithSrcObj(regexString string, srcObj interface{}) ([]string, error) { - var matchedKeys []string - regex, err := regexp.Compile(regexString) - if err != nil { - return matchedKeys, err - } - srcTypedObj, ok := srcObj.(map[string]interface{}) - if !ok && srcTypedObj != nil { - return matchedKeys, fmt.Errorf("Unexpected non-map found: %T", srcObj) - } - for key := range srcTypedObj { - if regex.MatchString(key) { - matchedKeys = append(matchedKeys, key) - } - } - return matchedKeys, nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_field_remove.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_field_remove.go deleted file mode 100644 index 465477003..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_field_remove.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" -) - -type FieldRemoveMod struct { - ResourceMatcher ResourceMatcher - Path Path -} - -var _ ResourceMod = FieldRemoveMod{} -var _ ResourceModWithMultiple = FieldCopyMod{} - -func (t FieldRemoveMod) IsResourceMatching(res Resource) bool { - if res == nil || !t.ResourceMatcher.Matches(res) { - return false - } - return true -} - -func (t FieldRemoveMod) ApplyFromMultiple(res Resource, _ map[FieldCopyModSource]Resource) error { - return t.Apply(res) -} - -func (t FieldRemoveMod) Apply(res Resource) error { - err := t.apply(res.unstructured().Object, t.Path) - if err != nil { - return fmt.Errorf("FieldRemoveMod for path '%s' on resource '%s': %w", t.Path.AsString(), res.Description(), err) - } - return nil -} - -func (t FieldRemoveMod) apply(obj interface{}, path Path) error { - for i, part := range path { - isLast := len(path) == i+1 - - switch { - case part.MapKey != nil: - typedObj, ok := obj.(map[string]interface{}) - if !ok { - // TODO check strictness? - if typedObj == nil { - return nil // map is a nil, nothing to remove - } - return fmt.Errorf("Unexpected non-map found: %T", obj) - } - - if isLast { - delete(typedObj, *part.MapKey) - return nil - } - - var found bool - obj, found = typedObj[*part.MapKey] - if !found { - return nil // map key is not found, nothing to remove - } - - case part.ArrayIndex != nil: - if isLast { - return fmt.Errorf("Expected last part of the path to be map key") - } - - switch { - case part.ArrayIndex.All != nil: - typedObj, ok := obj.([]interface{}) - if !ok { - return fmt.Errorf("Unexpected non-array found: %T", obj) - } - - for _, obj := range typedObj { - err := t.apply(obj, path[i+1:]) - if err != nil { - return err - } - } - - return nil // dealt with children, get out - - case part.ArrayIndex.Index != nil: - typedObj, ok := obj.([]interface{}) - if !ok { - return fmt.Errorf("Unexpected non-array found: %T", obj) - } - - if *part.ArrayIndex.Index < len(typedObj) { - obj = typedObj[*part.ArrayIndex.Index] - } else { - return nil // index not found, nothing to remove - } - - default: - panic(fmt.Sprintf("Unknown array index: %#v", part.ArrayIndex)) - } - case part.Regex != nil: - if part.Regex.Regex == nil { - panic("Regex should be non nil") - } - matchedKeys, err := matchRegexWithSrcObj(*part.Regex.Regex, obj) - if err != nil { - return err - } - for _, key := range matchedKeys { - newPath := append(Path{&PathPart{MapKey: &key}}, path[i+1:]...) - err := t.apply(obj, newPath) - if err != nil { - return err - } - } - - return nil - - default: - panic(fmt.Sprintf("Unexpected path part: %#v", part)) - } - } - - panic("unreachable") -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_object_ref_set.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_object_ref_set.go deleted file mode 100644 index 03af635f2..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_object_ref_set.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" -) - -type ObjectRefSetMod struct { - ResourceMatcher ResourceMatcher - Path Path - ReplacementFunc func(map[string]interface{}) error -} - -var _ ResourceMod = ObjectRefSetMod{} - -func (t ObjectRefSetMod) Apply(res Resource) error { - if !t.ResourceMatcher.Matches(res) { - return nil - } - err := t.apply(res.unstructured().Object, t.Path) - if err != nil { - return fmt.Errorf("ObjectRefSetMod for path '%s' on resource '%s': %w", t.Path.AsString(), res.Description(), err) - } - return nil -} - -func (t ObjectRefSetMod) apply(obj interface{}, path Path) error { - for i, part := range path { - switch { - case part.MapKey != nil: - typedObj, ok := obj.(map[string]interface{}) - if !ok { - return fmt.Errorf("Unexpected non-map found: %T", obj) - } - - var found bool - obj, found = typedObj[*part.MapKey] - if !found { - return nil - } - - case part.ArrayIndex != nil: - switch { - case part.ArrayIndex.All != nil: - if obj == nil { - return nil - } - typedObj, ok := obj.([]interface{}) - if !ok { - return fmt.Errorf("Unexpected non-array found: %T", obj) - } - - for _, obj := range typedObj { - err := t.apply(obj, path[i+1:]) - if err != nil { - return err - } - } - - return nil // dealt with children, get out - - case part.ArrayIndex.Index != nil: - typedObj, ok := obj.([]interface{}) - if !ok { - return fmt.Errorf("Unexpected non-array found: %T", obj) - } - - if *part.ArrayIndex.Index < len(typedObj) { - return t.apply(typedObj[*part.ArrayIndex.Index], path[i+1:]) - } - - return nil // index not found, nothing to append to - - default: - panic(fmt.Sprintf("Unknown array index: %#v", part.ArrayIndex)) - } - - case part.Regex != nil: - panic("Regex in path part is only supported for rebaseRules.") - - default: - panic(fmt.Sprintf("Unexpected path part: %#v", part)) - } - } - - typedObj, ok := obj.(map[string]interface{}) - if !ok { - return fmt.Errorf("Unexpected non-map found: %T", obj) - } - - return t.ReplacementFunc(typedObj) -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_path.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_path.go deleted file mode 100644 index 1720aec2a..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_path.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "encoding/json" - "fmt" - "strings" -) - -type ResourceMod interface { - Apply(Resource) error -} - -type ResourceModWithMultiple interface { - ApplyFromMultiple(Resource, map[FieldCopyModSource]Resource) error - IsResourceMatching(resource Resource) bool -} - -type Path []*PathPart - -type PathPart struct { - MapKey *string - Regex *PathPartRegex - ArrayIndex *PathPartArrayIndex -} - -var _ json.Unmarshaler = &PathPart{} - -type PathPartArrayIndex struct { - Index *int - All *bool `json:"allIndexes"` -} - -type PathPartRegex struct { - Regex *string `json:"regex"` -} - -func NewPathFromStrings(strs []string) Path { - var path Path - for _, str := range strs { - path = append(path, NewPathPartFromString(str)) - } - return path -} - -func NewPathFromInterfaces(parts []interface{}) Path { - var path Path - for _, part := range parts { - switch typedPart := part.(type) { - case string: - path = append(path, NewPathPartFromString(typedPart)) - case int: - path = append(path, NewPathPartFromIndex(typedPart)) - default: - panic(fmt.Sprintf("Unexpected part type %T", typedPart)) - } - } - return path -} - -func (p Path) AsStrings() []string { - var result []string - for _, part := range p { - if part.MapKey == nil { - panic(fmt.Sprintf("Unexpected non-map-key path part '%#v'", part)) - } - result = append(result, *part.MapKey) - } - return result -} - -func (p Path) AsString() string { - var result []string - for _, part := range p { - result = append(result, part.AsString()) - } - return strings.Join(result, ",") -} - -func (p Path) ContainsNonMapKeys() bool { - for _, part := range p { - if part.MapKey == nil { - return true - } - } - return false -} - -func (p Path) ContainsArrayIndex() bool { - for _, part := range p { - if part.ArrayIndex != nil { - return true - } - } - return false -} - -func NewPathPartFromString(str string) *PathPart { - return &PathPart{MapKey: &str} -} - -func NewPathPartFromIndex(i int) *PathPart { - return &PathPart{ArrayIndex: &PathPartArrayIndex{Index: &i}} -} - -func NewPathPartFromIndexAll() *PathPart { - trueBool := true - return &PathPart{ArrayIndex: &PathPartArrayIndex{All: &trueBool}} -} - -func (p *PathPart) AsString() string { - switch { - case p.MapKey != nil: - return *p.MapKey - case p.ArrayIndex != nil && p.ArrayIndex.Index != nil: - return fmt.Sprintf("%d", *p.ArrayIndex.Index) - case p.ArrayIndex != nil && p.ArrayIndex.All != nil: - return "(all)" - case p.Regex != nil && p.Regex.Regex != nil: - return *p.Regex.Regex - default: - panic("Unknown path part") - } -} - -func (p *PathPart) UnmarshalJSON(data []byte) error { - var str string - var idx PathPartArrayIndex - var regx PathPartRegex - - switch { - case json.Unmarshal(data, &str) == nil: - p.MapKey = &str - case json.Unmarshal(data, ®x) == nil && regx.Regex != nil: - p.Regex = ®x - case json.Unmarshal(data, &idx) == nil: - p.ArrayIndex = &idx - default: - return fmt.Errorf("Unknown path part") - } - return nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_string_map_append.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_string_map_append.go deleted file mode 100644 index 19e1af1c8..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/mod_string_map_append.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" -) - -type StringMapAppendMod struct { - // For example applies to Deployment, ReplicaSet, StatefulSet - // TODO should there be an opt-out way? - ResourceMatcher ResourceMatcher - Path Path - SkipIfNotFound bool - KVs map[string]string -} - -var _ ResourceMod = StringMapAppendMod{} - -func (t StringMapAppendMod) Apply(res Resource) error { - if !t.ResourceMatcher.Matches(res) { - return nil - } - err := t.apply(res.unstructured().Object, t.Path) - if err != nil { - return fmt.Errorf("StringMapAppendMod for path '%s' on resource '%s': %w", t.Path.AsString(), res.Description(), err) - } - return nil -} - -func (t StringMapAppendMod) apply(obj interface{}, path Path) error { - for i, part := range path { - switch { - case part.MapKey != nil: - typedObj, ok := obj.(map[string]interface{}) - if !ok { - return fmt.Errorf("Unexpected non-map found: %T", obj) - } - - var found bool - obj, found = typedObj[*part.MapKey] - // TODO check strictness? - if !found || obj == nil { - // create empty maps if there are no downstream array indexes; - // if there are, we cannot make them anyway, so just exit - if t.SkipIfNotFound || path.ContainsNonMapKeys() { - return nil - } - obj = map[string]interface{}{} - typedObj[*part.MapKey] = obj - } - - case part.ArrayIndex != nil: - switch { - case part.ArrayIndex.All != nil: - typedObj, ok := obj.([]interface{}) - if !ok { - return fmt.Errorf("Unexpected non-array found: %T", obj) - } - - for _, obj := range typedObj { - err := t.apply(obj, path[i+1:]) - if err != nil { - return err - } - } - - return nil // dealt with children, get out - - case part.ArrayIndex.Index != nil: - typedObj, ok := obj.([]interface{}) - if !ok { - return fmt.Errorf("Unexpected non-array found: %T", obj) - } - - if *part.ArrayIndex.Index < len(typedObj) { - return t.apply(typedObj[*part.ArrayIndex.Index], path[i+1:]) - } - - return nil // index not found, nothing to append to - - default: - panic(fmt.Sprintf("Unknown array index: %#v", part.ArrayIndex)) - } - - case part.Regex != nil: - panic("Regex in path part is only supported for rebaseRules.") - - default: - panic(fmt.Sprintf("Unexpected path part: %#v", part)) - } - } - - typedObj, ok := obj.(map[string]interface{}) - if !ok { - return fmt.Errorf("Unexpected non-map found: %T", obj) - } - - for k, v := range t.KVs { - typedObj[k] = v - } - - return nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/pod_watcher.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/pod_watcher.go deleted file mode 100644 index c743f844f..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/pod_watcher.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "context" - "fmt" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" -) - -type PodWatcher struct { - podsClient typedcorev1.PodInterface - listOpts metav1.ListOptions -} - -func NewPodWatcher( - podsClient typedcorev1.PodInterface, - listOpts metav1.ListOptions, -) PodWatcher { - return PodWatcher{podsClient, listOpts} -} - -func (w PodWatcher) Watch(podsToWatchCh chan corev1.Pod, cancelCh chan struct{}) error { - podsList, err := w.podsClient.List(context.TODO(), w.listOpts) - if err != nil { - return err - } - - for _, pod := range podsList.Items { - podsToWatchCh <- pod - } - - // Return before potentially getting any events - select { - case <-cancelCh: - return nil - default: - } - - for { - retry, err := w.watch(podsToWatchCh, cancelCh) - if err != nil { - return err - } - if !retry { - return nil - } - } -} - -func (w PodWatcher) watch(podsToWatchCh chan corev1.Pod, cancelCh chan struct{}) (bool, error) { - watcher, err := w.podsClient.Watch(context.TODO(), w.listOpts) - if err != nil { - return false, fmt.Errorf("Creating Pod watcher: %w", err) - } - - defer watcher.Stop() - - for { - select { - case e, ok := <-watcher.ResultChan(): - if !ok || e.Object == nil { - // Watcher may expire, hence try to retry - return true, nil - } - - pod, ok := e.Object.(*corev1.Pod) - if !ok { - continue - } - - switch e.Type { - case watch.Added: - podsToWatchCh <- *pod - } - - case <-cancelCh: - return false, nil - } - } -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/refs.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/refs.go deleted file mode 100644 index 7cffa8fa7..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/refs.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type ResourceRef struct { - schema.GroupVersionResource -} - -type PartialResourceRef struct { - schema.GroupVersionResource -} - -func (r PartialResourceRef) Matches(other schema.GroupVersionResource) bool { - s := r.GroupVersionResource - - // TODO: support matching on Group+Resource - // so that, for example, SpecificResFactory's can fine-tune which resources - // are fetched. - switch { - case len(s.Version) > 0 && len(s.Resource) > 0: - return s == other - case len(s.Resource) > 0: - return s.Group == other.Group && s.Resource == other.Resource - case len(s.Version) > 0: - return s.Group == other.Group && s.Version == other.Version - case len(s.Version) == 0 && len(s.Resource) == 0: - return s.Group == other.Group - default: - return false - } -} - -type GKResourceRef struct { - schema.GroupKind -} - -func (r GKResourceRef) Matches(other ResourceType) bool { - return r.Group == other.APIResource.Group && r.Kind == other.Kind -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/resource.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/resource.go deleted file mode 100644 index fd3c83bf2..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/resource.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/yaml" -) - -type Resource interface { - GroupVersionResource() schema.GroupVersionResource - GroupVersion() schema.GroupVersion - GroupKind() schema.GroupKind - Kind() string - APIVersion() string - APIGroup() string - - Namespace() string - SetNamespace(name string) - RemoveNamespace() - - Name() string - SetName(name string) - Description() string - - Annotations() map[string]string - Labels() map[string]string - Finalizers() []string - OwnerRefs() []metav1.OwnerReference - Status() map[string]interface{} - - CreatedAt() time.Time - IsProvisioned() bool - IsDeleting() bool - UID() string - - Equal(res Resource) bool - DeepCopy() Resource - DeepCopyRaw() map[string]interface{} - DeepCopyIntoFrom(res Resource) - AsYAMLBytes() ([]byte, error) - AsCompactBytes() ([]byte, error) - AsTypedObj(obj interface{}) error - AsUncheckedTypedObj(obj interface{}) error - - Debug(string) - - SetOrigin(string) - Origin() string - - MarkTransient(bool) - Transient() bool - - UnstructuredObject() map[string]interface{} - - unstructured() unstructured.Unstructured // private - unstructuredPtr() *unstructured.Unstructured // private - setUnstructured(unstructured.Unstructured) // private -} - -type ResourceImpl struct { - un unstructured.Unstructured - resType ResourceType - transient bool - origin string -} - -var _ Resource = &ResourceImpl{} - -func NewResourceUnstructured(un unstructured.Unstructured, resType ResourceType) *ResourceImpl { - return &ResourceImpl{un: un, resType: resType} -} - -func NewResourceFromBytes(data []byte) (*ResourceImpl, error) { - var content map[string]interface{} - - err := yaml.Unmarshal(data, &content) - if err != nil { - return nil, err - } - - if len(content) == 0 { - return nil, nil - } - - return &ResourceImpl{un: unstructured.Unstructured{content}}, nil -} - -func MustNewResourceFromBytes(data []byte) *ResourceImpl { - res, err := NewResourceFromBytes(data) - if err != nil { - panic(fmt.Sprintf("Invalid resource: %s", err)) - } - if res == nil { - panic(fmt.Sprintf("Empty resource: %s", err)) - } - return res -} - -func NewResourcesFromBytes(data []byte) ([]Resource, error) { - var rs []Resource - var content map[string]interface{} - - err := yaml.Unmarshal(data, &content) - if err != nil { - return nil, err - } - - if len(content) == 0 { - return nil, nil - } - - un := unstructured.Unstructured{content} - - if un.IsList() { - list, err := un.ToList() - if err != nil { - return nil, err - } - - for _, itemUn := range list.Items { - rs = append(rs, &ResourceImpl{un: itemUn}) - } - } else { - rs = append(rs, &ResourceImpl{un: un}) - } - - return rs, nil -} - -func (r *ResourceImpl) GroupVersionResource() schema.GroupVersionResource { - return r.resType.GroupVersionResource -} - -func (r *ResourceImpl) GroupKind() schema.GroupKind { - return r.un.GroupVersionKind().GroupKind() -} - -func (r *ResourceImpl) GroupVersion() schema.GroupVersion { - pieces := strings.Split(r.APIVersion(), "/") - if len(pieces) > 2 { - panic(fmt.Errorf("Expected version to be of format group/version: was %s", r.APIVersion())) // TODO panic - } - if len(pieces) == 1 { - return schema.GroupVersion{Group: "", Version: pieces[0]} - } - return schema.GroupVersion{Group: 
pieces[0], Version: pieces[1]} -} - -func (r *ResourceImpl) Kind() string { return r.un.GetKind() } -func (r *ResourceImpl) APIVersion() string { return r.un.GetAPIVersion() } - -func (r *ResourceImpl) APIGroup() string { - return r.GroupVersion().Group -} - -func (r *ResourceImpl) Namespace() string { return r.un.GetNamespace() } -func (r *ResourceImpl) SetNamespace(name string) { r.un.SetNamespace(name) } - -func (r *ResourceImpl) RemoveNamespace() { - unstructured.RemoveNestedField(r.un.Object, "metadata", "namespace") -} - -func (r *ResourceImpl) Name() string { - name := r.un.GetName() - if len(name) > 0 { - return name - } - genName := r.un.GetGenerateName() - if len(genName) > 0 { - return genName + "*" - } - return "" -} - -func (r *ResourceImpl) SetName(name string) { r.un.SetName(name) } - -func (r *ResourceImpl) Description() string { - // TODO proper kind to resource conversion - result := fmt.Sprintf("%s/%s (%s)", strings.ToLower(r.Kind()), r.Name(), r.APIVersion()) - - if len(r.Namespace()) > 0 { - result += " namespace: " + r.Namespace() - } else { - result += " cluster" - } - - return result -} - -func (r *ResourceImpl) CreatedAt() time.Time { return r.un.GetCreationTimestamp().Time } -func (r *ResourceImpl) UID() string { return string(r.un.GetUID()) } - -func (r *ResourceImpl) IsProvisioned() bool { - // metrics.k8s.io/PodMetrics for example did not have a UID set - // TODO may be better to rely on selfLink? - return len(r.un.GetUID()) > 0 || !r.CreatedAt().IsZero() -} - -func (r *ResourceImpl) IsDeleting() bool { return r.un.GetDeletionTimestamp() != nil } - -func (r *ResourceImpl) MarkTransient(transient bool) { r.transient = transient } -func (r *ResourceImpl) Transient() bool { return r.transient } - -func (r *ResourceImpl) Annotations() map[string]string { return r.un.GetAnnotations() } -func (r *ResourceImpl) Labels() map[string]string { return r.un.GetLabels() } -func (r *ResourceImpl) OwnerRefs() []metav1.OwnerReference { return r.un.GetOwnerReferences() } -func (r *ResourceImpl) Finalizers() []string { return r.un.GetFinalizers() } - -func (r *ResourceImpl) Status() map[string]interface{} { - if r.un.Object != nil { - if status, ok := r.un.Object["status"]; ok { - if typedStatus, ok := status.(map[string]interface{}); ok { - return typedStatus - } - } - } - return nil -} - -func (r *ResourceImpl) Equal(res Resource) bool { - if typedRes, ok := res.(*ResourceImpl); ok { - return reflect.DeepEqual(r.un, typedRes.un) - } - panic("Resource#Equal only supports ResourceImpl") -} - -func (r *ResourceImpl) DeepCopy() Resource { - return &ResourceImpl{*r.un.DeepCopy(), r.resType, r.transient, ""} -} - -func (r *ResourceImpl) DeepCopyRaw() map[string]interface{} { - return r.un.DeepCopy().UnstructuredContent() -} - -func (r *ResourceImpl) DeepCopyIntoFrom(res Resource) { - r.setUnstructured(unstructured.Unstructured{res.DeepCopyRaw()}) -} - -func (r *ResourceImpl) AsYAMLBytes() ([]byte, error) { - return yaml.Marshal(r.un.Object) -} - -func (r *ResourceImpl) AsCompactBytes() ([]byte, error) { - // For larger resources (especially very indented ones), - // JSON representation seems to be more space effecient. - // It's also chosed by kubectl's last-applied-configuration annotation. - // (https://github.com/carvel-dev/kapp/issues/48). 
- return json.Marshal(r.un.Object) -} - -func (r *ResourceImpl) AsTypedObj(obj interface{}) error { - return scheme.Scheme.Convert(r.unstructuredPtr(), obj, nil) -} - -func (r *ResourceImpl) AsUncheckedTypedObj(obj interface{}) error { - jsonBs, err := json.Marshal(r.un.Object) - if err != nil { - return err - } - return json.Unmarshal(jsonBs, obj) -} - -func (r *ResourceImpl) Debug(title string) { - bs, err := r.AsYAMLBytes() - if err != nil { - panic("Unexpected failure to serialize resource") - } - fmt.Printf("%s (%s):\n%s\n", title, r.Description(), bs) -} - -func (r *ResourceImpl) SetOrigin(origin string) { r.origin = origin } -func (r *ResourceImpl) Origin() string { return r.origin } - -func (r *ResourceImpl) UnstructuredObject() map[string]interface{} { return r.un.Object } - -func (r *ResourceImpl) unstructured() unstructured.Unstructured { return r.un } -func (r *ResourceImpl) unstructuredPtr() *unstructured.Unstructured { return &r.un } -func (r *ResourceImpl) setUnstructured(un unstructured.Unstructured) { r.un = un } diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/resource_filter.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/resource_filter.go deleted file mode 100644 index f2bd7a0a0..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/resource_filter.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "encoding/json" - "fmt" - "time" - - "carvel.dev/kapp/pkg/kapp/matcher" // TODO inject - "k8s.io/apimachinery/pkg/labels" -) - -type ResourceFilter struct { - CreatedAtBeforeTime *time.Time - CreatedAtAfterTime *time.Time - - Kinds []string - Namespaces []string - Names []string - KindNames []string - KindNamespaces []string - KindNsNames []string - Labels []string - - BoolFilter *BoolFilter `json:"-"` -} - -func (f ResourceFilter) Apply(resources []Resource) []Resource { - var result []Resource - - for _, resource := range resources { - if f.Matches(resource) { - result = append(result, resource) - } - } - - return result -} - -func (f ResourceFilter) Matches(resource Resource) bool { - if f.BoolFilter != nil { - return f.BoolFilter.Matches(resource) - } - - if f.CreatedAtBeforeTime != nil { - if resource.CreatedAt().After(*f.CreatedAtBeforeTime) { - return false - } - } - - if f.CreatedAtAfterTime != nil { - if resource.CreatedAt().Before(*f.CreatedAtAfterTime) { - return false - } - } - - if len(f.Kinds) > 0 { - var matched bool - for _, kind := range f.Kinds { - if matcher.NewStringMatcher(kind).Matches(resource.Kind()) { - matched = true - break - } - } - if !matched { - return false - } - } - - if len(f.Namespaces) > 0 { - var matched bool - for _, ns := range f.Namespaces { - if matcher.NewStringMatcher(ns).Matches(resource.Namespace()) { - matched = true - break - } - } - if !matched { - return false - } - } - - if len(f.Names) > 0 { - var matched bool - for _, name := range f.Names { - if matcher.NewStringMatcher(name).Matches(resource.Name()) { - matched = true - break - } - } - if !matched { - return false - } - } - - if len(f.Labels) > 0 { - var matched bool - for _, label := range f.Labels { - labelSelector, err := labels.Parse(label) - if err != nil { - panic(fmt.Sprintf("Parsing label selector failed: %s", err)) - } - if labelSelector.Matches(labels.Set(resource.Labels())) { - matched = true - break - } - } - if !matched { - return false - } - } - - if len(f.KindNames) > 0 { - key := resource.Kind() + "/" + resource.Name() - var matched bool - for _, k := 
range f.KindNames { - if key == k { - matched = true - break - } - } - if !matched { - return false - } - } - - if len(f.KindNamespaces) > 0 { - key := resource.Kind() + "/" + resource.Namespace() - var matched bool - for _, k := range f.KindNamespaces { - if key == k { - matched = true - break - } - } - if !matched { - return false - } - } - - if len(f.KindNsNames) > 0 { - key := resource.Kind() + "/" + resource.Namespace() + "/" + resource.Name() - var matched bool - for _, k := range f.KindNsNames { - if key == k { - matched = true - break - } - } - if !matched { - return false - } - } - - return true -} - -type BoolFilter struct { - And []BoolFilter - Or []BoolFilter - Not *BoolFilter - Resource *ResourceFilter -} - -func NewBoolFilterFromString(data string) (*BoolFilter, error) { - var filter BoolFilter - - err := json.Unmarshal([]byte(data), &filter) - if err != nil { - return nil, err - } - - return &filter, nil -} - -func (m BoolFilter) Matches(res Resource) bool { - if len(m.And) > 0 { - for _, m2 := range m.And { - if !m2.Matches(res) { - return false - } - } - return true - } - - if len(m.Or) > 0 { - for _, m2 := range m.Or { - if m2.Matches(res) { - return true - } - } - return false - } - - if m.Not != nil { - return !m.Not.Matches(res) - } - - if m.Resource != nil { - return m.Resource.Matches(res) - } - - return false -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/resource_types.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/resource_types.go deleted file mode 100644 index 8009268c5..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/resource_types.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" - "strings" - "sync" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/client-go/kubernetes" -) - -type ResourceTypes interface { - All(ignoreCachedResTypes bool) ([]ResourceType, error) - Find(Resource) (ResourceType, error) - CanIgnoreFailingGroupVersion(schema.GroupVersion) bool -} - -type ResourceTypesImplOpts struct { - IgnoreFailingAPIServices bool - CanIgnoreFailingAPIService func(schema.GroupVersion) bool -} - -type ResourceTypesImpl struct { - coreClient kubernetes.Interface - opts ResourceTypesImplOpts - - memoizedResTypes *[]ResourceType - memoizedResTypesLock sync.RWMutex -} - -var _ ResourceTypes = &ResourceTypesImpl{} - -type ResourceType struct { - schema.GroupVersionResource - metav1.APIResource -} - -func NewResourceTypesImpl(coreClient kubernetes.Interface, opts ResourceTypesImplOpts) *ResourceTypesImpl { - return &ResourceTypesImpl{coreClient: coreClient, opts: opts} -} - -func (g *ResourceTypesImpl) All(ignoreCachedResTypes bool) ([]ResourceType, error) { - if ignoreCachedResTypes { - // TODO Update cache while doing a fresh fetch - return g.all() - } - return g.memoizedAll() -} - -func (g *ResourceTypesImpl) all() ([]ResourceType, error) { - serverResources, err := g.serverResources() - if err != nil { - return nil, err - } - - var pairs []ResourceType - - for _, resList := range serverResources { - groupVersion, err := schema.ParseGroupVersion(resList.GroupVersion) - if err != nil { - return nil, err - } - - for _, res := range resList.APIResources { - group := groupVersion.Group - if len(res.Group) > 0 { - group = res.Group - } - - version := groupVersion.Version - if len(res.Version) > 0 { - version = res.Version - } - - // Copy down group and 
version for convenience - res.Group = group - res.Version = version - - gvr := schema.GroupVersionResource{Group: group, Version: version, Resource: res.Name} - pairs = append(pairs, ResourceType{gvr, res}) - } - } - - return pairs, nil -} - -func (g *ResourceTypesImpl) CanIgnoreFailingGroupVersion(groupVer schema.GroupVersion) bool { - return g.canIgnoreFailingGroupVersions(map[schema.GroupVersion]error{groupVer: nil}) -} - -func (g *ResourceTypesImpl) canIgnoreFailingGroupVersions(groupVers map[schema.GroupVersion]error) bool { - // If groups that are failing do not relate to our resources - // it's ok to ignore them. Still not ideal but not much else - // we can do with the way kubernetes exposes this functionality. - if g.opts.IgnoreFailingAPIServices { - return true - } - if g.opts.CanIgnoreFailingAPIService != nil { - for groupVer := range groupVers { - if !g.opts.CanIgnoreFailingAPIService(groupVer) { - return false - } - } - return true - } - return false -} - -func (g *ResourceTypesImpl) serverResources() ([]*metav1.APIResourceList, error) { - var serverResources []*metav1.APIResourceList - var lastErr error - - for i := 0; i < 10; i++ { - _, serverResources, lastErr = g.coreClient.Discovery().ServerGroupsAndResources() - if lastErr == nil { - return serverResources, nil - } else if typedLastErr, ok := lastErr.(*discovery.ErrGroupDiscoveryFailed); ok { - if len(serverResources) > 0 && g.canIgnoreFailingGroupVersions(typedLastErr.Groups) { - return serverResources, nil - } - // Even local services may not be Available immediately, so retry - lastErr = fmt.Errorf("%w (possibly related issue: https://github.com/carvel-dev/kapp/issues/12)", lastErr) - } - time.Sleep(1 * time.Second) - } - - return nil, lastErr -} - -func (g *ResourceTypesImpl) memoizedAll() ([]ResourceType, error) { - g.memoizedResTypesLock.RLock() - - if g.memoizedResTypes != nil { - defer g.memoizedResTypesLock.RUnlock() - return *g.memoizedResTypes, nil - } - - g.memoizedResTypesLock.RUnlock() - - // Include call to All within a lock to avoid race - // with competing memoizedAll() call that - // may win and save older copy on res types - g.memoizedResTypesLock.Lock() - defer g.memoizedResTypesLock.Unlock() - - resTypes, err := g.all() - if err != nil { - return nil, err - } - - g.memoizedResTypes = &resTypes - return resTypes, nil -} - -func (g *ResourceTypesImpl) Find(resource Resource) (ResourceType, error) { - resType, err := g.findOnce(resource) - if err != nil { - g.memoizedResTypesLock.Lock() - g.memoizedResTypes = nil - g.memoizedResTypesLock.Unlock() - - return g.findOnce(resource) - } - - return resType, nil -} - -type ResourceTypesUnknownTypeErr struct { - resource Resource -} - -func (e ResourceTypesUnknownTypeErr) Error() string { - return "Expected to find type for resource: " + e.resource.Description() -} - -func (g *ResourceTypesImpl) findOnce(resource Resource) (ResourceType, error) { - pairs, err := g.memoizedAll() - if err != nil { - return ResourceType{}, err - } - - pieces := strings.Split(resource.APIVersion(), "/") - if len(pieces) > 2 { - return ResourceType{}, fmt.Errorf("Expected version to be of format group/version") - } - if len(pieces) == 1 { - pieces = []string{"", pieces[0]} // core API group - } - - for _, pair := range pairs { - if pair.APIResource.Group == pieces[0] && - pair.APIResource.Version == pieces[1] && - pair.APIResource.Kind == resource.Kind() { - return pair, nil - } - } - - return ResourceType{}, ResourceTypesUnknownTypeErr{resource} -} - -func (p ResourceType) 
Namespaced() bool { - return p.APIResource.Namespaced -} - -func (p ResourceType) Listable() bool { - return p.containsStr(p.APIResource.Verbs, "list") -} - -func (p ResourceType) Deletable() bool { - return p.containsStr(p.APIResource.Verbs, "delete") -} - -func (p ResourceType) containsStr(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} - -func Listable(in []ResourceType) []ResourceType { - var out []ResourceType - for _, item := range in { - if item.Listable() { - out = append(out, item) - } - } - return out -} - -func Matching(in []ResourceType, ref ResourceRef) []ResourceType { - partResourceRef := PartialResourceRef{ref.GroupVersionResource} - var out []ResourceType - for _, item := range in { - if partResourceRef.Matches(item.GroupVersionResource) { - out = append(out, item) - } - } - return out -} - -func MatchingAny(in []ResourceType, refs []ResourceRef) []ResourceType { - var out []ResourceType - for _, item := range in { - for _, ref := range refs { - if (PartialResourceRef{ref.GroupVersionResource}).Matches(item.GroupVersionResource) { - out = append(out, item) - break - } - } - } - return out -} - -func NonMatching(in []ResourceType, ref ResourceRef) []ResourceType { - partResourceRef := PartialResourceRef{ref.GroupVersionResource} - var out []ResourceType - for _, item := range in { - if !partResourceRef.Matches(item.GroupVersionResource) { - out = append(out, item) - } - } - return out -} - -func NonMatchingGK(in []ResourceType, gk schema.GroupKind) []ResourceType { - var out []ResourceType - for _, item := range in { - if !(GKResourceRef{gk}).Matches(item) { - out = append(out, item) - } - } - return out -} - -// TODO: Extend ResourceRef and PartialResourceRefd to allow GVK matching -func MatchingAnyGK(in []ResourceType, gks []schema.GroupKind) []ResourceType { - var out []ResourceType - for _, item := range in { - for _, gk := range gks { - if (GKResourceRef{gk}).Matches(item) { - out = append(out, item) - } - } - } - return out -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/resource_with_managed_fields.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/resource_with_managed_fields.go deleted file mode 100644 index d385dd537..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/resource_with_managed_fields.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -type ResourceWithManagedFields struct { - res Resource - managedFields bool -} - -func NewResourceWithManagedFields(res Resource, managedFields bool) ResourceWithManagedFields { - return ResourceWithManagedFields{res: res, managedFields: managedFields} -} - -func (r ResourceWithManagedFields) Resource() (Resource, error) { - res := r.res.DeepCopy() - if r.managedFields { - return res, nil - } - err := r.removeManagedFieldsResMods().Apply(res) - if err != nil { - return nil, err - } - return res, nil -} - -func (ResourceWithManagedFields) removeManagedFieldsResMods() ResourceMod { - return FieldRemoveMod{ - ResourceMatcher: AllMatcher{}, - Path: NewPathFromStrings([]string{"metadata", "managedFields"}), - } -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/resources.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/resources.go deleted file mode 100644 index 8303431cd..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/resources.go +++ /dev/null @@ -1,640 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "context" - "fmt" - "regexp" - "strings" - "sync" - "time" - - "carvel.dev/kapp/pkg/kapp/logger" - "carvel.dev/kapp/pkg/kapp/util" - "golang.org/x/net/http2" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" -) - -// type ResourceInterface interface { -// Create(obj *unstructured.Unstructured, subresources ...string) (*unstructured.Unstructured, error) -// Update(obj *unstructured.Unstructured, subresources ...string) (*unstructured.Unstructured, error) -// UpdateStatus(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) -// Delete(name string, options *metav1.DeleteOptions, subresources ...string) error -// DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error -// Get(name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) -// List(opts metav1.ListOptions) (*unstructured.UnstructuredList, error) -// Watch(opts metav1.ListOptions) (watch.Interface, error) -// Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*unstructured.Unstructured, error) -// } - -const ( - resourcesDebug = false -) - -type Resources interface { - All([]ResourceType, AllOpts) ([]Resource, error) - Delete(Resource) error - Exists(Resource, ExistsOpts) (Resource, bool, error) - Get(Resource) (Resource, error) - Patch(Resource, types.PatchType, []byte) (Resource, error) - Update(Resource) (Resource, error) - Create(resource Resource) (Resource, error) -} - -type ExistsOpts struct { - SameUID bool -} - -type ResourcesImpl struct { - resourceTypes ResourceTypes - coreClient kubernetes.Interface - dynamicClient dynamic.Interface - mutedDynamicClient dynamic.Interface - opts ResourcesImplOpts - - assumedAllowedNamespacesMemoLock sync.Mutex - assumedAllowedNamespacesMemo *[]string - - logger logger.Logger -} - -type ResourcesImplOpts struct { - FallbackAllowedNamespaces []string - ScopeToFallbackAllowedNamespaces bool -} - -func NewResourcesImpl(resourceTypes ResourceTypes, coreClient kubernetes.Interface, - dynamicClient dynamic.Interface, mutedDynamicClient dynamic.Interface, - opts ResourcesImplOpts, logger logger.Logger) *ResourcesImpl { - - return &ResourcesImpl{ - resourceTypes: resourceTypes, - coreClient: coreClient, - dynamicClient: dynamicClient, - mutedDynamicClient: mutedDynamicClient, - opts: opts, - logger: logger.NewPrefixed("Resources"), - } -} - -type unstructItems struct { - ResType ResourceType - Items []unstructured.Unstructured -} - -func (c *ResourcesImpl) All(resTypes []ResourceType, opts AllOpts) ([]Resource, error) { - defer c.logger.DebugFunc("All").Finish() - - if opts.ListOpts == nil { - opts.ListOpts = &metav1.ListOptions{} - } - - // Populate FallbackAllowedNamespace with resource namespaces stored during deploy - c.opts.FallbackAllowedNamespaces = uniqAndValidNamespaces(append(c.opts.FallbackAllowedNamespaces, opts.ResourceNamespaces...)) - - unstructItemsCh := make(chan unstructItems, len(resTypes)) - fatalErrsCh := make(chan error, len(resTypes)) - var itemsDone sync.WaitGroup - - for _, resType := range resTypes { - resType := resType // copy - itemsDone.Add(1) - - go func() { - defer itemsDone.Done() - - defer c.logger.DebugFunc(resType.GroupVersionResource.String()).Finish() - - var list *unstructured.UnstructuredList 
- var err error - - client := c.mutedDynamicClient.Resource(resType.GroupVersionResource) - - // If resource is cluster scoped or request is not scoped to fallback - // allowed namespaces manually, then scope list to all namespaces - if !c.opts.ScopeToFallbackAllowedNamespaces || !resType.Namespaced() { - err = util.Retry2(time.Second, 5*time.Second, c.isServerRescaleErr, func() error { - if resType.Namespaced() { - list, err = client.Namespace("").List(context.TODO(), *opts.ListOpts) - } else { - list, err = client.List(context.TODO(), *opts.ListOpts) - } - return err - }) - - if err == nil { - unstructItemsCh <- unstructItems{resType, list.Items} - return - } - - if !errors.IsForbidden(err) { - // Ignore certain GVs due to failing API backing - if c.resourceTypes.CanIgnoreFailingGroupVersion(resType.GroupVersion()) { - c.logger.Info("Ignoring group version: %#v: %s", resType.GroupVersionResource, err) - } else { - fatalErrsCh <- fmt.Errorf("Listing %#v, namespaced: %t: %w", resType.GroupVersionResource, resType.Namespaced(), err) - } - return - } - - if !resType.Namespaced() { - c.logger.Debug("Skipping forbidden group version: %#v", resType.GroupVersionResource) - return - } - } - - // At this point err==Forbidden... - // or requests are scoped to fallback allowed namespaces manually - list, err = c.allForNamespaces(client, opts.ListOpts) - if err != nil { - // Ignore certain GVs due to failing API backing - if c.resourceTypes.CanIgnoreFailingGroupVersion(resType.GroupVersion()) { - c.logger.Info("Ignoring group version: %#v", resType.GroupVersionResource) - } else { - fatalErrsCh <- fmt.Errorf("Listing %#v, namespaced: %t: %w", resType.GroupVersionResource, resType.Namespaced(), err) - } - return - } - - unstructItemsCh <- unstructItems{resType, list.Items} - }() - } - - itemsDone.Wait() - close(unstructItemsCh) - close(fatalErrsCh) - - for err := range fatalErrsCh { - return nil, err // TODO consolidate - } - - var resources []Resource - - for unstructItem := range unstructItemsCh { - for _, item := range unstructItem.Items { - resources = append(resources, NewResourceUnstructured(item, unstructItem.ResType)) - } - } - - return resources, nil -} - -func (c *ResourcesImpl) allForNamespaces(client dynamic.NamespaceableResourceInterface, listOpts *metav1.ListOptions) (*unstructured.UnstructuredList, error) { - defer c.logger.DebugFunc("allForNamespaces").Finish() - - allowedNs, err := c.assumedAllowedNamespaces() - if err != nil { - return nil, err - } - - var itemsDone sync.WaitGroup - fatalErrsCh := make(chan error, len(allowedNs)) - unstructItemsCh := make(chan *unstructured.UnstructuredList, len(allowedNs)) - - for _, ns := range allowedNs { - ns := ns // copy - itemsDone.Add(1) - - go func() { - defer itemsDone.Done() - var resList *unstructured.UnstructuredList - var err error - - err = util.Retry2(time.Second, 5*time.Second, c.isServerRescaleErr, func() error { - resList, err = client.Namespace(ns).List(context.TODO(), *listOpts) - return err - }) - if err != nil { - if !errors.IsForbidden(err) { - fatalErrsCh <- err - return - } - // Ignore forbidden errors - // TODO somehow surface them - } else { - unstructItemsCh <- resList - } - }() - } - - itemsDone.Wait() - close(fatalErrsCh) - close(unstructItemsCh) - - for fatalErr := range fatalErrsCh { - return nil, fatalErr - } - - list := &unstructured.UnstructuredList{} - - for resList := range unstructItemsCh { - list.Items = append(list.Items, resList.Items...) 
- } - - return list, nil -} - -func (c *ResourcesImpl) Create(resource Resource) (Resource, error) { - if resourcesDebug { - t1 := time.Now().UTC() - defer func() { c.logger.Debug("create %s", time.Now().UTC().Sub(t1)) }() - - bs, _ := resource.AsYAMLBytes() - c.logger.Debug("create resource %s\n%s\n", resource.Description(), bs) - } - - resClient, resType, err := c.resourceClient(resource, resourceClientOpts{Warnings: true}) - if err != nil { - return nil, err - } - - var createdUn *unstructured.Unstructured - - err = util.Retry2(time.Second, 5*time.Second, c.isGeneralRetryableErr, func() error { - createdUn, err = resClient.Create(context.TODO(), resource.unstructuredPtr(), metav1.CreateOptions{}) - return err - }) - if err != nil { - return nil, c.resourceErr(err, "Creating", resource) - } - - return NewResourceUnstructured(*createdUn, resType), nil -} - -func (c *ResourcesImpl) Update(resource Resource) (Resource, error) { - if resourcesDebug { - t1 := time.Now().UTC() - defer func() { c.logger.Debug("update %s", time.Now().UTC().Sub(t1)) }() - - bs, _ := resource.AsYAMLBytes() - c.logger.Debug("update resource %s\n%s\n", resource.Description(), bs) - } - - resClient, resType, err := c.resourceClient(resource, resourceClientOpts{Warnings: true}) - if err != nil { - return nil, err - } - - var updatedUn *unstructured.Unstructured - - err = util.Retry2(time.Second, 5*time.Second, c.isGeneralRetryableErr, func() error { - updatedUn, err = resClient.Update(context.TODO(), resource.unstructuredPtr(), metav1.UpdateOptions{}) - return err - }) - if err != nil { - return nil, c.resourceErr(err, "Updating", resource) - } - - return NewResourceUnstructured(*updatedUn, resType), nil -} - -func (c *ResourcesImpl) Patch(resource Resource, patchType types.PatchType, data []byte) (Resource, error) { - if resourcesDebug { - t1 := time.Now().UTC() - defer func() { c.logger.Debug("patch %s", time.Now().UTC().Sub(t1)) }() - } - - resClient, resType, err := c.resourceClient(resource, resourceClientOpts{Warnings: true}) - if err != nil { - return nil, err - } - - var patchedUn *unstructured.Unstructured - - err = util.Retry2(time.Second, 5*time.Second, c.isGeneralRetryableErr, func() error { - patchedUn, err = resClient.Patch(context.TODO(), resource.Name(), patchType, data, metav1.PatchOptions{}) - return err - }) - if err != nil { - return nil, c.resourceErr(err, "Patching", resource) - } - - return NewResourceUnstructured(*patchedUn, resType), nil -} - -func (c *ResourcesImpl) Delete(resource Resource) error { - if resourcesDebug { - t1 := time.Now().UTC() - defer func() { c.logger.Debug("delete %s", time.Now().UTC().Sub(t1)) }() - } - - if resource.IsDeleting() { - c.logger.Info("TODO resource '%s' is already deleting", resource.Description()) - return nil - } - - resClient, resType, err := c.resourceClient(resource, resourceClientOpts{Warnings: true}) - if err != nil { - return err - } - - if resType.Deletable() { - // TODO is setting deletion policy a correct thing to do? 
- // https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#setting-the-cascading-deletion-policy - delPol := metav1.DeletePropagationBackground - delOpts := metav1.DeleteOptions{PropagationPolicy: &delPol} - - // Some resources may not have UID (example: PodMetrics.metrics.k8s.io) - resUID := types.UID(resource.UID()) - if len(resUID) > 0 { - delOpts.Preconditions = &metav1.Preconditions{UID: &resUID} - } - - err = resClient.Delete(context.TODO(), resource.Name(), delOpts) - if err != nil { - if errors.IsNotFound(err) { - c.logger.Info("TODO resource '%s' is already gone", resource.Description()) - return nil - } - if c.isPodMetrics(resource, err) { - return nil - } - return c.resourceErr(err, "Deleting", resource) - } - } else { - c.logger.Info("TODO resource '%s' is not deletable", resource.Description()) // TODO - } - - return nil -} - -func (c *ResourcesImpl) Get(resource Resource) (Resource, error) { - if resourcesDebug { - t1 := time.Now().UTC() - defer func() { c.logger.Debug("get %s", time.Now().UTC().Sub(t1)) }() - } - - resClient, resType, err := c.resourceClient(resource, resourceClientOpts{Warnings: false}) - if err != nil { - return nil, err - } - - var item *unstructured.Unstructured - - err = util.Retry2(time.Second, 5*time.Second, c.isServerRescaleErr, func() error { - var err error - item, err = resClient.Get(context.TODO(), resource.Name(), metav1.GetOptions{}) - return err - }) - if err != nil { - return nil, c.resourceErr(err, "Getting", resource) - } - - return NewResourceUnstructured(*item, resType), nil -} - -func (c *ResourcesImpl) Exists(resource Resource, existsOpts ExistsOpts) (Resource, bool, error) { - if resourcesDebug { - t1 := time.Now().UTC() - defer func() { c.logger.Debug("exists %s", time.Now().UTC().Sub(t1)) }() - } - - resClient, resType, err := c.resourceClient(resource, resourceClientOpts{Warnings: false}) - if err != nil { - // Assume if type is not known to the API server - // then such resource cannot exist on the server - if _, ok := err.(ResourceTypesUnknownTypeErr); ok { - return nil, false, nil - } - return nil, false, err - } - - var found bool - var resObj Resource - - err = util.Retry(time.Second, time.Minute, func() (bool, error) { - fetchedRes, err := resClient.Get(context.TODO(), resource.Name(), metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - found = false - return true, nil - } - if c.isPodMetrics(resource, err) { - found = false - return true, nil - } - if c.isServerRescaleErr(err) { - return false, nil - } - // No point in waiting if we are not allowed to get it - isDone := errors.IsForbidden(err) - // TODO sometimes metav1.StatusReasonUnknown is returned (empty string) - // might be related to deletion of mutating webhook - return isDone, c.resourceErr(err, "Checking existence of", resource) - } - - // Check if we have to compare the UID's also to confirm if it is same resource. 
- if existsOpts.SameUID { - if fetchedRes != nil { - if string(fetchedRes.GetUID()) != resource.UID() { - found = false - return true, nil - } - } - } - - found = true - resObj = NewResourceUnstructured(*fetchedRes, resType) - return true, nil - }) - - return resObj, found, err -} - -var ( - // Error example: Checking existence of resource podmetrics/knative-ingressgateway-646d475cbb-c82qb (metrics.k8s.io/v1beta1) - // namespace: istio-system: Error while getting pod knative-ingressgateway-646d475cbb-c82qb: - // pod "knative-ingressgateway-646d475cbb-c82qb" not found (reason: ) - // Note that it says pod is not found even though we were checking on podmetrics. - // (https://github.com/kubernetes-sigs/metrics-server/blob/8d7aca3c6d770bc37d93515bf731a08332b8025b/pkg/api/pod.go#L133) - podMetricsNotFoundErrCheck = regexp.MustCompile("Error while getting pod (.+) not found \\(reason: \\)") -) - -func (c *ResourcesImpl) isPodMetrics(resource Resource, err error) bool { - // Abnormal error case. Get/Delete on PodMetrics may fail - // without NotFound reason due to its dependence on Pod existence - if resource.Kind() == "PodMetrics" && resource.APIGroup() == "metrics.k8s.io" { - if podMetricsNotFoundErrCheck.MatchString(err.Error()) { - return true - } - } - return false -} - -func (c *ResourcesImpl) isGeneralRetryableErr(err error) bool { - return IsResourceChangeBlockedErr(err) || c.isServerRescaleErr(err) || c.isEtcdRetryableError(err) || - c.isResourceQuotaConflict(err) || c.isInternalFailure(err) || errors.IsTooManyRequests(err) -} - -// Fixes issues I observed with GKE: -// Operation cannot be fulfilled on resourcequotas "gke-resource-quotas": the object has been modified; -// please apply your changes to the latest version and try again (reason: Conflict) -// Works around: https://github.com/kubernetes/kubernetes/issues/67761 by retrying. -func (c *ResourcesImpl) isResourceQuotaConflict(err error) bool { - return errors.IsConflict(err) && strings.Contains(err.Error(), "Operation cannot be fulfilled on resourcequota") -} - -func (c *ResourcesImpl) isServerRescaleErr(err error) bool { - switch err := err.(type) { - case *http2.GoAwayError: - return true - case *errors.StatusError: - if err.ErrStatus.Reason == metav1.StatusReasonServiceUnavailable { - return true - } - } - return false -} - -// Handles case pointed out in : https://github.com/carvel-dev/kapp/issues/258. -// An internal network error which might succeed on retrying. 
-func (c *ResourcesImpl) isInternalFailure(err error) bool { - switch err := err.(type) { - case *errors.StatusError: - if errors.IsInternalError(err) { - return true - } - } - return false -} - -func (c *ResourcesImpl) resourceErr(err error, action string, resource Resource) error { - if typedErr, ok := err.(errors.APIStatus); ok { - return resourceStatusErr{resourcePlainErr{err, action, resource}, typedErr.Status()} - } - return resourcePlainErr{err, action, resource} -} - -type resourceClientOpts struct { - Warnings bool -} - -func (c *ResourcesImpl) resourceClient(resource Resource, opts resourceClientOpts) (dynamic.ResourceInterface, ResourceType, error) { - resType, err := c.resourceTypes.Find(resource) - if err != nil { - return nil, ResourceType{}, err - } - - var dynamicClient dynamic.Interface - if opts.Warnings { - dynamicClient = c.dynamicClient - } else { - dynamicClient = c.mutedDynamicClient - } - - return dynamicClient.Resource(resType.GroupVersionResource).Namespace(resource.Namespace()), resType, nil -} - -func (c *ResourcesImpl) assumedAllowedNamespaces() ([]string, error) { - c.assumedAllowedNamespacesMemoLock.Lock() - defer c.assumedAllowedNamespacesMemoLock.Unlock() - - if c.assumedAllowedNamespacesMemo != nil { - return *c.assumedAllowedNamespacesMemo, nil - } - - if c.opts.ScopeToFallbackAllowedNamespaces { - return c.opts.FallbackAllowedNamespaces, nil - } - - nsList, err := c.coreClient.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - if errors.IsForbidden(err) { - if len(c.opts.FallbackAllowedNamespaces) > 0 { - return c.opts.FallbackAllowedNamespaces, nil - } - } - return nil, fmt.Errorf("Fetching all namespaces: %w", err) - } - - c.logger.Info("Falling back to checking each namespace separately (much slower)") - - var nsNames []string - - for _, ns := range nsList.Items { - nsNames = append(nsNames, ns.Name) - } - - c.assumedAllowedNamespacesMemo = &nsNames - - return nsNames, nil -} - -var ( - // Error example: conversion webhook for cert-manager.io/v1alpha3, Kind=Issuer failed: - // Post https://cert-manager-webhook.cert-manager.svc:443/convert?timeout=30s: - // x509: certificate signed by unknown authority (reason: ) - conversionWebhookErrCheck = regexp.MustCompile("conversion webhook for (.+) failed:") - - // Matches retryable etcdserver errors - // Comprehensive list of errors at : https://github.com/etcd-io/etcd/blob/main/server/etcdserver/errors.go - etcdserverRetryableErrCheck = regexp.MustCompile("etcdserver:(.+)(leader changed|timed out)") -) - -func IsResourceChangeBlockedErr(err error) bool { - // TODO is there a better way to detect these errors? - errMsg := err.Error() - switch { - case strings.Contains(errMsg, "Internal error occurred: failed calling admission webhook"): - return true - case strings.Contains(errMsg, "Internal error occurred: failed calling webhook"): - return true - case conversionWebhookErrCheck.MatchString(errMsg): - return true - default: - return false - } -} - -// Retries retryable errors thrown by etcd server. 
-// Addresses : https://github.com/carvel-dev/kapp/issues/106 -func (c *ResourcesImpl) isEtcdRetryableError(err error) bool { - return etcdserverRetryableErrCheck.MatchString(err.Error()) -} - -func uniqAndValidNamespaces(in []string) []string { - var out []string - if len(in) > 0 { - uniqNamespaces := map[string]struct{}{} - for _, ns := range in { - if _, exists := uniqNamespaces[ns]; !exists && ns != "(cluster)" { - out = append(out, ns) - uniqNamespaces[ns] = struct{}{} - } - } - } - return out -} - -type AllOpts struct { - ListOpts *metav1.ListOptions - ResourceNamespaces []string -} - -type resourceStatusErr struct { - err resourcePlainErr - status metav1.Status -} - -var _ errors.APIStatus = resourceStatusErr{} - -func (e resourceStatusErr) Error() string { return e.err.Error() } -func (e resourceStatusErr) Status() metav1.Status { return e.status } - -type resourcePlainErr struct { - err error - action string - resource Resource -} - -func (e resourcePlainErr) Error() string { - return fmt.Sprintf("%s resource %s: API server says: %s (reason: %s)", - e.action, e.resource.Description(), e.err, errors.ReasonForError(e.err)) -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/unique_resources.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/unique_resources.go deleted file mode 100644 index e1671e6a5..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/unique_resources.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "fmt" - "strings" -) - -type UniqueResourceKey struct { - res Resource - customName string -} - -func NewUniqueResourceKey(res Resource) UniqueResourceKey { - return UniqueResourceKey{res, ""} -} - -func NewUniqueResourceKeyWithCustomName(res Resource, name string) UniqueResourceKey { - return UniqueResourceKey{res, name} -} - -func (k UniqueResourceKey) String() string { - // version of the resource is not included since it will change over time - // TODO technically resource group can be changed (true uniqueness is via UID) - name := k.res.Name() - if len(k.customName) > 0 { - name = k.customName - } - return k.res.Namespace() + "/" + k.res.APIGroup() + "/" + k.res.Kind() + "/" + name -} - -type UniqueResources struct { - resources []Resource -} - -func NewUniqueResources(resources []Resource) UniqueResources { - return UniqueResources{resources} -} - -func (r UniqueResources) Resources() ([]Resource, error) { - var result []Resource - var errs []error - - uniqRs := map[string]Resource{} - - for _, res := range r.resources { - resKey := NewUniqueResourceKey(res).String() - if uRes, found := uniqRs[resKey]; found { - // Check if duplicate resources are same - if !uRes.Equal(res) { - errs = append(errs, fmt.Errorf("Found resource '%s' multiple times with different content", res.Description())) - } - } else { - uniqRs[resKey] = res - result = append(result, res) - } - } - - return result, r.combinedErr(errs) -} - -func (r UniqueResources) Match(newResources []Resource) ([]Resource, error) { - var result []Resource - uniqRs := map[string]struct{}{} - - for _, res := range newResources { - uniqRs[NewUniqueResourceKey(res).String()] = struct{}{} - } - - for _, res := range r.resources { - resKey := NewUniqueResourceKey(res).String() - if _, found := uniqRs[resKey]; found { - result = append(result, res) - } - } - - return result, nil -} - -func (r UniqueResources) combinedErr(errs []error) error { - if len(errs) > 0 { - var msgs []string - for _, err := range errs { - 
msgs = append(msgs, "- "+err.Error()) - } - return fmt.Errorf("Uniqueness errors:\n%s", strings.Join(msgs, "\n")) - } - - return nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resources/yaml_doc.go b/vendor/carvel.dev/kapp/pkg/kapp/resources/yaml_doc.go deleted file mode 100644 index 9ba6df9ad..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resources/yaml_doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resources - -import ( - "bufio" - "bytes" - "io" - - kyaml "k8s.io/apimachinery/pkg/util/yaml" -) - -type YAMLFile struct { - fileSrc FileSource -} - -func NewYAMLFile(fileSrc FileSource) YAMLFile { - return YAMLFile{fileSrc} -} - -func (f YAMLFile) Docs() ([][]byte, error) { - var docs [][]byte - - fileBytes, err := f.fileSrc.Bytes() - if err != nil { - return nil, err - } - - reader := kyaml.NewYAMLReader(bufio.NewReaderSize(bytes.NewReader(fileBytes), 4096)) - - for { - docBytes, err := reader.Read() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - docs = append(docs, docBytes) - } - - return docs, nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/api_extensions_vx_crd.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/api_extensions_vx_crd.go deleted file mode 100644 index d828930e0..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/api_extensions_vx_crd.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - ctlres "carvel.dev/kapp/pkg/kapp/resources" - "sigs.k8s.io/yaml" -) - -type APIExtensionsVxCRD struct { - resource ctlres.Resource -} - -func NewAPIExtensionsVxCRD(resource ctlres.Resource) *APIExtensionsVxCRD { - matcher := ctlres.APIGroupKindMatcher{ - APIGroup: "apiextensions.k8s.io", - Kind: "CustomResourceDefinition", - } - if matcher.Matches(resource) { - return &APIExtensionsVxCRD{resource} - } - return nil -} - -func (s APIExtensionsVxCRD) IsDoneApplying() DoneApplyState { - // CRD conditions: https://github.com/kubernetes/apiextensions-apiserver/blob/master/pkg/apis/apiextensions - allTrue, msg := Conditions{s.resource}.IsSelectedTrue([]string{"Established", "NamesAccepted"}) - return DoneApplyState{Done: allTrue, Successful: allTrue, Message: msg} -} - -func (s APIExtensionsVxCRD) contents() (crdObj, error) { - bs, err := s.resource.AsYAMLBytes() - if err != nil { - return crdObj{}, err - } - - var contents crdObj - - err = yaml.Unmarshal(bs, &contents) - if err != nil { - return crdObj{}, err - } - - return contents, nil -} - -// TODO use struct provided by the client -type crdObj struct { - Spec crdSpec `yaml:"spec"` -} - -type crdSpec struct { - Group string `yaml:"group"` - Scope string `yaml:"scope"` - Version string `yaml:"version"` - Versions []crdSpecVersion `yaml:"versions"` - Names crdSpecNames `yaml:"names"` -} - -type crdSpecVersion struct { - Name string `yaml:"name"` -} - -type crdSpecNames struct { - Kind string `yaml:"kind"` -} - -func (o crdObj) Versions() []string { - result := []string{} - - if len(o.Spec.Version) > 0 { - result = append(result, o.Spec.Version) - } - if len(o.Spec.Versions) > 0 { - for _, ver := range o.Spec.Versions { - result = append(result, ver.Name) - } - } - - return result -} - -func (s APIExtensionsVxCRD) Group() (crdGroup string, err error) { - crdObj, err := s.contents() - if err != nil { - return crdGroup, err - } - return crdObj.Spec.Group, err -} - -func (s APIExtensionsVxCRD) Kind() (crdName 
string, err error) { - crdObj, err := s.contents() - if err != nil { - return crdName, err - } - return crdObj.Spec.Names.Kind, err -} - -/* - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: builds.build.knative.dev -spec: - additionalPrinterColumns: - - JSONPath: .status.conditions[?(@.type=="Succeeded")].status - name: Succeeded - type: string - - JSONPath: .status.conditions[?(@.type=="Succeeded")].reason - name: Reason - type: string - - JSONPath: .status.startTime - name: StartTime - type: date - - JSONPath: .status.completionTime - name: CompletionTime - type: date - group: build.knative.dev - names: - categories: - - all - - knative - kind: Build - plural: builds - scope: Namespaced - version: v1alpha1 -status: - conditions: - - lastTransitionTime: 2018-12-06T02:02:55Z - message: no conflicts found - reason: NoConflicts - status: "True" - type: NamesAccepted - - lastTransitionTime: 2018-12-06T02:02:55Z - message: the initial names have been accepted - reason: InitialNamesAccepted - status: "True" - type: Established - -*/ diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/api_registration_v1_api_service.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/api_registration_v1_api_service.go deleted file mode 100644 index 6aa109265..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/api_registration_v1_api_service.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" -) - -type APIRegistrationV1APIService struct { - resource ctlres.Resource - ignoreFailing bool -} - -func NewAPIRegistrationV1APIService(resource ctlres.Resource, ignoreFailing bool) *APIRegistrationV1APIService { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: "apiregistration.k8s.io/v1", - Kind: "APIService", - } - if matcher.Matches(resource) { - return &APIRegistrationV1APIService{resource, ignoreFailing} - } - return nil -} - -func (s APIRegistrationV1APIService) IsDoneApplying() DoneApplyState { - allTrue, msg := Conditions{s.resource}.IsSelectedTrue([]string{"Available"}) - - if !allTrue && s.ignoreFailing { - return DoneApplyState{Done: true, Successful: true, Message: fmt.Sprintf("Ignoring (%s)", msg)} - } - - return DoneApplyState{Done: allTrue, Successful: allTrue, Message: msg} -} - -/* - -status: - conditions: - - lastTransitionTime: 2019-12-03T16:52:14Z - message: all checks passed - reason: Passed - status: "True" - type: Available - -*/ diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/api_registration_v1beta1_api_service.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/api_registration_v1beta1_api_service.go deleted file mode 100644 index 83187ea87..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/api_registration_v1beta1_api_service.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" -) - -type APIRegistrationV1Beta1APIService struct { - resource ctlres.Resource - ignoreFailing bool -} - -func NewAPIRegistrationV1Beta1APIService(resource ctlres.Resource, ignoreFailing bool) *APIRegistrationV1Beta1APIService { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: "apiregistration.k8s.io/v1beta1", - Kind: "APIService", - } - if matcher.Matches(resource) { - return &APIRegistrationV1Beta1APIService{resource, ignoreFailing} - } - return nil -} - -func (s APIRegistrationV1Beta1APIService) IsDoneApplying() DoneApplyState { - allTrue, msg := Conditions{s.resource}.IsSelectedTrue([]string{"Available"}) - - if !allTrue && s.ignoreFailing { - return DoneApplyState{Done: true, Successful: true, Message: fmt.Sprintf("Ignoring (%s)", msg)} - } - - return DoneApplyState{Done: allTrue, Successful: allTrue, Message: msg} -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_daemon_set.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_daemon_set.go deleted file mode 100644 index 9fe415c7e..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_daemon_set.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - appsv1 "k8s.io/api/apps/v1" -) - -type AppsV1DaemonSet struct { - resource ctlres.Resource -} - -func NewAppsV1DaemonSet(resource ctlres.Resource) *AppsV1DaemonSet { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: "apps/v1", - Kind: "DaemonSet", - } - if matcher.Matches(resource) { - return &AppsV1DaemonSet{resource} - } - return nil -} - -func (s AppsV1DaemonSet) IsDoneApplying() DoneApplyState { - dset := appsv1.DaemonSet{} - - err := s.resource.AsTypedObj(&dset) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf("Error: Failed obj conversion: %s", err)} - } - - if dset.Generation != dset.Status.ObservedGeneration { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for generation %d to be observed", dset.Generation)} - } - - // ensure updated pods are actually scheduled before checking number unavailable to avoid - // race condition between pod scheduler and kapp state check - notReady := dset.Status.DesiredNumberScheduled - dset.Status.UpdatedNumberScheduled - if notReady > 0 { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for %d updated pods to be scheduled", notReady)} - } - - if dset.Status.NumberUnavailable > 0 { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for %d unavailable pods", dset.Status.NumberUnavailable)} - } - - return DoneApplyState{Done: true, Successful: true} -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_deployment.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_deployment.go deleted file mode 100644 index 9c86a5d07..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_deployment.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - "strconv" - "strings" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" -) - -const ( - appsV1DeploymentWaitMinimumReplicasAvailableAnnKey = "kapp.k14s.io/apps-v1-deployment-wait-minimum-replicas-available" // values: "10", "5%" -) - -type AppsV1Deployment struct { - resource ctlres.Resource - associatedRs []ctlres.Resource -} - -func NewAppsV1Deployment(resource ctlres.Resource, associatedRs []ctlres.Resource) *AppsV1Deployment { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: "apps/v1", - Kind: "Deployment", - } - if matcher.Matches(resource) { - return &AppsV1Deployment{resource, associatedRs} - } - return nil -} - -func (s AppsV1Deployment) IsDoneApplying() DoneApplyState { - dep := appsv1.Deployment{} - - err := s.resource.AsTypedObj(&dep) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf("Error: Failed obj conversion: %s", err)} - } - - if dep.Generation != dep.Status.ObservedGeneration { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for generation %d to be observed", dep.Generation)} - } - - for _, cond := range dep.Status.Conditions { - switch cond.Type { - case appsv1.DeploymentProgressing: - if cond.Status == corev1.ConditionFalse { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Deployment is not progressing: %s, message: %s", cond.Reason, cond.Message)} - } - - case appsv1.DeploymentReplicaFailure: - if cond.Status == corev1.ConditionTrue { - return DoneApplyState{Done: false, Successful: false, Message: fmt.Sprintf( - "Deployment has encountered replica failure: %s, message: %s", cond.Reason, cond.Message)} - } - } - } - - // TODO ideally we would not condition this on len of associated resources - if len(s.associatedRs) > 0 { - minRepAvailable, found := s.resource.Annotations()[appsV1DeploymentWaitMinimumReplicasAvailableAnnKey] - if found { - return s.isMinReplicasAvailable(dep, minRepAvailable) - } - } - - if dep.Status.UnavailableReplicas > 0 { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for %d unavailable replicas", dep.Status.UnavailableReplicas)} - } - - if dep.Spec.Replicas != nil && dep.Status.ReadyReplicas < *dep.Spec.Replicas { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for %d/%d replicas to be ready", *dep.Spec.Replicas-dep.Status.ReadyReplicas, *dep.Spec.Replicas)} - } - return DoneApplyState{Done: true, Successful: true} -} - -func (s AppsV1Deployment) isMinReplicasAvailable(dep appsv1.Deployment, expectedMinRepAvailableStr string) DoneApplyState { - isPercent := strings.HasSuffix(expectedMinRepAvailableStr, "%") - - minRepAvailable, err := strconv.Atoi(strings.TrimSuffix(expectedMinRepAvailableStr, "%")) - if err != nil { - return DoneApplyState{Done: true, Successful: false, - Message: fmt.Sprintf("Error: Failed to parse %s: %s", appsV1DeploymentWaitMinimumReplicasAvailableAnnKey, err)} - } - - if dep.Spec.Replicas == nil { - return DoneApplyState{Done: true, Successful: false, - Message: fmt.Sprintf("Error: Failed to find spec.replicas")} - } - - totalReplicas := int(*dep.Spec.Replicas) - - if isPercent { - minRepAvailable = totalReplicas * minRepAvailable / 100 - } - - if minRepAvailable > totalReplicas { - minRepAvailable = totalReplicas - } - if totalReplicas > 0 && minRepAvailable <= 0 { - minRepAvailable = 1 - } - - rs, err := s.findLatestReplicaSet(dep) 
- if err != nil { - return DoneApplyState{Done: true, Successful: false, - Message: fmt.Sprintf("Error: Failed to find latest replicaset: %s", err)} - } - - return rs.IsDoneApplyingWithMinimum(minRepAvailable) -} - -const ( - deploymentRevAnnKey = "deployment.kubernetes.io/revision" -) - -func (s AppsV1Deployment) findLatestReplicaSet(dep appsv1.Deployment) (*ExtensionsAndAppsVxReplicaSet, error) { - expectedRevKey, found := dep.Annotations[deploymentRevAnnKey] - if !found { - return nil, fmt.Errorf("Expected to find '%s' but did not", deploymentRevAnnKey) - } - - for _, res := range s.associatedRs { - // Cannot use appsv1 RS since no gurantee which versions are in associated resources - rs := NewExtensionsAndAppsVxReplicaSet(res) - if rs != nil && res.Annotations()[deploymentRevAnnKey] == expectedRevKey { - return rs, nil - } - } - - return nil, fmt.Errorf("Expected to find replica set (rev %s) in associated resources but did not", expectedRevKey) -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_replica_set.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_replica_set.go deleted file mode 100644 index ccd131c9c..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_replica_set.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - appsv1 "k8s.io/api/apps/v1" -) - -type AppsV1ReplicaSet struct { - resource ctlres.Resource -} - -func NewAppsV1ReplicaSet(resource ctlres.Resource) *AppsV1ReplicaSet { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: "apps/v1", - Kind: "ReplicaSet", - } - if matcher.Matches(resource) { - return &AppsV1ReplicaSet{resource} - } - return nil -} - -func (s AppsV1ReplicaSet) IsDoneApplying() DoneApplyState { - rs := appsv1.ReplicaSet{} - - err := s.resource.AsTypedObj(&rs) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf("Error: Failed obj conversion: %s", err)} - } - - if rs.Generation != rs.Status.ObservedGeneration { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for generation %d to be observed", rs.Generation)} - } - - if rs.Status.Replicas != rs.Status.AvailableReplicas { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for %d unavailable replicas", rs.Status.Replicas-rs.Status.AvailableReplicas)} - } - - return DoneApplyState{Done: true, Successful: true} -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_stateful_set.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_stateful_set.go deleted file mode 100644 index 941c3f5f3..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/apps_v1_stateful_set.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - appsv1 "k8s.io/api/apps/v1" -) - -type AppsV1StatefulSet struct { - resource ctlres.Resource -} - -func NewAppsV1StatefulSet(resource ctlres.Resource, _ []ctlres.Resource) *AppsV1StatefulSet { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: "apps/v1", - Kind: "StatefulSet", - } - if matcher.Matches(resource) { - return &AppsV1StatefulSet{resource} - } - return nil -} - -func (s AppsV1StatefulSet) IsDoneApplying() DoneApplyState { - statefulSet := appsv1.StatefulSet{} - - err := s.resource.AsTypedObj(&statefulSet) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf("Error: Failed obj conversion: %s", err)} - } - - if statefulSet.Generation != statefulSet.Status.ObservedGeneration { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for generation %d to be observed", statefulSet.Generation)} - } - - if statefulSet.Spec.Replicas == nil { - return DoneApplyState{Done: true, Successful: false, - Message: fmt.Sprintf("Error: Failed to find spec.replicas")} - } - - toUpdate := *statefulSet.Spec.Replicas - clarification := "" - if s.partition(statefulSet) { - toUpdate -= *statefulSet.Spec.UpdateStrategy.RollingUpdate.Partition - clarification = fmt.Sprintf(" (updating only %d of %d total)", - toUpdate, *statefulSet.Spec.Replicas) - } - - // ensure replicas have been updated - notUpdated := toUpdate - statefulSet.Status.UpdatedReplicas - if notUpdated > 0 { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for %d replicas to be updated%s", notUpdated, clarification)} - } - - // ensure replicas are available - notReady := *statefulSet.Spec.Replicas - statefulSet.Status.ReadyReplicas - if notReady > 0 { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for %d replicas to be ready", notReady)} - } - - // ensure all replicas have been deleted when scaling down - notDeleted := statefulSet.Status.Replicas - *statefulSet.Spec.Replicas - if notDeleted > 0 { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for %d replicas to be deleted", notDeleted)} - } - - return DoneApplyState{Done: true, Successful: true} -} - -func (AppsV1StatefulSet) partition(statefulSet appsv1.StatefulSet) bool { - return statefulSet.Spec.UpdateStrategy.RollingUpdate != nil && - statefulSet.Spec.UpdateStrategy.RollingUpdate.Partition != nil && - *statefulSet.Spec.UpdateStrategy.RollingUpdate.Partition > 0 -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/batch_v1_job.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/batch_v1_job.go deleted file mode 100644 index 93649ef49..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/batch_v1_job.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" -) - -type BatchV1Job struct { - resource ctlres.Resource -} - -func NewBatchV1Job(resource ctlres.Resource) *BatchV1Job { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: "batch/v1", - Kind: "Job", - } - if matcher.Matches(resource) { - return &BatchV1Job{resource} - } - return nil -} - -func (s BatchV1Job) IsDoneApplying() DoneApplyState { - job := batchv1.Job{} - - err := s.resource.AsTypedObj(&job) - if err != nil { - return DoneApplyState{Done: true, Successful: false, - Message: fmt.Sprintf("Error: Failed obj conversion: %s", err)} - } - - for _, cond := range job.Status.Conditions { - switch { - case cond.Type == batchv1.JobComplete && cond.Status == corev1.ConditionTrue: - return DoneApplyState{Done: true, Successful: true, Message: "Completed"} - - case cond.Type == batchv1.JobFailed && cond.Status == corev1.ConditionTrue: - return DoneApplyState{Done: true, Successful: false, - Message: fmt.Sprintf("Failed with reason %s: %s", cond.Reason, cond.Message)} - } - } - - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting to complete (%d active, %d failed, %d succeeded)", - job.Status.Active, job.Status.Failed, job.Status.Succeeded)} -} - -/* - -status: - conditions: - - lastProbeTime: "2019-06-26T22:18:22Z" - lastTransitionTime: "2019-06-26T22:18:22Z" - message: Job has reached the specified backoff limit - reason: BackoffLimitExceeded - status: "True" - type: Failed - failed: 7 - startTime: "2019-06-26T22:07:50Z" - -*/ diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/batch_vx_cron_job.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/batch_vx_cron_job.go deleted file mode 100644 index e129a03c2..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/batch_vx_cron_job.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - ctlres "carvel.dev/kapp/pkg/kapp/resources" -) - -type BatchVxCronJob struct { - resource ctlres.Resource -} - -func NewBatchVxCronJob(resource ctlres.Resource) *BatchVxCronJob { - matcher := ctlres.APIGroupKindMatcher{ - APIGroup: "batch", - Kind: "CronJob", - } - if matcher.Matches(resource) { - return &BatchVxCronJob{resource} - } - return nil -} - -func (s BatchVxCronJob) IsDoneApplying() DoneApplyState { - // Always return success as we do not want to pick up associated - // pods that might have previously failed - return DoneApplyState{Done: true, Successful: true} -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/conditions.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/conditions.go deleted file mode 100644 index 35fc64721..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/conditions.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" -) - -type Conditions struct { - resource ctlres.Resource -} - -func (c Conditions) IsSelectedTrue(checkedTypes []string) (bool, string) { - statuses := c.statuses() - - for _, t := range checkedTypes { - status, found := statuses[t] - if !found { - return false, fmt.Sprintf("Condition %s is not set", t) - } - if status != "True" { - return false, fmt.Sprintf("Condition %s is not True (%s)", t, status) - } - } - - return true, "" -} - -func (c Conditions) IsAllTrue() (bool, string) { - statuses := c.statuses() - if len(statuses) == 0 { - return false, "No conditions found" - } - - for t, status := range c.statuses() { - if status != "True" { - return false, fmt.Sprintf("Condition %s is not True (%s)", t, status) - } - } - - return true, "" -} - -func (c Conditions) statuses() map[string]string { - statuses := map[string]string{} - if conditions, ok := c.resource.Status()["conditions"].([]interface{}); ok { - for _, cond := range conditions { - if typedCond, ok := cond.(map[string]interface{}); ok { - if typedType, ok := typedCond["type"].(string); ok { - if typedStatus, ok := typedCond["status"].(string); ok { - statuses[typedType] = typedStatus - } - } - } - } - } - return statuses -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/core_v1_pod.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/core_v1_pod.go deleted file mode 100644 index 7a7198477..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/core_v1_pod.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - corev1 "k8s.io/api/core/v1" -) - -// https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/ -type CoreV1Pod struct { - resource ctlres.Resource -} - -func NewCoreV1Pod(resource ctlres.Resource) *CoreV1Pod { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: "v1", - Kind: "Pod", - } - if matcher.Matches(resource) { - return &CoreV1Pod{resource} - } - return nil -} - -func (s CoreV1Pod) IsDoneApplying() DoneApplyState { - pod := corev1.Pod{} - - err := s.resource.AsTypedObj(&pod) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf("Error: Failed obj conversion: %s", err)} - } - - // TODO deal with failure scenarios (retry, timeout?) - switch pod.Status.Phase { - // Pending: The Pod has been accepted by the Kubernetes system, but one or more of the - // Container images has not been created. This includes time before being scheduled as - // well as time spent downloading images over the network, which could take a while. - case "Pending": - return DoneApplyState{Done: false, Message: s.detailedMsg("Pending", s.pendingDetailsReason(pod))} - - // Running: The Pod has been bound to a node, and all of the Containers have been created. - // At least one Container is still running, or is in the process of starting or restarting. - case "Running": - allTrue, msg := Conditions{s.resource}.IsSelectedTrue([]string{"Initialized", "Ready", "PodScheduled"}) - return DoneApplyState{Done: allTrue, Successful: allTrue, Message: msg} - - // Succeeded: All Containers in the Pod have terminated in success, and will not be restarted. 
- case "Succeeded": - return DoneApplyState{Done: true, Successful: true, Message: ""} - - // Failed: All Containers in the Pod have terminated, and at least one Container has - // terminated in failure. That is, the Container either exited with non-zero status - // or was terminated by the system. - case "Failed": - return DoneApplyState{Done: true, Successful: false, Message: "Phase is failed"} - - // Unknown: For some reason the state of the Pod could not be obtained, - // typically due to an error in communicating with the host of the Pod. - case "Unknown": - return DoneApplyState{Done: true, Successful: false, Message: "Phase is unknown"} - - default: - return DoneApplyState{Done: false, Message: "Undetermined phase"} - } -} - -func (s CoreV1Pod) detailedMsg(state, msg string) string { - if len(msg) > 0 { - return state + ": " + msg - } - return state -} - -func (s CoreV1Pod) pendingDetailsReason(pod corev1.Pod) string { - statuses := append([]corev1.ContainerStatus{}, pod.Status.InitContainerStatuses...) - statuses = append(statuses, pod.Status.ContainerStatuses...) - - // See status example below - for _, st := range statuses { - if st.State.Waiting != nil { - msg := st.State.Waiting.Reason - if len(st.State.Waiting.Message) > 0 { - msg += fmt.Sprintf(", message: %s", st.State.Waiting.Message) - } - return msg - } - } - - // See status example below - for _, cond := range pod.Status.Conditions { - if cond.Type == corev1.PodScheduled { - if cond.Reason == corev1.PodReasonUnschedulable { - msg := cond.Reason - if len(cond.Message) > 0 { - msg += fmt.Sprintf(", message: %s", cond.Message) - } - return msg - } - } - } - - return "" -} - -/* - -# Image cannot be pulled -status: - containerStatuses: - - image: kbld:docker-io-... - imageID: "" - lastState: {} - name: simple-app - ready: false - restartCount: 0 - state: - waiting: - message: 'rpc error: code = Unknown desc = Error response from daemon: repository - kbld not found: does not exist or no pull access' - reason: ErrImagePull - -# Unschedulable pod -status: - conditions: - - lastProbeTime: "2019-07-16T23:53:29Z" - lastTransitionTime: "2019-07-16T23:51:54Z" - message: '0/4 nodes are available: 3 node(s) didn''t match node selector, 4 node(s) - didn''t have free ports for the requested pod ports.' - reason: Unschedulable - status: "False" - type: PodScheduled - phase: Pending - qosClass: Burstable - -*/ diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/core_v1_service.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/core_v1_service.go deleted file mode 100644 index fb147b89b..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/core_v1_service.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - corev1 "k8s.io/api/core/v1" -) - -type CoreV1Service struct { - resource ctlres.Resource -} - -func NewCoreV1Service(resource ctlres.Resource) *CoreV1Service { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: "v1", - Kind: "Service", - } - if matcher.Matches(resource) { - return &CoreV1Service{resource} - } - return nil -} - -func (s CoreV1Service) IsDoneApplying() DoneApplyState { - svc := corev1.Service{} - - err := s.resource.AsTypedObj(&svc) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf("Error: Failed obj conversion: %s", err)} - } - - if svc.Spec.Type == corev1.ServiceTypeExternalName { - return DoneApplyState{Done: true, Successful: true, Message: "External service"} - } - - if svc.Spec.ClusterIP != corev1.ClusterIPNone && len(svc.Spec.ClusterIP) == 0 { - return DoneApplyState{Done: false, Message: "ClusterIP is empty"} - } - - if svc.Spec.Type == corev1.ServiceTypeLoadBalancer { - if len(svc.Status.LoadBalancer.Ingress) == 0 { - return DoneApplyState{Done: false, Message: "Load balancer ingress is empty"} - } - } - - return DoneApplyState{Done: true, Successful: true} -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/crds.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/crds.go deleted file mode 100644 index 8aee8c18c..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/crds.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - "strings" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" -) - -type ResourceTypes struct { - localCRDs []*APIExtensionsVxCRD - resourceTypes ctlres.ResourceTypes - memoizedScopes map[string]bool -} - -func NewResourceTypes(newResources []ctlres.Resource, resourceTypes ctlres.ResourceTypes) *ResourceTypes { - var localCRDs []*APIExtensionsVxCRD - - for _, newRes := range newResources { - crd := NewAPIExtensionsVxCRD(newRes) - if crd != nil { - localCRDs = append(localCRDs, crd) - } - } - - return &ResourceTypes{localCRDs, resourceTypes, nil} -} - -func (c *ResourceTypes) IsNamespaced(res ctlres.Resource) (bool, error) { - scopeMap, err := c.scopeMap() - if err != nil { - return false, err - } - - apiVer := res.APIVersion() - if !strings.Contains(apiVer, "/") { - apiVer = "/" + apiVer // core group is empty - } - - fullKind := apiVer + "/" + res.Kind() - - isNamespaced, found := scopeMap[fullKind] - if !found { - msgs := []string{ - "- Kubernetes API server did not have matching apiVersion + kind", - "- No matching CRD was found in given configuration", - } - return false, fmt.Errorf("Expected to find kind '%s', but did not:\n%s", fullKind, strings.Join(msgs, "\n")) - } - - return isNamespaced, nil -} - -func (c *ResourceTypes) scopeMap() (map[string]bool, error) { - if c.memoizedScopes != nil { - return c.memoizedScopes, nil - } - - scopeMap, err := c.clusterScopes() - if err != nil { - return nil, err - } - - scopeMap2, err := c.localCRDScopes() - if err != nil { - return nil, err - } - - // Additional CRDs last to override cluster config - for k, v := range scopeMap2 { - scopeMap[k] = v - } - - c.memoizedScopes = scopeMap - - return scopeMap, nil -} - -func (c *ResourceTypes) clusterScopes() (map[string]bool, error) { - scopeMap := map[string]bool{} - - resTypes, err := c.resourceTypes.All(false) - if err != nil { - return nil, err - } - - 
for _, resType := range resTypes { - key := resType.APIResource.Group + "/" + resType.APIResource.Version + "/" + resType.APIResource.Kind - scopeMap[key] = resType.APIResource.Namespaced - } - - return scopeMap, nil -} - -func (c *ResourceTypes) localCRDScopes() (map[string]bool, error) { - scopeMap := map[string]bool{} - - for _, crd := range c.localCRDs { - contents, err := crd.contents() - if err != nil { - return nil, err - } - - for _, ver := range contents.Versions() { - key := contents.Spec.Group + "/" + ver + "/" + contents.Spec.Names.Kind - scopeMap[key] = contents.Spec.Scope == "Namespaced" - } - } - - return scopeMap, nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/custom_waiting_resource.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/custom_waiting_resource.go deleted file mode 100644 index 3eb2fd8d0..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/custom_waiting_resource.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - "sync" - "time" - - ctlconf "carvel.dev/kapp/pkg/kapp/config" - ctlres "carvel.dev/kapp/pkg/kapp/resources" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var timeoutMap sync.Map - -type CustomWaitingResource struct { - resource ctlres.Resource - waitRule ctlconf.WaitRule -} - -func NewCustomWaitingResource(resource ctlres.Resource, waitRules []ctlconf.WaitRule) *CustomWaitingResource { - for _, rule := range waitRules { - if rule.ResourceMatcher().Matches(resource) { - return &CustomWaitingResource{resource, rule} - } - } - return nil -} - -type customWaitingResourceStruct struct { - Metadata metav1.ObjectMeta - Status struct { - ObservedGeneration int64 - Conditions []customWaitingResourceCondition - } -} - -type customWaitingResourceCondition struct { - Type string - Status string - Reason string - Message string - ObservedGeneration int64 -} - -func (s CustomWaitingResource) IsDoneApplying() DoneApplyState { - deletingRes := NewDeleting(s.resource) - if deletingRes != nil { - return deletingRes.IsDoneApplying() - } - - obj := customWaitingResourceStruct{} - - err := s.resource.AsUncheckedTypedObj(&obj) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Error: Failed obj conversion: %s", err)} - } - - if s.waitRule.SupportsObservedGeneration && obj.Metadata.Generation != obj.Status.ObservedGeneration { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for generation %d to be observed", obj.Metadata.Generation)} - } - - if s.waitRule.Ytt != nil { - startTime, found := timeoutMap.Load(s.resource.Description()) - if !found { - startTime = time.Now().Unix() - timeoutMap.Store(s.resource.Description(), startTime) - } - configObj, err := WaitRuleContractV1{ - ResourceMatcher: ctlres.AnyMatcher{ - Matchers: ctlconf.ResourceMatchers(s.waitRule.ResourceMatchers).AsResourceMatchers()}, - Starlark: s.waitRule.Ytt.FuncContractV1.Resource, - CurrentTime: time.Now().Unix(), - StartTime: startTime.(int64), - }.Apply(s.resource) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Error: Applying ytt wait rule: %s", err.Error())} - } - message := configObj.Message - if configObj.UnblockChanges { - message = fmt.Sprintf("Allowing blocked changes to proceed: %s", configObj.Message) - } - return DoneApplyState{Done: configObj.Done, Successful: configObj.Successful, - UnblockChanges: configObj.UnblockChanges, 
Message: message} - } - - hasConditionWaitingForGeneration := false - // Check on failure conditions first - for _, condMatcher := range s.waitRule.ConditionMatchers { - // Check whether timeout has occured - var isTimeOutConditionPresent bool - - for _, cond := range obj.Status.Conditions { - if cond.Type == condMatcher.Type && cond.Status == condMatcher.Status { - if condMatcher.SupportsObservedGeneration && obj.Metadata.Generation != cond.ObservedGeneration { - hasConditionWaitingForGeneration = true - continue - } - - if condMatcher.Timeout != "" { - isTimeOutConditionPresent = true - if s.hasTimeoutOccurred(condMatcher.Timeout, s.resource.Description()) { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Continuously failed for %s with %s: %s, message: %s", - condMatcher.Timeout, cond.Type, cond.Reason, cond.Message)} - } - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "%s: %s (message: %s)", - cond.Type, cond.Reason, cond.Message)} - } - - if condMatcher.Failure { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Encountered failure condition %s == %s: %s, message: %s", - cond.Type, condMatcher.Status, cond.Reason, cond.Message)} - } - } - } - - // Reset the timer in case timeout condition flipped from being present to not present in the Cluster resource status. - // Reset should only happen if condMatcher has timeout. Otherwise, it is possible that condMatcher which dont have timeout will try to reset the map. - if condMatcher.Timeout != "" && !isTimeOutConditionPresent { - timeoutMap.Delete(s.resource.Description()) - continue - } - } - - unblockChangeMsg := "" - message := "No failing or successful conditions found" - - // If no failure conditions found, check on successful ones - for _, condMatcher := range s.waitRule.ConditionMatchers { - for _, cond := range obj.Status.Conditions { - if cond.Type == condMatcher.Type && cond.Status == condMatcher.Status { - if condMatcher.SupportsObservedGeneration && obj.Metadata.Generation != cond.ObservedGeneration { - hasConditionWaitingForGeneration = true - continue - } - if condMatcher.Success { - return DoneApplyState{Done: true, Successful: true, Message: fmt.Sprintf( - "Encountered successful condition %s == %s: %s (message: %s)", - cond.Type, condMatcher.Status, cond.Reason, cond.Message)} - } - if condMatcher.UnblockChanges { - unblockChangeMsg = fmt.Sprintf( - "Allowing blocked changes to proceed: Encountered condition %s == %s: %s", - cond.Type, condMatcher.Status, cond.Reason) - continue - } - if cond.Message != "" { - message = cond.Message - } - } - } - } - - if hasConditionWaitingForGeneration { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for generation %d to be observed by status condition(s)", obj.Metadata.Generation)} - } - - if unblockChangeMsg != "" { - return DoneApplyState{Done: false, UnblockChanges: true, Message: unblockChangeMsg} - } - - return DoneApplyState{Done: false, Message: message} -} - -func (s CustomWaitingResource) hasTimeoutOccurred(timeout string, key string) bool { - expiryTime, found := timeoutMap.Load(key) - if found { - return time.Now().Sub(expiryTime.(time.Time)) > 0 - } - dur, err := time.ParseDuration(timeout) - if err != nil { - dur = 15 * time.Minute - } - timeoutMap.Store(key, time.Now().Add(dur)) - return false -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/deleting.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/deleting.go deleted file mode 100644 index 9ed96efdc..000000000 
--- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/deleting.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - "strings" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" -) - -type Deleting struct { - resource ctlres.Resource -} - -func NewDeleting(resource ctlres.Resource) *Deleting { - if resource.IsDeleting() { - return &Deleting{resource} - } - return nil -} - -func (s Deleting) IsDoneApplying() DoneApplyState { - if len(s.resource.Finalizers()) > 0 { - return DoneApplyState{Done: false, Message: fmt.Sprintf("Waiting on finalizers: %s", - strings.Join(s.resource.Finalizers(), ", "))} - } - return DoneApplyState{Done: false, Message: "Deleting"} -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/done_apply_state.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/done_apply_state.go deleted file mode 100644 index 3583507be..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/done_apply_state.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -type DoneApplyState struct { - Done bool `json:"done"` - Successful bool `json:"successful"` - Message string `json:"message"` - - UnblockChanges bool `json:"unblockChanges"` -} - -func (s DoneApplyState) TerminallyFailed() bool { - return s.Done && !s.Successful -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/ext_and_apps_vx_replica_set.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/ext_and_apps_vx_replica_set.go deleted file mode 100644 index 761835218..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/ext_and_apps_vx_replica_set.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - appsv1 "k8s.io/api/apps/v1" -) - -type ExtensionsAndAppsVxReplicaSet struct { - resource ctlres.Resource -} - -func NewExtensionsAndAppsVxReplicaSet(resource ctlres.Resource) *ExtensionsAndAppsVxReplicaSet { - extMatcher := ctlres.APIGroupKindMatcher{ - APIGroup: "extensions", - Kind: "ReplicaSet", - } - appsMatcher := ctlres.APIGroupKindMatcher{ - APIGroup: "apps", - Kind: "ReplicaSet", - } - if extMatcher.Matches(resource) || appsMatcher.Matches(resource) { - return &ExtensionsAndAppsVxReplicaSet{resource} - } - return nil -} - -func (s ExtensionsAndAppsVxReplicaSet) IsDoneApplyingWithMinimum(minAvailable int) DoneApplyState { - rs := appsv1.ReplicaSet{} - - // TODO unsafely unmarshals any replica set version - err := s.resource.AsUncheckedTypedObj(&rs) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf("Error: Failed obj conversion: %s", err)} - } - - if rs.Generation != rs.Status.ObservedGeneration { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for generation %d to be observed", rs.Generation)} - } - - if int(rs.Status.AvailableReplicas) < minAvailable { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for at least %d available replicas (currently %d available)", - minAvailable-int(rs.Status.AvailableReplicas), rs.Status.AvailableReplicas)} - } - - return DoneApplyState{Done: true, Successful: true} -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/kappctrl_k14s_io_v1alpha1_app.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/kappctrl_k14s_io_v1alpha1_app.go deleted file mode 100644 index e0bf811ce..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/kappctrl_k14s_io_v1alpha1_app.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - kcv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes/scheme" -) - -func init() { - kcv1alpha1.AddToScheme(scheme.Scheme) -} - -type KappctrlK14sIoV1alpha1App struct { - resource ctlres.Resource -} - -func NewKappctrlK14sIoV1alpha1App(resource ctlres.Resource) *KappctrlK14sIoV1alpha1App { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: kcv1alpha1.SchemeGroupVersion.String(), - Kind: "App", - } - if matcher.Matches(resource) { - return &KappctrlK14sIoV1alpha1App{resource} - } - return nil -} - -func (s KappctrlK14sIoV1alpha1App) IsDoneApplying() DoneApplyState { - app := kcv1alpha1.App{} - - err := s.resource.AsTypedObj(&app) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Error: Failed obj conversion: %s", err)} - } - - if app.Generation != app.Status.ObservedGeneration { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for generation %d to be observed", app.Generation)} - } - - for _, cond := range app.Status.Conditions { - errorMsg := app.Status.UsefulErrorMessage - if errorMsg == "" { - errorMsg = cond.Message - } - switch { - case cond.Type == kcv1alpha1.Reconciling && cond.Status == corev1.ConditionTrue: - return DoneApplyState{Done: false, Message: "Reconciling"} - - case cond.Type == kcv1alpha1.ReconcileFailed && cond.Status == corev1.ConditionTrue: - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Reconcile failed: message: %s", errorMsg)} - - case cond.Type == kcv1alpha1.DeleteFailed && cond.Status == corev1.ConditionTrue: - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Delete failed: message: %s", errorMsg)} - } - } - - deletingRes := NewDeleting(s.resource) - if deletingRes != nil { - return deletingRes.IsDoneApplying() - } - - return DoneApplyState{Done: true, Successful: true} -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/packaging_carvel_dev_v1alpha1_packageinstall.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/packaging_carvel_dev_v1alpha1_packageinstall.go deleted file mode 100644 index 95414b843..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/packaging_carvel_dev_v1alpha1_packageinstall.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - appv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1" - pkgv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes/scheme" -) - -func init() { - pkgv1alpha1.AddToScheme(scheme.Scheme) -} - -type PackagingCarvelDevV1alpha1PackageInstall struct { - resource ctlres.Resource -} - -func NewPackagingCarvelDevV1alpha1PackageInstall(resource ctlres.Resource) *PackagingCarvelDevV1alpha1PackageInstall { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: pkgv1alpha1.SchemeGroupVersion.String(), - Kind: "PackageInstall", - } - if matcher.Matches(resource) { - return &PackagingCarvelDevV1alpha1PackageInstall{resource} - } - return nil -} - -func (s PackagingCarvelDevV1alpha1PackageInstall) IsDoneApplying() DoneApplyState { - pkgInstall := pkgv1alpha1.PackageInstall{} - - err := s.resource.AsTypedObj(&pkgInstall) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Error: Failed obj conversion: %s", err)} - } - - if pkgInstall.Generation != pkgInstall.Status.ObservedGeneration { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for generation %d to be observed", pkgInstall.Generation)} - } - - for _, cond := range pkgInstall.Status.Conditions { - errorMsg := pkgInstall.Status.UsefulErrorMessage - if errorMsg == "" { - errorMsg = cond.Message - } - switch { - case cond.Type == appv1alpha1.Reconciling && cond.Status == corev1.ConditionTrue: - return DoneApplyState{Done: false, Message: "Reconciling"} - - case cond.Type == appv1alpha1.ReconcileFailed && cond.Status == corev1.ConditionTrue: - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Reconcile failed: message: %s", errorMsg)} - - case cond.Type == appv1alpha1.DeleteFailed && cond.Status == corev1.ConditionTrue: - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Delete failed: message: %s", errorMsg)} - } - } - - deletingRes := NewDeleting(s.resource) - if deletingRes != nil { - return deletingRes.IsDoneApplying() - } - - return DoneApplyState{Done: true, Successful: true} -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/packaging_carvel_dev_v1alpha1_packagerepository.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/packaging_carvel_dev_v1alpha1_packagerepository.go deleted file mode 100644 index 8972aa179..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/packaging_carvel_dev_v1alpha1_packagerepository.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - appv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1" - pkgv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes/scheme" -) - -func init() { - pkgv1alpha1.AddToScheme(scheme.Scheme) -} - -type PackagingCarvelDevV1alpha1PackageRepo struct { - resource ctlres.Resource -} - -func NewPackagingCarvelDevV1alpha1PackageRepo(resource ctlres.Resource) *PackagingCarvelDevV1alpha1PackageRepo { - matcher := ctlres.APIVersionKindMatcher{ - APIVersion: pkgv1alpha1.SchemeGroupVersion.String(), - Kind: "PackageRepository", - } - if matcher.Matches(resource) { - return &PackagingCarvelDevV1alpha1PackageRepo{resource} - } - return nil -} - -func (s PackagingCarvelDevV1alpha1PackageRepo) IsDoneApplying() DoneApplyState { - pkgRepo := pkgv1alpha1.PackageRepository{} - - err := s.resource.AsTypedObj(&pkgRepo) - if err != nil { - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Error: Failed obj conversion: %s", err)} - } - - if pkgRepo.Generation != pkgRepo.Status.ObservedGeneration { - return DoneApplyState{Done: false, Message: fmt.Sprintf( - "Waiting for generation %d to be observed", pkgRepo.Generation)} - } - - for _, cond := range pkgRepo.Status.Conditions { - errorMsg := pkgRepo.Status.UsefulErrorMessage - if errorMsg == "" { - errorMsg = cond.Message - } - switch { - case cond.Type == appv1alpha1.Reconciling && cond.Status == corev1.ConditionTrue: - return DoneApplyState{Done: false, Message: "Reconciling"} - - case cond.Type == appv1alpha1.ReconcileFailed && cond.Status == corev1.ConditionTrue: - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Reconcile failed: message: %s", errorMsg)} - - case cond.Type == appv1alpha1.DeleteFailed && cond.Status == corev1.ConditionTrue: - return DoneApplyState{Done: true, Successful: false, Message: fmt.Sprintf( - "Delete failed: message: %s", errorMsg)} - } - } - - deletingRes := NewDeleting(s.resource) - if deletingRes != nil { - return deletingRes.IsDoneApplying() - } - - return DoneApplyState{Done: true, Successful: true} -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/wait_rule_contract_v1.go b/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/wait_rule_contract_v1.go deleted file mode 100644 index 077247eeb..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/resourcesmisc/wait_rule_contract_v1.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resourcesmisc - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - cmdtpl "github.com/k14s/ytt/pkg/cmd/template" - "github.com/k14s/ytt/pkg/cmd/ui" - "github.com/k14s/ytt/pkg/files" - "sigs.k8s.io/yaml" -) - -type WaitRuleContractV1 struct { - ResourceMatcher ctlres.ResourceMatcher - Starlark string - CurrentTime int64 - StartTime int64 -} - -type waitRuleContractV1Result struct { - Result WaitRuleContractV1ResultDetails -} - -type WaitRuleContractV1ResultDetails struct { - Done bool `json:"done"` - Successful bool `json:"successful"` - Message string `json:"message"` - - UnblockChanges bool `json:"unblockChanges"` -} - -func (t WaitRuleContractV1) Apply(res ctlres.Resource) (*WaitRuleContractV1ResultDetails, error) { - if !t.ResourceMatcher.Matches(res) { - return nil, nil - } - - return t.evalYtt(res) -} - -func (t WaitRuleContractV1) evalYtt(res ctlres.Resource) (*WaitRuleContractV1ResultDetails, error) { - opts := cmdtpl.NewOptions() - - opts.DataValuesFlags.FromFiles = []string{"values.yml"} - opts.DataValuesFlags.ReadFileFunc = func(path string) ([]byte, error) { - if path != "values.yml" { - return nil, fmt.Errorf("Unknown file to read: %s", path) - } - return yaml.Marshal(res.DeepCopyRaw()) - } - opts.DataValuesFlags.KVsFromStrings = []string{fmt.Sprintf("startTime=%d", t.StartTime), fmt.Sprintf("currentTime=%d", t.CurrentTime)} - - filesToProcess := []*files.File{ - files.MustNewFileFromSource(files.NewBytesSource("resource.star", []byte(t.Starlark))), - files.MustNewFileFromSource(files.NewBytesSource("config.yml", t.getConfigYAML())), - } - - out := opts.RunWithFiles(cmdtpl.Input{Files: filesToProcess}, ui.NewTTY(false)) - if out.Err != nil { - return nil, fmt.Errorf("Evaluating: %w", out.Err) - } - - if len(out.Files) == 0 { - fmt.Printf("Expected to find config.yml but saw zero files") - } - - file := out.Files[0] - if file.RelativePath() != "config.yml" { - fmt.Printf("Expected config.yml but was: %s", file.RelativePath()) - } - - configObj := waitRuleContractV1Result{} - - err := yaml.Unmarshal(file.Bytes(), &configObj) - if err != nil { - return nil, fmt.Errorf("Deserializing result: %w", err) - } - - return &configObj.Result, nil -} - -func (t WaitRuleContractV1) getConfigYAML() []byte { - config := ` -#@ load("resource.star", "is_done") -#@ load("@ytt:data", "data") - -result: #@ is_done(data.values) -` - return []byte(config) -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/util/retry.go b/vendor/carvel.dev/kapp/pkg/kapp/util/retry.go deleted file mode 100644 index cc7860283..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/util/retry.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package util - -import ( - // "fmt" - "time" - - "k8s.io/apimachinery/pkg/util/wait" -) - -// Retry is different from wait.Poll because -// it does not stop retrying when error is encountered -func Retry(interval, timeout time.Duration, condFunc wait.ConditionFunc) error { - var lastErr error - var times int - - wait.PollImmediate(interval, timeout, func() (bool, error) { - done, err := condFunc() - lastErr = err - times++ - return done, nil - }) - - if lastErr != nil { - // TODO should not wrap error as it may lose necessary type info - // eg resources.Update needs to return status info - // return fmt.Errorf("Retried %d times: %s", times, lastErr) - return lastErr - } - - return nil -} - -// Retry is different from wait.Poll because -// it does not stop retrying when error is encountered -func Retry2(interval, timeout time.Duration, shouldRetryFunc func(error) bool, performFunc func() error) error { - var lastErr error - - wait.PollImmediate(interval, timeout, func() (bool, error) { - err := performFunc() - lastErr = err - done := err == nil || shouldRetryFunc(err) == false - return done, nil - }) - - if lastErr != nil { - // TODO should not wrap error as it may lose necessary type info - // eg resources.Update needs to return status info - // return fmt.Errorf("Retried %d times: %s", times, lastErr) - return lastErr - } - - return nil -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/util/throttle.go b/vendor/carvel.dev/kapp/pkg/kapp/util/throttle.go deleted file mode 100644 index 53a7a0a8f..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/util/throttle.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package util - -import ( - "fmt" -) - -type Throttle struct { - ch chan struct{} -} - -func NewThrottle(max int) Throttle { - if max < 1 { - panic(fmt.Sprintf("Expected maximum throttle to be >= 1, but was %d", max)) - } - ch := make(chan struct{}, max) - for i := 0; i < max; i++ { - ch <- struct{}{} - } - return Throttle{ch} -} - -func (t Throttle) Take() { <-t.ch } -func (t Throttle) Done() { t.ch <- struct{}{} } diff --git a/vendor/carvel.dev/kapp/pkg/kapp/version/version.go b/vendor/carvel.dev/kapp/pkg/kapp/version/version.go deleted file mode 100644 index 353b96e20..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/version/version.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package version - -import "runtime/debug" - -var ( - // Version can be set via: - // -ldflags="-X 'carvel.dev/kapp/pkg/kapp/version.Version=$TAG'" - defaultVersion = "develop" - Version = "" - moduleName = "carvel.dev/kapp" -) - -func init() { - Version = version() -} - -func version() string { - if Version != "" { - // Version was set via ldflags, just return it. - return Version - } - - info, ok := debug.ReadBuildInfo() - if !ok { - return defaultVersion - } - - // Anything else. - for _, dep := range info.Deps { - if dep.Path == moduleName { - return dep.Version - } - } - - return defaultVersion -} diff --git a/vendor/carvel.dev/kapp/pkg/kapp/yttresmod/overlay_contract_v1_mod.go b/vendor/carvel.dev/kapp/pkg/kapp/yttresmod/overlay_contract_v1_mod.go deleted file mode 100644 index 091ac58c1..000000000 --- a/vendor/carvel.dev/kapp/pkg/kapp/yttresmod/overlay_contract_v1_mod.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package yttresmod - -import ( - "fmt" - - ctlres "carvel.dev/kapp/pkg/kapp/resources" - cmdtpl "github.com/k14s/ytt/pkg/cmd/template" - "github.com/k14s/ytt/pkg/cmd/ui" - "github.com/k14s/ytt/pkg/files" - "sigs.k8s.io/yaml" -) - -type OverlayContractV1Mod struct { - ResourceMatcher ctlres.ResourceMatcher - OverlayYAML string - - // TODO support rebase_resource(res, sources) func via .star file? - // Starlark string -} - -var _ ctlres.ResourceModWithMultiple = OverlayContractV1Mod{} - -func (t OverlayContractV1Mod) IsResourceMatching(res ctlres.Resource) bool { - if res == nil || !t.ResourceMatcher.Matches(res) { - return false - } - return true -} - -func (t OverlayContractV1Mod) ApplyFromMultiple(res ctlres.Resource, srcs map[ctlres.FieldCopyModSource]ctlres.Resource) error { - result, err := t.evalYtt(res, srcs) - if err != nil { - return fmt.Errorf("Applying ytt (overlayContractV1): %w", err) - } - - res.DeepCopyIntoFrom(result) - return nil -} - -func (t OverlayContractV1Mod) evalYtt(res ctlres.Resource, srcs map[ctlres.FieldCopyModSource]ctlres.Resource) (ctlres.Resource, error) { - opts := cmdtpl.NewOptions() - - opts.DataValuesFlags.FromFiles = []string{"values.yml"} - opts.DataValuesFlags.ReadFileFunc = func(path string) ([]byte, error) { - if path != "values.yml" { - return nil, fmt.Errorf("Unknown file to read: %s", path) - } - return t.valuesYAML(srcs) - } - - resYAMLBs, err := res.AsYAMLBytes() - if err != nil { - return nil, err - } - - filesToProcess := []*files.File{ - // Current resource we are working with: - files.MustNewFileFromSource(files.NewBytesSource("resource.yml", resYAMLBs)), - // Overlay instructions - files.MustNewFileFromSource(files.NewBytesSource("overlay.yml", []byte(t.OverlayYAML))), - } - - out := opts.RunWithFiles(cmdtpl.Input{Files: filesToProcess}, ui.NewTTY(false)) - if out.Err != nil { - return nil, fmt.Errorf("Evaluating: %w", out.Err) - } - - if len(out.Files) == 0 { - return nil, fmt.Errorf("Expected to find resource.yml but saw zero files") - } - - file := out.Files[0] - if file.RelativePath() != "resource.yml" { - return nil, fmt.Errorf("Expected resource.yml but was: %s", file.RelativePath()) - } - - rs, err := ctlres.NewResourcesFromBytes(file.Bytes()) - if err != nil { - return nil, fmt.Errorf("Deserializing result: %w", err) - } - - if len(rs) != 1 { - return nil, fmt.Errorf("Expected one resource to be returned from ytt, but was %d", len(rs)) - } - - return rs[0], nil -} - -func (t OverlayContractV1Mod) valuesYAML(srcs map[ctlres.FieldCopyModSource]ctlres.Resource) ([]byte, error) { - values := map[string]interface{}{} - for src, res := range srcs { - if res != nil { - values[string(src)] = res.DeepCopyRaw() - } else { - values[string(src)] = nil - } - } - return yaml.Marshal(values) -} diff --git a/vendor/carvel.dev/vendir/LICENSE b/vendor/carvel.dev/vendir/LICENSE deleted file mode 100644 index 1a9893b43..000000000 --- a/vendor/carvel.dev/vendir/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/carvel.dev/vendir/NOTICE b/vendor/carvel.dev/vendir/NOTICE deleted file mode 100644 index 9135d9685..000000000 --- a/vendor/carvel.dev/vendir/NOTICE +++ /dev/null @@ -1,15 +0,0 @@ -vendir - -Copyright (c) 2019-Present Pivotal Software, Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/config.go b/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/config.go deleted file mode 100644 index 02011d05f..000000000 --- a/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/config.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - "bytes" - "encoding/json" - "strings" -) - -// +k8s:deepcopy-gen=true -// +k8s:openapi-gen=true -type VersionSelection struct { - Semver *VersionSelectionSemver `json:"semver,omitempty" protobuf:"bytes,1,opt,name=semver"` -} - -// +k8s:deepcopy-gen=true -// +k8s:openapi-gen=true -type VersionSelectionSemver struct { - Constraints string `json:"constraints,omitempty" protobuf:"bytes,1,opt,name=constraints"` - Prereleases *VersionSelectionSemverPrereleases `json:"prereleases,omitempty" protobuf:"bytes,2,opt,name=prereleases"` -} - -// +k8s:deepcopy-gen=true -// +k8s:openapi-gen=true -type VersionSelectionSemverPrereleases struct { - Identifiers []string `json:"identifiers,omitempty" protobuf:"bytes,1,opt,name=identifiers"` -} - -func (p VersionSelectionSemverPrereleases) IdentifiersAsMap() map[string]struct{} { - result := map[string]struct{}{} - for _, name := range p.Identifiers { - result[name] = struct{}{} - } - return result -} - -func (vs VersionSelection) Description() string { - // json.Marshal encodes <,>,& as unicode replacement runes - // (https://pkg.go.dev/encoding/json#Marshal) - buffer := &bytes.Buffer{} - encoder := json.NewEncoder(buffer) - encoder.SetEscapeHTML(false) - if err := encoder.Encode(vs); err != nil { - return "?" - } - return strings.TrimSpace(string(buffer.Bytes())) -} diff --git a/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/generated.pb.go b/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/generated.pb.go deleted file mode 100644 index 56ab6b7dc..000000000 --- a/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/generated.pb.go +++ /dev/null @@ -1,723 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: carvel.dev/vendir/pkg/vendir/versions/v1alpha1/generated.proto - -package v1alpha1 - -import ( - fmt "fmt" - - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - - proto "github.com/gogo/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *VersionSelection) Reset() { *m = VersionSelection{} } -func (*VersionSelection) ProtoMessage() {} -func (*VersionSelection) Descriptor() ([]byte, []int) { - return fileDescriptor_f7fa722d77d11bd9, []int{0} -} -func (m *VersionSelection) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VersionSelection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VersionSelection) XXX_Merge(src proto.Message) { - xxx_messageInfo_VersionSelection.Merge(m, src) -} -func (m *VersionSelection) XXX_Size() int { - return m.Size() -} -func (m *VersionSelection) XXX_DiscardUnknown() { - xxx_messageInfo_VersionSelection.DiscardUnknown(m) -} - -var xxx_messageInfo_VersionSelection proto.InternalMessageInfo - -func (m *VersionSelectionSemver) Reset() { *m = VersionSelectionSemver{} } -func (*VersionSelectionSemver) ProtoMessage() {} -func (*VersionSelectionSemver) Descriptor() ([]byte, []int) { - return fileDescriptor_f7fa722d77d11bd9, []int{1} -} -func (m *VersionSelectionSemver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VersionSelectionSemver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VersionSelectionSemver) XXX_Merge(src proto.Message) { - xxx_messageInfo_VersionSelectionSemver.Merge(m, src) -} -func (m *VersionSelectionSemver) XXX_Size() int { - return m.Size() -} -func (m *VersionSelectionSemver) XXX_DiscardUnknown() { - xxx_messageInfo_VersionSelectionSemver.DiscardUnknown(m) -} - -var xxx_messageInfo_VersionSelectionSemver proto.InternalMessageInfo - -func (m *VersionSelectionSemverPrereleases) Reset() { *m = VersionSelectionSemverPrereleases{} } -func (*VersionSelectionSemverPrereleases) ProtoMessage() {} -func (*VersionSelectionSemverPrereleases) Descriptor() ([]byte, []int) { - return fileDescriptor_f7fa722d77d11bd9, []int{2} -} -func (m *VersionSelectionSemverPrereleases) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VersionSelectionSemverPrereleases) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VersionSelectionSemverPrereleases) XXX_Merge(src proto.Message) { - xxx_messageInfo_VersionSelectionSemverPrereleases.Merge(m, src) -} -func (m *VersionSelectionSemverPrereleases) XXX_Size() int { - return m.Size() -} -func (m *VersionSelectionSemverPrereleases) XXX_DiscardUnknown() { - xxx_messageInfo_VersionSelectionSemverPrereleases.DiscardUnknown(m) -} - -var xxx_messageInfo_VersionSelectionSemverPrereleases proto.InternalMessageInfo - -func init() { - proto.RegisterType((*VersionSelection)(nil), "carvel.dev.vendir.pkg.vendir.versions.v1alpha1.VersionSelection") - proto.RegisterType((*VersionSelectionSemver)(nil), "carvel.dev.vendir.pkg.vendir.versions.v1alpha1.VersionSelectionSemver") - proto.RegisterType((*VersionSelectionSemverPrereleases)(nil), "carvel.dev.vendir.pkg.vendir.versions.v1alpha1.VersionSelectionSemverPrereleases") -} - -func init() { - proto.RegisterFile("carvel.dev/vendir/pkg/vendir/versions/v1alpha1/generated.proto", fileDescriptor_f7fa722d77d11bd9) -} - -var fileDescriptor_f7fa722d77d11bd9 = []byte{ - // 333 
bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x8f, 0x31, 0x4b, 0xfb, 0x40, - 0x18, 0xc6, 0x73, 0xff, 0x3f, 0x14, 0x7b, 0x19, 0x94, 0x08, 0x52, 0x1c, 0xae, 0xda, 0xa9, 0x8b, - 0x17, 0x2a, 0xb8, 0x3a, 0x44, 0x10, 0xdc, 0x34, 0x85, 0x0e, 0x6e, 0x69, 0xf2, 0x36, 0x3d, 0x9b, - 0xde, 0x85, 0xbb, 0x6b, 0x46, 0x71, 0xf0, 0x03, 0xf8, 0xb1, 0x3a, 0x76, 0x92, 0x4e, 0xc5, 0x9e, - 0x5f, 0x44, 0x7a, 0x31, 0x24, 0x88, 0x20, 0x82, 0xdb, 0x73, 0xef, 0xbd, 0xcf, 0xef, 0x7d, 0x1e, - 0x7c, 0x19, 0x47, 0xb2, 0x80, 0x8c, 0x26, 0x50, 0xf8, 0x05, 0xf0, 0x84, 0x49, 0x3f, 0x9f, 0xa5, - 0x95, 0x2c, 0x40, 0x2a, 0x26, 0xb8, 0xf2, 0x8b, 0x41, 0x94, 0xe5, 0xd3, 0x68, 0xe0, 0xa7, 0xc0, - 0x41, 0x46, 0x1a, 0x12, 0x9a, 0x4b, 0xa1, 0x85, 0x47, 0x6b, 0x3f, 0x2d, 0x4d, 0x34, 0x9f, 0xa5, - 0x95, 0xac, 0xfc, 0xb4, 0xf2, 0x1f, 0x9f, 0xa5, 0x4c, 0x4f, 0x17, 0x63, 0x1a, 0x8b, 0xb9, 0x9f, - 0x8a, 0x54, 0xf8, 0x16, 0x33, 0x5e, 0x4c, 0xec, 0xcb, 0x3e, 0xac, 0x2a, 0xf1, 0xbd, 0x47, 0x7c, - 0x30, 0x2a, 0x19, 0x43, 0xc8, 0x20, 0xd6, 0x4c, 0x70, 0xef, 0x01, 0xb7, 0x14, 0xcc, 0x0b, 0x90, - 0x1d, 0x74, 0x82, 0xfa, 0xee, 0xf9, 0xf5, 0x2f, 0x33, 0xd0, 0xaf, 0xc4, 0xa1, 0xa5, 0x05, 0xd8, - 0x6c, 0xba, 0xad, 0x52, 0x87, 0x9f, 0x17, 0x7a, 0xaf, 0x08, 0x1f, 0x7d, 0xbf, 0xee, 0x5d, 0x60, - 0x37, 0x16, 0x5c, 0x69, 0x19, 0x31, 0xae, 0x95, 0xcd, 0xd2, 0x0e, 0x0e, 0x97, 0x9b, 0xae, 0x63, - 0x36, 0x5d, 0xf7, 0xaa, 0xfe, 0x0a, 0x9b, 0x7b, 0xde, 0x33, 0xc2, 0x6e, 0x2e, 0x41, 0x42, 0x06, - 0x91, 0x02, 0xd5, 0xf9, 0x67, 0x3b, 0xdc, 0xfd, 0x4d, 0x87, 0xdb, 0x1a, 0x1c, 0xec, 0xef, 0x62, - 0x34, 0x06, 0x61, 0xf3, 0x6c, 0x6f, 0x84, 0x4f, 0x7f, 0x44, 0x78, 0x03, 0xec, 0xb2, 0x04, 0xb8, - 0x66, 0x13, 0x06, 0x72, 0x57, 0xf1, 0x7f, 0xbf, 0x5d, 0x72, 0x6f, 0xea, 0x71, 0xd8, 0xdc, 0x09, - 0xe8, 0x72, 0x4b, 0x9c, 0xd5, 0x96, 0x38, 0xeb, 0x2d, 0x71, 0x9e, 0x0c, 0x41, 0x4b, 0x43, 0xd0, - 0xca, 0x10, 0xb4, 0x36, 0x04, 0xbd, 0x19, 0x82, 0x5e, 0xde, 0x89, 0x73, 0xbf, 0x57, 0xf5, 0xf8, - 0x08, 0x00, 0x00, 0xff, 0xff, 0xff, 0x5f, 0x50, 0x3c, 0x80, 0x02, 0x00, 0x00, -} - -func (m *VersionSelection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VersionSelection) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VersionSelection) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Semver != nil { - { - size, err := m.Semver.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *VersionSelectionSemver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VersionSelectionSemver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VersionSelectionSemver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Prereleases != nil { - { - size, err := m.Prereleases.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= 
len(m.Constraints) - copy(dAtA[i:], m.Constraints) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Constraints))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *VersionSelectionSemverPrereleases) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VersionSelectionSemverPrereleases) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VersionSelectionSemverPrereleases) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Identifiers) > 0 { - for iNdEx := len(m.Identifiers) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Identifiers[iNdEx]) - copy(dAtA[i:], m.Identifiers[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Identifiers[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *VersionSelection) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Semver != nil { - l = m.Semver.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VersionSelectionSemver) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Constraints) - n += 1 + l + sovGenerated(uint64(l)) - if m.Prereleases != nil { - l = m.Prereleases.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VersionSelectionSemverPrereleases) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Identifiers) > 0 { - for _, s := range m.Identifiers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *VersionSelection) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VersionSelection{`, - `Semver:` + strings.Replace(this.Semver.String(), "VersionSelectionSemver", "VersionSelectionSemver", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VersionSelectionSemver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VersionSelectionSemver{`, - `Constraints:` + fmt.Sprintf("%v", this.Constraints) + `,`, - `Prereleases:` + strings.Replace(this.Prereleases.String(), "VersionSelectionSemverPrereleases", "VersionSelectionSemverPrereleases", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VersionSelectionSemverPrereleases) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VersionSelectionSemverPrereleases{`, - `Identifiers:` + fmt.Sprintf("%v", this.Identifiers) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *VersionSelection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { 
- return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VersionSelection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VersionSelection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Semver", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Semver == nil { - m.Semver = &VersionSelectionSemver{} - } - if err := m.Semver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VersionSelectionSemver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VersionSelectionSemver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VersionSelectionSemver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Constraints = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prereleases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Prereleases == nil { - m.Prereleases = 
&VersionSelectionSemverPrereleases{} - } - if err := m.Prereleases.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VersionSelectionSemverPrereleases) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VersionSelectionSemverPrereleases: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VersionSelectionSemverPrereleases: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Identifiers", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Identifiers = append(m.Identifiers, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if 
iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/generated.proto b/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/generated.proto deleted file mode 100644 index 537db467e..000000000 --- a/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/generated.proto +++ /dev/null @@ -1,30 +0,0 @@ - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package carvel.dev.vendir.pkg.vendir.versions.v1alpha1; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// +k8s:deepcopy-gen=true -// +k8s:openapi-gen=true -message VersionSelection { - optional VersionSelectionSemver semver = 1; -} - -// +k8s:deepcopy-gen=true -// +k8s:openapi-gen=true -message VersionSelectionSemver { - optional string constraints = 1; - - optional VersionSelectionSemverPrereleases prereleases = 2; -} - -// +k8s:deepcopy-gen=true -// +k8s:openapi-gen=true -message VersionSelectionSemverPrereleases { - repeated string identifiers = 1; -} - diff --git a/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/zz_generated.deepcopy.go b/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 6c798ff5a..000000000 --- a/vendor/carvel.dev/vendir/pkg/vendir/versions/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by main. DO NOT EDIT. - -package v1alpha1 - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VersionSelection) DeepCopyInto(out *VersionSelection) { - *out = *in - if in.Semver != nil { - in, out := &in.Semver, &out.Semver - *out = new(VersionSelectionSemver) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionSelection. -func (in *VersionSelection) DeepCopy() *VersionSelection { - if in == nil { - return nil - } - out := new(VersionSelection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VersionSelectionSemver) DeepCopyInto(out *VersionSelectionSemver) { - *out = *in - if in.Prereleases != nil { - in, out := &in.Prereleases, &out.Prereleases - *out = new(VersionSelectionSemverPrereleases) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionSelectionSemver. -func (in *VersionSelectionSemver) DeepCopy() *VersionSelectionSemver { - if in == nil { - return nil - } - out := new(VersionSelectionSemver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VersionSelectionSemverPrereleases) DeepCopyInto(out *VersionSelectionSemverPrereleases) { - *out = *in - if in.Identifiers != nil { - in, out := &in.Identifiers, &out.Identifiers - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionSelectionSemverPrereleases. -func (in *VersionSelectionSemverPrereleases) DeepCopy() *VersionSelectionSemverPrereleases { - if in == nil { - return nil - } - out := new(VersionSelectionSemverPrereleases) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/cppforlife/cobrautil/Gopkg.lock b/vendor/github.com/cppforlife/cobrautil/Gopkg.lock deleted file mode 100644 index 04006fa30..000000000 --- a/vendor/github.com/cppforlife/cobrautil/Gopkg.lock +++ /dev/null @@ -1,36 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" - name = "github.com/inconshreveable/mousetrap" - packages = ["."] - pruneopts = "UT" - revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - version = "v1.0" - -[[projects]] - digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939" - name = "github.com/spf13/cobra" - packages = ["."] - pruneopts = "UT" - revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" - version = "v0.0.3" - -[[projects]] - digest = "1:dab83a1bbc7ad3d7a6ba1a1cc1760f25ac38cdf7d96a5cdd55cd915a4f5ceaf9" - name = "github.com/spf13/pflag" - packages = ["."] - pruneopts = "UT" - revision = "9a97c102cda95a86cec2345a6f09f55a939babf5" - version = "v1.0.2" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/spf13/cobra", - "github.com/spf13/pflag", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/cppforlife/cobrautil/Gopkg.toml b/vendor/github.com/cppforlife/cobrautil/Gopkg.toml deleted file mode 100644 index 7f72c415e..000000000 --- a/vendor/github.com/cppforlife/cobrautil/Gopkg.toml +++ /dev/null @@ -1,38 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/spf13/cobra" - version = "0.0.3" - -[[constraint]] - name = "github.com/spf13/pflag" - version = "1.0.2" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/github.com/cppforlife/cobrautil/LICENSE b/vendor/github.com/cppforlife/cobrautil/LICENSE deleted file mode 100644 index 145c03dcd..000000000 --- a/vendor/github.com/cppforlife/cobrautil/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2018 Dmitriy Kalinin - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cppforlife/cobrautil/README.md b/vendor/github.com/cppforlife/cobrautil/README.md deleted file mode 100644 index d8c62eac4..000000000 --- a/vendor/github.com/cppforlife/cobrautil/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# cobrautil - -util package to help with https://github.com/spf13/cobra. 
diff --git a/vendor/github.com/cppforlife/cobrautil/flag_help_sections.go b/vendor/github.com/cppforlife/cobrautil/flag_help_sections.go deleted file mode 100644 index 4ad9fbbfe..000000000 --- a/vendor/github.com/cppforlife/cobrautil/flag_help_sections.go +++ /dev/null @@ -1,122 +0,0 @@ -package cobrautil - -import ( - "fmt" - "regexp" - "sort" - "strings" - - "github.com/spf13/cobra" -) - -type FlagHelpSection struct { - Title string - - PrefixMatch string - ExactMatch []string - NoneMatch bool -} - -func (s FlagHelpSection) Matches(name string) bool { - if len(s.PrefixMatch) > 0 && strings.HasPrefix(name, s.PrefixMatch+"-") { - return true - } - for _, em := range s.ExactMatch { - if name == em { - return true - } - } - return false -} - -type flagLine struct { - Line string - SectionIdxs []int -} - -func (l flagLine) InSectionIdx(idx int) bool { - for _, i := range l.SectionIdxs { - if i == idx { - return true - } - } - return false -} - -var ( - flagHelpFuncCount = 0 - flagNameRegexp = regexp.MustCompile("^\\s+(\\-[a-z], )?\\-\\-([a-z\\-]+)\\s+") -) - -func FlagHelpSectionsUsageTemplate(sections []FlagHelpSection) string { - flagHelpFuncCount += 1 - flagHelpFuncName := fmt.Sprintf("flagsWithSections%d", flagHelpFuncCount) - - cobra.AddTemplateFunc(flagHelpFuncName, func(str string) string { - lines := strings.Split(str, "\n") - flags := map[string]flagLine{} - flagNames := []string{} - - for _, line := range lines { - match := flagNameRegexp.FindStringSubmatch(line) - flagName := match[2] - - if _, found := flags[flagName]; found { - panic("Expected to not find multiple flags with same name") - } - - fline := flagLine{Line: line} - noneMatchIdx := -1 - - for i, section := range sections { - if section.Matches(flagName) { - fline.SectionIdxs = append(fline.SectionIdxs, i) - } - if section.NoneMatch { - noneMatchIdx = i - } - } - if len(fline.SectionIdxs) == 0 { - fline.SectionIdxs = []int{noneMatchIdx} - } - - flags[flagName] = fline - flagNames = append(flagNames, flagName) - } - - sort.Strings(flagNames) - - sectionsResult := []string{} - - for i, section := range sections { - result := section.Title + "\n" - for _, name := range flagNames { - fline := flags[name] - if fline.InSectionIdx(i) { - result += fline.Line + "\n" - } - } - sectionsResult = append(sectionsResult, result) - } - - return strings.TrimSpace(strings.Join(sectionsResult, "\n")) - }) - - unmodifiedCmd := &cobra.Command{} - usageTemplate := unmodifiedCmd.UsageTemplate() - - const defaultTpl = `{{if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}` - - if !strings.Contains(usageTemplate, defaultTpl) { - panic("Expected to find available flags section in spf13/cobra default usage template") - } - - newTpl := fmt.Sprintf(`{{if .HasAvailableLocalFlags}} - -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces | %s}}{{end}}`, flagHelpFuncName) - - return strings.Replace(usageTemplate, defaultTpl, newTpl, 1) -} diff --git a/vendor/github.com/cppforlife/cobrautil/help_sections.go b/vendor/github.com/cppforlife/cobrautil/help_sections.go deleted file mode 100644 index 5ff384659..000000000 --- a/vendor/github.com/cppforlife/cobrautil/help_sections.go +++ /dev/null @@ -1,75 +0,0 @@ -package cobrautil - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func init() { - cobra.AddTemplateFunc("commandsWithAnnotation", func(cmd *cobra.Command, key, value string) []*cobra.Command { - var result []*cobra.Command - for _, c := range cmd.Commands() { - anns := 
map[string]string{} - if c.Annotations != nil { - anns = c.Annotations - } - if anns[key] == value { - result = append(result, c) - } - } - return result - }) -} - -type HelpSection struct { - Key string - Value string - Title string -} - -func HelpSectionsUsageTemplate(sections []HelpSection) string { - usageTemplate := `Usage:{{if .Runnable}} - {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} - {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{.Example}}{{end}}{{if .HasAvailableSubCommands}} - -Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` - - const defaultTpl = `{{if .HasAvailableSubCommands}} - -Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}` - - newTpl := "{{if .HasAvailableSubCommands}}" - - for _, section := range sections { - newTpl += fmt.Sprintf(`{{$cmds := (commandsWithAnnotation . "%s" "%s")}}{{if $cmds}} - -%s{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}`, section.Key, section.Value, section.Title) - } - - newTpl += "{{end}}" - - return strings.Replace(usageTemplate, defaultTpl, newTpl, 1) -} diff --git a/vendor/github.com/cppforlife/cobrautil/misc.go b/vendor/github.com/cppforlife/cobrautil/misc.go deleted file mode 100644 index 29edce0a9..000000000 --- a/vendor/github.com/cppforlife/cobrautil/misc.go +++ /dev/null @@ -1,118 +0,0 @@ -package cobrautil - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -type ReconfigureFunc func(cmd *cobra.Command) - -func VisitCommands(cmd *cobra.Command, fns ...ReconfigureFunc) { - for _, f := range fns { - f(cmd) - } - for _, child := range cmd.Commands() { - VisitCommands(child, fns...) 
- } -} - -func ReconfigureLeafCmds(fs ...func(cmd *cobra.Command)) ReconfigureFunc { - return func(cmd *cobra.Command) { - if len(cmd.Commands()) > 0 { - return - } - - for _, f := range fs { - f(cmd) - } - } -} - -func WrapRunEForCmd(additionalRunE func(*cobra.Command, []string) error) ReconfigureFunc { - return func(cmd *cobra.Command) { - if cmd.RunE == nil { - panic(fmt.Sprintf("Internal: Command '%s' does not set RunE", cmd.CommandPath())) - } - - origRunE := cmd.RunE - cmd.RunE = func(cmd2 *cobra.Command, args []string) error { - err := additionalRunE(cmd2, args) - if err != nil { - return err - } - return origRunE(cmd2, args) - } - } -} - -// ReconfigureFuncs - -func ReconfigureCmdWithSubcmd(cmd *cobra.Command) { - if len(cmd.Commands()) == 0 { - return - } - - if cmd.Args == nil { - cmd.Args = cobra.ArbitraryArgs - } - if cmd.RunE == nil { - cmd.RunE = ShowSubcommands - } - - var strs []string - for _, subcmd := range cmd.Commands() { - if !subcmd.Hidden { - strs = append(strs, subcmd.Name()) - } - } - - cmd.Short += " (" + strings.Join(strs, ", ") + ")" -} - -func DisallowExtraArgs(cmd *cobra.Command) { - WrapRunEForCmd(func(cmd2 *cobra.Command, args []string) error { - if len(args) > 0 { - return fmt.Errorf("command '%s' does not accept extra arguments '%s'", cmd2.CommandPath(), args[0]) - } - return nil - })(cmd) - cmd.Args = cobra.ArbitraryArgs -} - -// New RunE's - -func ShowSubcommands(cmd *cobra.Command, args []string) error { - var strs []string - for _, subcmd := range cmd.Commands() { - if !subcmd.Hidden { - strs = append(strs, subcmd.Name()) - } - } - return fmt.Errorf("Use one of available subcommands: %s", strings.Join(strs, ", ")) -} - -func ShowHelp(cmd *cobra.Command, args []string) error { - cmd.Help() - return fmt.Errorf("Invalid command - see available commands/subcommands above") -} - -func IsCobraManagedCommand(args []string) bool { - if len(args) > 1 { - cmdPathPieces := args[1:] - - var cmdName string // first "non-flag" arguments - for _, arg := range cmdPathPieces { - if !strings.HasPrefix(arg, "-") { - cmdName = arg - break - } - } - switch cmdName { - case "help", cobra.ShellCompRequestCmd, cobra.ShellCompNoDescRequestCmd: - return true - } - } - return false -} diff --git a/vendor/github.com/cppforlife/cobrautil/resolvable_flags.go b/vendor/github.com/cppforlife/cobrautil/resolvable_flags.go deleted file mode 100644 index 0aa1f8de4..000000000 --- a/vendor/github.com/cppforlife/cobrautil/resolvable_flags.go +++ /dev/null @@ -1,26 +0,0 @@ -package cobrautil - -import ( - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -type ResolvableFlag interface { - Resolve() error -} - -func ResolveFlagsForCmd(cmd *cobra.Command, args []string) error { - var lastFlagErr error - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if flag.Value == nil { - return - } - if resolvableVal, ok := flag.Value.(ResolvableFlag); ok { - err := resolvableVal.Resolve() - if err != nil { - lastFlagErr = err - } - } - }) - return lastFlagErr -} diff --git a/vendor/github.com/cppforlife/color/LICENSE.md b/vendor/github.com/cppforlife/color/LICENSE.md deleted file mode 100644 index 25fdaf639..000000000 --- a/vendor/github.com/cppforlife/color/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, 
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cppforlife/color/README.md b/vendor/github.com/cppforlife/color/README.md deleted file mode 100644 index 0e72ed009..000000000 --- a/vendor/github.com/cppforlife/color/README.md +++ /dev/null @@ -1,176 +0,0 @@ -!!! Forked from https://github.com/cppforlife/color !!! - -# Color - -Color lets you use colorized outputs in terms of [ANSI Escape -Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It -has support for Windows too! The API can be used in several ways, pick one that -suits you. - - -![Color](https://i.imgur.com/c1JI0lA.png) - - -## Install - -```bash -go get github.com/fatih/color -``` - -## Examples - -### Standard colors - -```go -// Print with default helper functions -color.Cyan("Prints text in cyan.") - -// A newline will be appended automatically -color.Blue("Prints %s in blue.", "text") - -// These are using the default foreground colors -color.Red("We have red") -color.Magenta("And many others ..") - -``` - -### Mix and reuse colors - -```go -// Create a new color object -c := color.New(color.FgCyan).Add(color.Underline) -c.Println("Prints cyan text with an underline.") - -// Or just add them to New() -d := color.New(color.FgCyan, color.Bold) -d.Printf("This prints bold cyan %s\n", "too!.") - -// Mix up foreground and background colors, create new mixes! 
-red := color.New(color.FgRed) - -boldRed := red.Add(color.Bold) -boldRed.Println("This will print text in bold red.") - -whiteBackground := red.Add(color.BgWhite) -whiteBackground.Println("Red text with white background.") -``` - -### Use your own output (io.Writer) - -```go -// Use your own io.Writer output -color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - -blue := color.New(color.FgBlue) -blue.Fprint(writer, "This will print text in blue.") -``` - -### Custom print functions (PrintFunc) - -```go -// Create a custom print function for convenience -red := color.New(color.FgRed).PrintfFunc() -red("Warning") -red("Error: %s", err) - -// Mix up multiple attributes -notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() -notice("Don't forget this...") -``` - -### Custom fprint functions (FprintFunc) - -```go -blue := color.New(FgBlue).FprintfFunc() -blue(myWriter, "important notice: %s", stars) - -// Mix up with multiple attributes -success := color.New(color.Bold, color.FgGreen).FprintlnFunc() -success(myWriter, "Don't forget this...") -``` - -### Insert into noncolor strings (SprintFunc) - -```go -// Create SprintXxx functions to mix strings with other non-colorized strings: -yellow := color.New(color.FgYellow).SprintFunc() -red := color.New(color.FgRed).SprintFunc() -fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) - -info := color.New(color.FgWhite, color.BgGreen).SprintFunc() -fmt.Printf("This %s rocks!\n", info("package")) - -// Use helper functions -fmt.Println("This", color.RedString("warning"), "should be not neglected.") -fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") - -// Windows supported too! Just don't forget to change the output to color.Output -fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) -``` - -### Plug into existing code - -```go -// Use handy standard colors -color.Set(color.FgYellow) - -fmt.Println("Existing text will now be in yellow") -fmt.Printf("This one %s\n", "too") - -color.Unset() // Don't forget to unset - -// You can mix up parameters -color.Set(color.FgMagenta, color.Bold) -defer color.Unset() // Use it in your function - -fmt.Println("All text will now be bold magenta.") -``` - -### Disable/Enable color - -There might be a case where you want to explicitly disable/enable color output. the -`go-isatty` package will automatically disable color output for non-tty output streams -(for example if the output were piped directly to `less`) - -`Color` has support to disable/enable colors both globally and for single color -definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You -can easily disable the color output with: - -```go - -var flagNoColor = flag.Bool("no-color", false, "Disable color output") - -if *flagNoColor { - color.NoColor = true // disables colorized output -} -``` - -It also has support for single color definitions (local). 
You can -disable/enable color output on the fly: - -```go -c := color.New(color.FgCyan) -c.Println("Prints cyan text") - -c.DisableColor() -c.Println("This is printed without any color") - -c.EnableColor() -c.Println("This prints again cyan...") -``` - -## Todo - -* Save/Return previous values -* Evaluate fmt.Formatter interface - - -## Credits - - * [Fatih Arslan](https://github.com/fatih) - * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) - -## License - -The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details - diff --git a/vendor/github.com/cppforlife/color/color.go b/vendor/github.com/cppforlife/color/color.go deleted file mode 100644 index 09f52d3a1..000000000 --- a/vendor/github.com/cppforlife/color/color.go +++ /dev/null @@ -1,615 +0,0 @@ -package color - -import ( - "fmt" - "io" - "os" - "strconv" - "strings" - "sync" - - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" -) - -var ( - // NoColor defines if the output is colorized or not. It's dynamically set to - // false or true based on the stdout's file descriptor referring to a terminal - // or not. This is a global option and affects all colors. For more control - // over each color block use the methods DisableColor() individually. - // To force color display, set the variable FORCE_COLOR = "1" - NoColor = noColorWithEnvVars(os.Getenv("TERM") == "dumb" || - (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))) - - // Output defines the standard output of the print functions. By default - // os.Stdout is used. - Output = colorable.NewColorableStdout() - - // Error defines a color supporting writer for os.Stderr. - Error = colorable.NewColorableStderr() - - // colorsCache is used to reduce the count of created Color objects and - // allows to reuse already created objects with required Attribute. - colorsCache = make(map[Attribute]*Color) - colorsCacheMu sync.Mutex // protects colorsCache -) - -func noColorWithEnvVars(noColor bool) bool { - switch os.Getenv("FORCE_COLOR") { - case "0": // disable color - return true - case "1", "2", "3": // enable color - return false - default: - return noColor - } -} - -// Color defines a custom color object which is defined by SGR parameters. -type Color struct { - params []Attribute - noColor *bool -} - -// Attribute defines a single SGR Code -type Attribute int - -const escape = "\x1b" - -// Base attributes -const ( - Reset Attribute = iota - Bold - Faint - Italic - Underline - BlinkSlow - BlinkRapid - ReverseVideo - Concealed - CrossedOut -) - -// Foreground text colors -const ( - FgBlack Attribute = iota + 30 - FgRed - FgGreen - FgYellow - FgBlue - FgMagenta - FgCyan - FgWhite -) - -// Foreground Hi-Intensity text colors -const ( - FgHiBlack Attribute = iota + 90 - FgHiRed - FgHiGreen - FgHiYellow - FgHiBlue - FgHiMagenta - FgHiCyan - FgHiWhite -) - -// Background text colors -const ( - BgBlack Attribute = iota + 40 - BgRed - BgGreen - BgYellow - BgBlue - BgMagenta - BgCyan - BgWhite -) - -// Background Hi-Intensity text colors -const ( - BgHiBlack Attribute = iota + 100 - BgHiRed - BgHiGreen - BgHiYellow - BgHiBlue - BgHiMagenta - BgHiCyan - BgHiWhite -) - -// New returns a newly created color object. -func New(value ...Attribute) *Color { - c := &Color{params: make([]Attribute, 0)} - c.Add(value...) - return c -} - -// Set sets the given parameters immediately. 
It will change the color of -// output with the given SGR parameters until color.Unset() is called. -func Set(p ...Attribute) *Color { - c := New(p...) - c.Set() - return c -} - -// Unset resets all escape attributes and clears the output. Usually should -// be called after Set(). -func Unset() { - if NoColor { - return - } - - fmt.Fprintf(Output, "%s[%dm", escape, Reset) -} - -// Set sets the SGR sequence. -func (c *Color) Set() *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprintf(Output, c.format()) - return c -} - -func (c *Color) unset() { - if c.isNoColorSet() { - return - } - - Unset() -} - -func (c *Color) setWriter(w io.Writer) *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprintf(w, c.format()) - return c -} - -func (c *Color) unsetWriter(w io.Writer) { - if c.isNoColorSet() { - return - } - - if NoColor { - return - } - - fmt.Fprintf(w, "%s[%dm", escape, Reset) -} - -// Add is used to chain SGR parameters. Use as many as parameters to combine -// and create custom color objects. Example: Add(color.FgRed, color.Underline). -func (c *Color) Add(value ...Attribute) *Color { - c.params = append(c.params, value...) - return c -} - -func (c *Color) prepend(value Attribute) { - c.params = append(c.params, 0) - copy(c.params[1:], c.params[0:]) - c.params[0] = value -} - -// Fprint formats using the default formats for its operands and writes to w. -// Spaces are added between operands when neither is a string. -// It returns the number of bytes written and any write error encountered. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprint(w, a...) -} - -// Print formats using the default formats for its operands and writes to -// standard output. Spaces are added between operands when neither is a -// string. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Print(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprint(Output, a...) -} - -// Fprintf formats according to a format specifier and writes to w. -// It returns the number of bytes written and any write error encountered. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintf(w, format, a...) -} - -// Printf formats according to a format specifier and writes to standard output. -// It returns the number of bytes written and any write error encountered. -// This is the standard fmt.Printf() method wrapped with the given color. -func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintf(Output, format, a...) -} - -// Fprintln formats using the default formats for its operands and writes to w. -// Spaces are always added between operands and a newline is appended. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintln(w, a...) -} - -// Println formats using the default formats for its operands and writes to -// standard output. 
Spaces are always added between operands and a newline is -// appended. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Println(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintln(Output, a...) -} - -// Sprint is just like Print, but returns a string instead of printing it. -func (c *Color) Sprint(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) -} - -// Sprintln is just like Println, but returns a string instead of printing it. -func (c *Color) Sprintln(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) -} - -// Sprintf is just like Printf, but returns a string instead of printing it. -func (c *Color) Sprintf(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) -} - -// FprintFunc returns a new function that prints the passed arguments as -// colorized with color.Fprint(). -func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprint(w, a...) - } -} - -// PrintFunc returns a new function that prints the passed arguments as -// colorized with color.Print(). -func (c *Color) PrintFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Print(a...) - } -} - -// FprintfFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintf(). -func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { - return func(w io.Writer, format string, a ...interface{}) { - c.Fprintf(w, format, a...) - } -} - -// PrintfFunc returns a new function that prints the passed arguments as -// colorized with color.Printf(). -func (c *Color) PrintfFunc() func(format string, a ...interface{}) { - return func(format string, a ...interface{}) { - c.Printf(format, a...) - } -} - -// FprintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintln(). -func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprintln(w, a...) - } -} - -// PrintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Println(). -func (c *Color) PrintlnFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Println(a...) - } -} - -// SprintFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprint(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output, example: -// -// put := New(FgYellow).SprintFunc() -// fmt.Fprintf(color.Output, "This is a %s", put("warning")) -func (c *Color) SprintFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) - } -} - -// SprintfFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintf(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. -func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { - return func(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) - } -} - -// SprintlnFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintln(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. 
-func (c *Color) SprintlnFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) - } -} - -// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" -// an example output might be: "1;36" -> bold cyan -func (c *Color) sequence() string { - format := make([]string, len(c.params)) - for i, v := range c.params { - format[i] = strconv.Itoa(int(v)) - } - - return strings.Join(format, ";") -} - -// wrap wraps the s string with the colors attributes. The string is ready to -// be printed. -func (c *Color) wrap(s string) string { - if c.isNoColorSet() { - return s - } - - return c.format() + s + c.unformat() -} - -func (c *Color) format() string { - return fmt.Sprintf("%s[%sm", escape, c.sequence()) -} - -func (c *Color) unformat() string { - return fmt.Sprintf("%s[%dm", escape, Reset) -} - -// DisableColor disables the color output. Useful to not change any existing -// code and still being able to output. Can be used for flags like -// "--no-color". To enable back use EnableColor() method. -func (c *Color) DisableColor() { - c.noColor = boolPtr(true) -} - -// EnableColor enables the color output. Use it in conjunction with -// DisableColor(). Otherwise this method has no side effects. -func (c *Color) EnableColor() { - c.noColor = boolPtr(false) -} - -func (c *Color) isNoColorSet() bool { - // check first if we have user setted action - if c.noColor != nil { - return *c.noColor - } - - // if not return the global option, which is disabled by default - return NoColor -} - -// Equals returns a boolean value indicating whether two colors are equal. -func (c *Color) Equals(c2 *Color) bool { - if len(c.params) != len(c2.params) { - return false - } - - for _, attr := range c.params { - if !c2.attrExists(attr) { - return false - } - } - - return true -} - -func (c *Color) attrExists(a Attribute) bool { - for _, attr := range c.params { - if attr == a { - return true - } - } - - return false -} - -func boolPtr(v bool) *bool { - return &v -} - -func getCachedColor(p Attribute) *Color { - colorsCacheMu.Lock() - defer colorsCacheMu.Unlock() - - c, ok := colorsCache[p] - if !ok { - c = New(p) - colorsCache[p] = c - } - - return c -} - -func colorPrint(format string, p Attribute, a ...interface{}) { - c := getCachedColor(p) - - if !strings.HasSuffix(format, "\n") { - format += "\n" - } - - if len(a) == 0 { - c.Print(format) - } else { - c.Printf(format, a...) - } -} - -func colorString(format string, p Attribute, a ...interface{}) string { - c := getCachedColor(p) - - if len(a) == 0 { - return c.SprintFunc()(format) - } - - return c.SprintfFunc()(format, a...) -} - -// Black is a convenient helper function to print with black foreground. A -// newline is appended to format by default. -func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } - -// Red is a convenient helper function to print with red foreground. A -// newline is appended to format by default. -func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } - -// Green is a convenient helper function to print with green foreground. A -// newline is appended to format by default. -func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } - -// Yellow is a convenient helper function to print with yellow foreground. -// A newline is appended to format by default. -func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) 
} - -// Blue is a convenient helper function to print with blue foreground. A -// newline is appended to format by default. -func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } - -// Magenta is a convenient helper function to print with magenta foreground. -// A newline is appended to format by default. -func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } - -// Cyan is a convenient helper function to print with cyan foreground. A -// newline is appended to format by default. -func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } - -// White is a convenient helper function to print with white foreground. A -// newline is appended to format by default. -func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } - -// BlackString is a convenient helper function to return a string with black -// foreground. -func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } - -// RedString is a convenient helper function to return a string with red -// foreground. -func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } - -// GreenString is a convenient helper function to return a string with green -// foreground. -func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } - -// YellowString is a convenient helper function to return a string with yellow -// foreground. -func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } - -// BlueString is a convenient helper function to return a string with blue -// foreground. -func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } - -// MagentaString is a convenient helper function to return a string with magenta -// foreground. -func MagentaString(format string, a ...interface{}) string { - return colorString(format, FgMagenta, a...) -} - -// CyanString is a convenient helper function to return a string with cyan -// foreground. -func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } - -// WhiteString is a convenient helper function to return a string with white -// foreground. -func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } - -// HiBlack is a convenient helper function to print with hi-intensity black foreground. A -// newline is appended to format by default. -func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } - -// HiRed is a convenient helper function to print with hi-intensity red foreground. A -// newline is appended to format by default. -func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } - -// HiGreen is a convenient helper function to print with hi-intensity green foreground. A -// newline is appended to format by default. -func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } - -// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. -// A newline is appended to format by default. -func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } - -// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A -// newline is appended to format by default. -func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) 
} - -// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. -// A newline is appended to format by default. -func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } - -// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A -// newline is appended to format by default. -func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } - -// HiWhite is a convenient helper function to print with hi-intensity white foreground. A -// newline is appended to format by default. -func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } - -// HiBlackString is a convenient helper function to return a string with hi-intensity black -// foreground. -func HiBlackString(format string, a ...interface{}) string { - return colorString(format, FgHiBlack, a...) -} - -// HiRedString is a convenient helper function to return a string with hi-intensity red -// foreground. -func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } - -// HiGreenString is a convenient helper function to return a string with hi-intensity green -// foreground. -func HiGreenString(format string, a ...interface{}) string { - return colorString(format, FgHiGreen, a...) -} - -// HiYellowString is a convenient helper function to return a string with hi-intensity yellow -// foreground. -func HiYellowString(format string, a ...interface{}) string { - return colorString(format, FgHiYellow, a...) -} - -// HiBlueString is a convenient helper function to return a string with hi-intensity blue -// foreground. -func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } - -// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta -// foreground. -func HiMagentaString(format string, a ...interface{}) string { - return colorString(format, FgHiMagenta, a...) -} - -// HiCyanString is a convenient helper function to return a string with hi-intensity cyan -// foreground. -func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } - -// HiWhiteString is a convenient helper function to return a string with hi-intensity white -// foreground. -func HiWhiteString(format string, a ...interface{}) string { - return colorString(format, FgHiWhite, a...) -} diff --git a/vendor/github.com/cppforlife/color/doc.go b/vendor/github.com/cppforlife/color/doc.go deleted file mode 100644 index cf1e96500..000000000 --- a/vendor/github.com/cppforlife/color/doc.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Package color is an ANSI color package to output colorized or SGR defined -output to the standard output. The API can be used in several way, pick one -that suits you. - -Use simple and default helper functions with predefined foreground colors: - - color.Cyan("Prints text in cyan.") - - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") - - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") - - // Hi-intensity colors - color.HiGreen("Bright green color.") - color.HiBlack("Bright black means gray..") - color.HiWhite("Shiny white color!") - -However there are times where custom color mixes are required. Below are some -examples to create custom color objects and use the print functions of each -separate color object. 
- - // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") - - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") - - - // Mix up foreground and background colors, create new mixes! - red := color.New(color.FgRed) - - boldRed := red.Add(color.Bold) - boldRed.Println("This will print text in bold red.") - - whiteBackground := red.Add(color.BgWhite) - whiteBackground.Println("Red text with White background.") - - // Use your own io.Writer output - color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - - blue := color.New(color.FgBlue) - blue.Fprint(myWriter, "This will print text in blue.") - -You can create PrintXxx functions to simplify even more: - - // Create a custom print function for convenient - red := color.New(color.FgRed).PrintfFunc() - red("warning") - red("error: %s", err) - - // Mix up multiple attributes - notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() - notice("don't forget this...") - -You can also FprintXxx functions to pass your own io.Writer: - - blue := color.New(FgBlue).FprintfFunc() - blue(myWriter, "important notice: %s", stars) - - // Mix up with multiple attributes - success := color.New(color.Bold, color.FgGreen).FprintlnFunc() - success(myWriter, don't forget this...") - - -Or create SprintXxx functions to mix strings with other non-colorized strings: - - yellow := New(FgYellow).SprintFunc() - red := New(FgRed).SprintFunc() - - fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Printf("this %s rocks!\n", info("package")) - -Windows support is enabled by default. All Print functions work as intended. -However only for color.SprintXXX functions, user should use fmt.FprintXXX and -set the output to color.Output: - - fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) - -Using with existing code is possible. Just use the Set() method to set the -standard output to the given parameters. That way a rewrite of an existing -code is not required. - - // Use handy standard colors. - color.Set(color.FgYellow) - - fmt.Println("Existing text will be now in Yellow") - fmt.Printf("This one %s\n", "too") - - color.Unset() // don't forget to unset - - // You can mix up parameters - color.Set(color.FgMagenta, color.Bold) - defer color.Unset() // use it in your function - - fmt.Println("All text will be now bold magenta.") - -There might be a case where you want to disable color output (for example to -pipe the standard output of your app to somewhere else). `Color` has support to -disable colors both globally and for single color definition. For example -suppose you have a CLI app and a `--no-color` bool flag. You can easily disable -the color output with: - - var flagNoColor = flag.Bool("no-color", false, "Disable color output") - - if *flagNoColor { - color.NoColor = true // disables colorized output - } - -It also has support for single color definitions (local). 
You can -disable/enable color output on the fly: - - c := color.New(color.FgCyan) - c.Println("Prints cyan text") - - c.DisableColor() - c.Println("This is printed without any color") - - c.EnableColor() - c.Println("This prints again cyan...") -*/ -package color diff --git a/vendor/github.com/cppforlife/go-cli-ui/LICENSE b/vendor/github.com/cppforlife/go-cli-ui/LICENSE deleted file mode 100644 index 145c03dcd..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2018 Dmitriy Kalinin - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/color_ui.go b/vendor/github.com/cppforlife/go-cli-ui/ui/color_ui.go deleted file mode 100644 index 2767c5fb7..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/color_ui.go +++ /dev/null @@ -1,103 +0,0 @@ -package ui - -import ( - "github.com/cppforlife/color" - - . "github.com/cppforlife/go-cli-ui/ui/table" -) - -type ColorUI struct { - parent UI - okFunc func(string, ...interface{}) string - errFunc func(string, ...interface{}) string - boldFunc func(string, ...interface{}) string -} - -func NewColorUI(parent UI) *ColorUI { - return &ColorUI{ - parent: parent, - okFunc: color.New(color.FgGreen).SprintfFunc(), - errFunc: color.New(color.FgRed).SprintfFunc(), - boldFunc: color.New(color.Bold).SprintfFunc(), - } -} - -func (ui *ColorUI) ErrorLinef(pattern string, args ...interface{}) { - ui.parent.ErrorLinef("%s", ui.errFunc(pattern, args...)) -} - -func (ui *ColorUI) PrintLinef(pattern string, args ...interface{}) { - ui.parent.PrintLinef(pattern, args...) -} - -func (ui *ColorUI) BeginLinef(pattern string, args ...interface{}) { - ui.parent.BeginLinef(pattern, args...) -} - -func (ui *ColorUI) EndLinef(pattern string, args ...interface{}) { - ui.parent.EndLinef(pattern, args...) 
-} - -func (ui *ColorUI) PrintBlock(block []byte) { - ui.parent.PrintBlock(block) -} - -func (ui *ColorUI) PrintErrorBlock(block string) { - ui.parent.PrintErrorBlock(ui.errFunc("%s", block)) -} - -func (ui *ColorUI) PrintTable(table Table) { - table.HeaderFormatFunc = ui.boldFunc - - for k, s := range table.Sections { - for i, r := range s.Rows { - for j, v := range r { - table.Sections[k].Rows[i][j] = ui.colorValueFmt(v) - } - } - } - - for i, r := range table.Rows { - for j, v := range r { - table.Rows[i][j] = ui.colorValueFmt(v) - } - } - - ui.parent.PrintTable(table) -} - -func (ui *ColorUI) AskForText(label string) (string, error) { - return ui.parent.AskForText(label) -} - -func (ui *ColorUI) AskForChoice(label string, options []string) (int, error) { - return ui.parent.AskForChoice(label, options) -} - -func (ui *ColorUI) AskForPassword(label string) (string, error) { - return ui.parent.AskForPassword(label) -} - -func (ui *ColorUI) AskForConfirmation() error { - return ui.parent.AskForConfirmation() -} - -func (ui *ColorUI) IsInteractive() bool { - return ui.parent.IsInteractive() -} - -func (ui *ColorUI) Flush() { - ui.parent.Flush() -} - -func (ui *ColorUI) colorValueFmt(val Value) Value { - if valFmt, ok := val.(ValueFmt); ok { - if valFmt.Error { - valFmt.Func = ui.errFunc - } else { - valFmt.Func = ui.okFunc - } - return valFmt - } - return val -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/combo_writer.go b/vendor/github.com/cppforlife/go-cli-ui/ui/combo_writer.go deleted file mode 100644 index f3ba3c8f0..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/combo_writer.go +++ /dev/null @@ -1,57 +0,0 @@ -package ui - -import ( - "io" - "strings" - "sync" -) - -type ComboWriter struct { - ui UI - uiLock sync.Mutex - onNewLine bool -} - -type prefixedWriter struct { - w *ComboWriter - prefix string -} - -func NewComboWriter(ui UI) *ComboWriter { - return &ComboWriter{ui: ui, onNewLine: true} -} - -func (w *ComboWriter) Writer(prefix string) io.Writer { - return prefixedWriter{w: w, prefix: prefix} -} - -func (s prefixedWriter) Write(bytes []byte) (int, error) { - if len(bytes) == 0 { - return 0, nil - } - - s.w.uiLock.Lock() - defer s.w.uiLock.Unlock() - - lines := strings.Split(string(bytes), "\n") - - for i, line := range lines { - lastLine := i == len(lines)-1 - - if !lastLine || len(line) > 0 { - if s.w.onNewLine { - s.w.ui.PrintBlock([]byte(s.prefix)) - } - - s.w.ui.PrintBlock([]byte(line)) - s.w.onNewLine = false - - if !lastLine { - s.w.ui.PrintBlock([]byte("\n")) - s.w.onNewLine = true - } - } - } - - return len(bytes), nil -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/conf_ui.go b/vendor/github.com/cppforlife/go-cli-ui/ui/conf_ui.go deleted file mode 100644 index 8016d9730..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/conf_ui.go +++ /dev/null @@ -1,114 +0,0 @@ -package ui - -import ( - . 
"github.com/cppforlife/go-cli-ui/ui/table" -) - -type ConfUI struct { - parent UI - isTTY bool - logger ExternalLogger - showColumns []Header -} - -func NewConfUI(logger ExternalLogger) *ConfUI { - var ui UI - - writerUI := NewConsoleUI(logger) - ui = NewPaddingUI(writerUI) - - return &ConfUI{ - parent: ui, - isTTY: writerUI.IsTTY(), - logger: logger, - } -} - -func NewWrappingConfUI(parent UI, logger ExternalLogger) *ConfUI { - return &ConfUI{ - parent: parent, - isTTY: true, - logger: logger, - } -} - -func (ui *ConfUI) EnableTTY(force bool) { - if !ui.isTTY && !force { - ui.parent = NewNonTTYUI(ui.parent) - } -} - -func (ui *ConfUI) EnableColor() { - ui.parent = NewColorUI(ui.parent) -} - -func (ui *ConfUI) EnableJSON() { - ui.parent = NewJSONUI(ui.parent, ui.logger) -} - -func (ui *ConfUI) ShowColumns(columns []Header) { - ui.showColumns = columns -} - -func (ui *ConfUI) EnableNonInteractive() { - ui.parent = NewNonInteractiveUI(ui.parent) -} - -func (ui *ConfUI) ErrorLinef(pattern string, args ...interface{}) { - ui.parent.ErrorLinef(pattern, args...) -} - -func (ui *ConfUI) PrintLinef(pattern string, args ...interface{}) { - ui.parent.PrintLinef(pattern, args...) -} - -func (ui *ConfUI) BeginLinef(pattern string, args ...interface{}) { - ui.parent.BeginLinef(pattern, args...) -} - -func (ui *ConfUI) EndLinef(pattern string, args ...interface{}) { - ui.parent.EndLinef(pattern, args...) -} - -func (ui *ConfUI) PrintBlock(block []byte) { - ui.parent.PrintBlock(block) -} - -func (ui *ConfUI) PrintErrorBlock(block string) { - ui.parent.PrintErrorBlock(block) -} - -func (ui *ConfUI) PrintTable(table Table) { - if len(ui.showColumns) > 0 { - err := table.SetColumnVisibility(ui.showColumns) - if err != nil { - panic(err) - } - } - - ui.parent.PrintTable(table) -} - -func (ui *ConfUI) AskForText(label string) (string, error) { - return ui.parent.AskForText(label) -} - -func (ui *ConfUI) AskForChoice(label string, options []string) (int, error) { - return ui.parent.AskForChoice(label, options) -} - -func (ui *ConfUI) AskForPassword(label string) (string, error) { - return ui.parent.AskForPassword(label) -} - -func (ui *ConfUI) AskForConfirmation() error { - return ui.parent.AskForConfirmation() -} - -func (ui *ConfUI) IsInteractive() bool { - return ui.parent.IsInteractive() -} - -func (ui *ConfUI) Flush() { - ui.parent.Flush() -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/indenting_ui.go b/vendor/github.com/cppforlife/go-cli-ui/ui/indenting_ui.go deleted file mode 100644 index b2a3a6d25..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/indenting_ui.go +++ /dev/null @@ -1,67 +0,0 @@ -package ui - -import ( - "fmt" - - . "github.com/cppforlife/go-cli-ui/ui/table" -) - -type IndentingUI struct { - parent UI -} - -func NewIndentingUI(parent UI) *IndentingUI { - return &IndentingUI{parent: parent} -} - -func (ui *IndentingUI) ErrorLinef(pattern string, args ...interface{}) { - ui.parent.ErrorLinef(" %s", fmt.Sprintf(pattern, args...)) -} - -func (ui *IndentingUI) PrintLinef(pattern string, args ...interface{}) { - ui.parent.PrintLinef(" %s", fmt.Sprintf(pattern, args...)) -} - -func (ui *IndentingUI) BeginLinef(pattern string, args ...interface{}) { - ui.parent.BeginLinef(" %s", fmt.Sprintf(pattern, args...)) -} - -func (ui *IndentingUI) EndLinef(pattern string, args ...interface{}) { - ui.parent.EndLinef(pattern, args...) 
-} - -func (ui *IndentingUI) PrintBlock(block []byte) { - ui.parent.PrintBlock(block) -} - -func (ui *IndentingUI) PrintErrorBlock(block string) { - ui.parent.PrintErrorBlock(block) -} - -func (ui *IndentingUI) PrintTable(table Table) { - ui.parent.PrintTable(table) -} - -func (ui *IndentingUI) AskForText(label string) (string, error) { - return ui.parent.AskForText(label) -} - -func (ui *IndentingUI) AskForChoice(label string, options []string) (int, error) { - return ui.parent.AskForChoice(label, options) -} - -func (ui *IndentingUI) AskForPassword(label string) (string, error) { - return ui.parent.AskForPassword(label) -} - -func (ui *IndentingUI) AskForConfirmation() error { - return ui.parent.AskForConfirmation() -} - -func (ui *IndentingUI) IsInteractive() bool { - return ui.parent.IsInteractive() -} - -func (ui *IndentingUI) Flush() { - ui.parent.Flush() -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/interfaces.go b/vendor/github.com/cppforlife/go-cli-ui/ui/interfaces.go deleted file mode 100644 index 4ffd5e941..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/interfaces.go +++ /dev/null @@ -1,34 +0,0 @@ -package ui - -import ( - . "github.com/cppforlife/go-cli-ui/ui/table" -) - -type UI interface { - ErrorLinef(pattern string, args ...interface{}) - PrintLinef(pattern string, args ...interface{}) - - BeginLinef(pattern string, args ...interface{}) - EndLinef(pattern string, args ...interface{}) - - PrintBlock([]byte) // takes []byte to avoid string copy - PrintErrorBlock(string) - - PrintTable(Table) - - AskForText(label string) (string, error) - AskForChoice(label string, options []string) (int, error) - AskForPassword(label string) (string, error) - - // AskForConfirmation returns error if user doesnt want to continue - AskForConfirmation() error - - IsInteractive() bool - - Flush() -} - -type ExternalLogger interface { - Error(tag, msg string, args ...interface{}) - Debug(tag, msg string, args ...interface{}) -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/json_ui.go b/vendor/github.com/cppforlife/go-cli-ui/ui/json_ui.go deleted file mode 100644 index cc98cd0bb..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/json_ui.go +++ /dev/null @@ -1,159 +0,0 @@ -package ui - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - - . 
"github.com/cppforlife/go-cli-ui/ui/table" -) - -type JSONUI struct { - parent UI - uiResp JSONUIResp - - logTag string - logger ExternalLogger -} - -type JSONUIResp struct { - Tables []JSONUITableResp - Blocks []string - Lines []string -} - -type JSONUITableResp struct { - Content string - Header map[string]string - Rows []map[string]string - Notes []string -} - -func NewJSONUI(parent UI, logger ExternalLogger) *JSONUI { - return &JSONUI{parent: parent, logTag: "JSONUI", logger: logger} -} - -func (ui *JSONUI) ErrorLinef(pattern string, args ...interface{}) { - ui.addLine(pattern, args) -} - -func (ui *JSONUI) PrintLinef(pattern string, args ...interface{}) { - ui.addLine(pattern, args) -} - -func (ui *JSONUI) BeginLinef(pattern string, args ...interface{}) { - ui.addLine(pattern, args) -} - -func (ui *JSONUI) EndLinef(pattern string, args ...interface{}) { - ui.addLine(pattern, args) -} - -func (ui *JSONUI) PrintBlock(block []byte) { - ui.uiResp.Blocks = append(ui.uiResp.Blocks, string(block)) -} - -func (ui *JSONUI) PrintErrorBlock(block string) { - ui.uiResp.Blocks = append(ui.uiResp.Blocks, block) -} - -func (ui *JSONUI) PrintTable(table Table) { - table.FillFirstColumn = true - - header := map[string]string{} - - if len(table.Header) > 0 { - for i, val := range table.Header { - if val.Hidden { - continue - } - - if val.Key == string(UNKNOWN_HEADER_MAPPING) { - table.Header[i].Key = strconv.Itoa(i) - } - - header[table.Header[i].Key] = val.Title - } - } else if len(table.AsRows()) > 0 { - var rawHeaders []Header - for i, _ := range table.AsRows()[0] { - val := Header{ - Key: fmt.Sprintf("col_%d", i), - Hidden: false, - } - header[val.Key] = val.Title - rawHeaders = append(rawHeaders, val) - } - table.Header = rawHeaders - } - - resp := JSONUITableResp{ - Content: table.Content, - Header: header, - Rows: ui.stringRows(table.Header, table.AsRows()), - Notes: table.Notes, - } - - ui.uiResp.Tables = append(ui.uiResp.Tables, resp) -} - -func (ui *JSONUI) AskForText(_ string) (string, error) { - panic("Cannot ask for input in JSON UI") -} - -func (ui *JSONUI) AskForChoice(_ string, _ []string) (int, error) { - panic("Cannot ask for a choice in JSON UI") -} - -func (ui *JSONUI) AskForPassword(_ string) (string, error) { - panic("Cannot ask for password in JSON UI") -} - -func (ui *JSONUI) AskForConfirmation() error { - panic("Cannot ask for confirmation in JSON UI") -} - -func (ui *JSONUI) IsInteractive() bool { - return ui.parent.IsInteractive() -} - -func (ui *JSONUI) Flush() { - defer ui.parent.Flush() - - if !reflect.DeepEqual(ui.uiResp, JSONUIResp{}) { - bytes, err := json.MarshalIndent(ui.uiResp, "", " ") - if err != nil { - ui.logger.Error(ui.logTag, "Failed to marshal UI response") - return - } - - ui.parent.PrintBlock(bytes) - } -} - -func (ui *JSONUI) stringRows(header []Header, rows [][]Value) []map[string]string { - result := []map[string]string{} - - for _, row := range rows { - data := map[string]string{} - - for i, col := range row { - if header[i].Hidden { - continue - } - - data[header[i].Key] = col.String() - } - - result = append(result, data) - } - - return result -} - -func (ui *JSONUI) addLine(pattern string, args []interface{}) { - msg := fmt.Sprintf(pattern, args...) 
- ui.uiResp.Lines = append(ui.uiResp.Lines, msg) - ui.logger.Debug(ui.logTag, msg) -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/non_interactive.go b/vendor/github.com/cppforlife/go-cli-ui/ui/non_interactive.go deleted file mode 100644 index 33f087b8a..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/non_interactive.go +++ /dev/null @@ -1,66 +0,0 @@ -package ui - -import ( - . "github.com/cppforlife/go-cli-ui/ui/table" -) - -type NonInteractiveUI struct { - parent UI -} - -func NewNonInteractiveUI(parent UI) *NonInteractiveUI { - return &NonInteractiveUI{parent: parent} -} - -func (ui *NonInteractiveUI) ErrorLinef(pattern string, args ...interface{}) { - ui.parent.ErrorLinef(pattern, args...) -} - -func (ui *NonInteractiveUI) PrintLinef(pattern string, args ...interface{}) { - ui.parent.PrintLinef(pattern, args...) -} - -func (ui *NonInteractiveUI) BeginLinef(pattern string, args ...interface{}) { - ui.parent.BeginLinef(pattern, args...) -} - -func (ui *NonInteractiveUI) EndLinef(pattern string, args ...interface{}) { - ui.parent.EndLinef(pattern, args...) -} - -func (ui *NonInteractiveUI) PrintBlock(block []byte) { - ui.parent.PrintBlock(block) -} - -func (ui *NonInteractiveUI) PrintErrorBlock(block string) { - ui.parent.PrintErrorBlock(block) -} - -func (ui *NonInteractiveUI) PrintTable(table Table) { - ui.parent.PrintTable(table) -} - -func (ui *NonInteractiveUI) AskForText(label string) (string, error) { - panic("Cannot ask for input in non-interactive UI") -} - -func (ui *NonInteractiveUI) AskForChoice(label string, options []string) (int, error) { - panic("Cannot ask for a choice in non-interactive UI") -} - -func (ui *NonInteractiveUI) AskForPassword(label string) (string, error) { - panic("Cannot ask for password in non-interactive UI") -} - -func (ui *NonInteractiveUI) AskForConfirmation() error { - // Always respond successfully - return nil -} - -func (ui *NonInteractiveUI) IsInteractive() bool { - return false -} - -func (ui *NonInteractiveUI) Flush() { - ui.parent.Flush() -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/non_tty_ui.go b/vendor/github.com/cppforlife/go-cli-ui/ui/non_tty_ui.go deleted file mode 100644 index bc6375a1e..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/non_tty_ui.go +++ /dev/null @@ -1,64 +0,0 @@ -package ui - -import ( - . "github.com/cppforlife/go-cli-ui/ui/table" -) - -type NonTTYUI struct { - parent UI -} - -func NewNonTTYUI(parent UI) *NonTTYUI { - return &NonTTYUI{parent: parent} -} - -func (ui *NonTTYUI) ErrorLinef(pattern string, args ...interface{}) { - ui.parent.ErrorLinef(pattern, args...) 
-} - -func (ui *NonTTYUI) PrintLinef(pattern string, args ...interface{}) {} -func (ui *NonTTYUI) BeginLinef(pattern string, args ...interface{}) {} -func (ui *NonTTYUI) EndLinef(pattern string, args ...interface{}) {} - -func (ui *NonTTYUI) PrintBlock(block []byte) { ui.parent.PrintBlock(block) } -func (ui *NonTTYUI) PrintErrorBlock(block string) { ui.parent.PrintErrorBlock(block) } - -func (ui *NonTTYUI) PrintTable(table Table) { - // hide decorations - table.Title = "" - table.Notes = nil - table.Content = "" - table.DataOnly = true - - // necessary for grep - table.FillFirstColumn = true - - // cut's default delim - table.BorderStr = "\t" - - ui.parent.PrintTable(table) -} - -func (ui *NonTTYUI) AskForText(label string) (string, error) { - return ui.parent.AskForText(label) -} - -func (ui *NonTTYUI) AskForChoice(label string, options []string) (int, error) { - return ui.parent.AskForChoice(label, options) -} - -func (ui *NonTTYUI) AskForPassword(label string) (string, error) { - return ui.parent.AskForPassword(label) -} - -func (ui *NonTTYUI) AskForConfirmation() error { - return ui.parent.AskForConfirmation() -} - -func (ui *NonTTYUI) IsInteractive() bool { - return ui.parent.IsInteractive() -} - -func (ui *NonTTYUI) Flush() { - ui.parent.Flush() -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/noop_logger.go b/vendor/github.com/cppforlife/go-cli-ui/ui/noop_logger.go deleted file mode 100644 index 047b5a159..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/noop_logger.go +++ /dev/null @@ -1,12 +0,0 @@ -package ui - -type NoopLogger struct{} - -func NewNoopLogger() NoopLogger { - return NoopLogger{} -} - -var _ ExternalLogger = NoopLogger{} - -func (l NoopLogger) Error(tag, msg string, args ...interface{}) {} -func (l NoopLogger) Debug(tag, msg string, args ...interface{}) {} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/noop_ui.go b/vendor/github.com/cppforlife/go-cli-ui/ui/noop_ui.go deleted file mode 100644 index aeb22fb87..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/noop_ui.go +++ /dev/null @@ -1,9 +0,0 @@ -package ui - -import ( - "io/ioutil" -) - -func NewNoopUI() *WriterUI { - return NewWriterUI(ioutil.Discard, ioutil.Discard, NewNoopLogger()) -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/padding_ui.go b/vendor/github.com/cppforlife/go-cli-ui/ui/padding_ui.go deleted file mode 100644 index 6a6fa493d..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/padding_ui.go +++ /dev/null @@ -1,100 +0,0 @@ -package ui - -import ( - . "github.com/cppforlife/go-cli-ui/ui/table" -) - -type paddingUIMode int - -const ( - paddingUIModeNone paddingUIMode = iota - paddingUIModeRaw - paddingUIModeAuto - paddingUIModeAskText -) - -type PaddingUI struct { - parent UI - prevMode paddingUIMode -} - -func NewPaddingUI(parent UI) *PaddingUI { - return &PaddingUI{parent: parent} -} - -func (ui *PaddingUI) ErrorLinef(pattern string, args ...interface{}) { - ui.padBefore(paddingUIModeAuto) - ui.parent.ErrorLinef(pattern, args...) -} - -func (ui *PaddingUI) PrintLinef(pattern string, args ...interface{}) { - ui.padBefore(paddingUIModeAuto) - ui.parent.PrintLinef(pattern, args...) -} - -func (ui *PaddingUI) BeginLinef(pattern string, args ...interface{}) { - ui.padBefore(paddingUIModeRaw) - ui.parent.BeginLinef(pattern, args...) -} - -func (ui *PaddingUI) EndLinef(pattern string, args ...interface{}) { - ui.padBefore(paddingUIModeRaw) - ui.parent.EndLinef(pattern, args...) 
-} - -func (ui *PaddingUI) PrintBlock(block []byte) { - ui.padBefore(paddingUIModeRaw) - ui.parent.PrintBlock(block) -} - -func (ui *PaddingUI) PrintErrorBlock(block string) { - ui.padBefore(paddingUIModeRaw) - ui.parent.PrintErrorBlock(block) -} - -func (ui *PaddingUI) PrintTable(table Table) { - ui.padBefore(paddingUIModeAuto) - ui.parent.PrintTable(table) -} - -func (ui *PaddingUI) AskForText(label string) (string, error) { - ui.padBefore(paddingUIModeAskText) - return ui.parent.AskForText(label) -} - -func (ui *PaddingUI) AskForChoice(label string, options []string) (int, error) { - ui.padBefore(paddingUIModeAuto) - return ui.parent.AskForChoice(label, options) -} - -func (ui *PaddingUI) AskForPassword(label string) (string, error) { - ui.padBefore(paddingUIModeAskText) - return ui.parent.AskForPassword(label) -} - -func (ui *PaddingUI) AskForConfirmation() error { - ui.padBefore(paddingUIModeAuto) - return ui.parent.AskForConfirmation() -} - -func (ui *PaddingUI) IsInteractive() bool { - return ui.parent.IsInteractive() -} - -func (ui *PaddingUI) Flush() { - ui.parent.Flush() -} - -func (ui *PaddingUI) padBefore(currMode paddingUIMode) { - switch { - case ui.prevMode == paddingUIModeNone: - // do nothing on the first time UI is called - case ui.prevMode == paddingUIModeAskText && currMode == paddingUIModeAskText: - // do nothing - case ui.prevMode == paddingUIModeRaw && currMode == paddingUIModeRaw: - // do nothing - default: - ui.parent.PrintLinef("") - } - ui.prevMode = currMode -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/table/headers.go b/vendor/github.com/cppforlife/go-cli-ui/ui/table/headers.go deleted file mode 100644 index eedc7ff12..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/table/headers.go +++ /dev/null @@ -1,69 +0,0 @@ -package table - -import ( - "fmt" - "strings" - "unicode" -) - -const UNKNOWN_HEADER_MAPPING rune = '_' - -func NewHeader(title string) Header { - return Header{ - Key: KeyifyHeader(title), - Title: title, - Hidden: false, - } -} - -func (t *Table) SetColumnVisibility(headers []Header) error { - for tableHeaderIdx, _ := range t.Header { - t.Header[tableHeaderIdx].Hidden = true - } - - for _, header := range headers { - foundHeader := false - - for tableHeaderIdx, tableHeader := range t.Header { - if tableHeader.Key == header.Key || tableHeader.Title == header.Title { - t.Header[tableHeaderIdx].Hidden = false - foundHeader = true - - break - } - } - - if !foundHeader { - // key may be empty; if title is present - return fmt.Errorf("Failed to find header: %s", header.Key) - } - } - - return nil -} - -func KeyifyHeader(header string) string { - splittedStrings := strings.Split(cleanHeader(header), " ") - splittedTrimmedStrings := []string{} - for _, s := range splittedStrings { - if s != "" { - splittedTrimmedStrings = append(splittedTrimmedStrings, s) - } - } - - join := strings.Join(splittedTrimmedStrings, "_") - if len(join) == 0 { - return string(UNKNOWN_HEADER_MAPPING) - } - return join -} - -func cleanHeader(header string) string { - return strings.Map(func(r rune) rune { - if unicode.IsLetter(r) || unicode.IsNumber(r) { - return unicode.ToLower(r) - } else { - return ' ' - } - }, header) -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/table/interfaces.go b/vendor/github.com/cppforlife/go-cli-ui/ui/table/interfaces.go deleted file mode 100644 index bf9ddd3a8..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/table/interfaces.go +++ /dev/null @@ -1,94 +0,0 @@ -package table - -import ( - "time" -) - -type 
Table struct { - Title string - Content string - - Header []Header - - SortBy []ColumnSort - - // Either sections or rows should be provided - Sections []Section - Rows [][]Value - - Notes []string - - // Formatting - HeaderFormatFunc func(string, ...interface{}) string - DataOnly bool - FillFirstColumn bool - DuplicateStr string - BackgroundStr string - BorderStr string - Transpose bool -} - -type Header struct { - Key string - Title string - Hidden bool -} - -type Section struct { - FirstColumn Value - Rows [][]Value -} - -type ColumnSort struct { - Column int - Asc bool -} - -type Value interface { - Value() Value - String() string - Compare(Value) int -} - -type ValueString struct { - S string -} - -type EmptyValue struct{} - -type ValueStrings struct { - S []string -} - -type ValueInt struct { - I int -} - -type ValueTime struct { - T time.Time -} - -type ValueBool struct { - B bool -} - -type ValueInterface struct { - I interface{} -} - -type ValueError struct { - E error -} - -type ValueNone struct{} - -type ValueFmt struct { - V Value - Error bool - Func func(string, ...interface{}) string -} - -type ValueSuffix struct { - V Value - Suffix string -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/table/sorting.go b/vendor/github.com/cppforlife/go-cli-ui/ui/table/sorting.go deleted file mode 100644 index c8c263d30..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/table/sorting.go +++ /dev/null @@ -1,40 +0,0 @@ -package table - -import ( - "math" -) - -type Sorting struct { - SortBy []ColumnSort - Rows [][]Value -} - -func (s Sorting) Len() int { return len(s.Rows) } - -func (s Sorting) Less(i, j int) bool { - var leftScore, rightScore float64 - - for ci, cs := range s.SortBy { - var left, right Value - - left = s.Rows[i][cs.Column].Value() - right = s.Rows[j][cs.Column].Value() - - c := left.Compare(right) - - if c == 0 { - leftScore += 1000 * math.Pow10(10-ci) - rightScore += 1000 * math.Pow10(10-ci) - } else { - if (cs.Asc && c == -1) || (!cs.Asc && c == 1) { - leftScore += 1000 * math.Pow10(10-ci) - } else { - rightScore += 1000 * math.Pow10(10-ci) - } - } - } - - return leftScore > rightScore -} - -func (s Sorting) Swap(i, j int) { s.Rows[i], s.Rows[j] = s.Rows[j], s.Rows[i] } diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/table/table.go b/vendor/github.com/cppforlife/go-cli-ui/ui/table/table.go deleted file mode 100644 index e06f9cb35..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/table/table.go +++ /dev/null @@ -1,209 +0,0 @@ -package table - -import ( - "fmt" - "io" - "sort" - "strings" -) - -func (t Table) AsRows() [][]Value { - rows := [][]Value{} - - totalRows := 0 - - if len(t.Sections) > 0 { - for _, s := range t.Sections { - if s.FirstColumn != nil && len(s.FirstColumn.String()) > 0 { - if len(s.Rows) > 0 && len(s.Rows[0]) > 0 { - for i, _ := range s.Rows { - s.Rows[i][0] = s.FirstColumn - } - } - } - - totalRows += len(s.Rows) - - for _, r := range s.Rows { - rows = append(rows, r) - } - } - } - - if len(t.Rows) > 0 { - totalRows += len(t.Rows) - - for _, r := range t.Rows { - rows = append(rows, r) - } - } - - // Fill in nils - for i, r := range rows { - for j, c := range r { - if c == nil { - rows[i][j] = ValueNone{} - } - } - } - - // Sort all rows - sort.Sort(Sorting{t.SortBy, rows}) - - // Dedup first column - if !t.FillFirstColumn { - var lastVal Value - - for _, r := range rows { - if lastVal == nil { - lastVal = r[0] - } else if lastVal.String() == r[0].String() { - if len(t.DuplicateStr) > 0 { - r[0] = 
ValueString{t.DuplicateStr} - } else { - r[0] = ValueString{"^"} - } - } else { - lastVal = r[0] - } - } - } - - return rows -} - -func (t Table) Print(w io.Writer) error { - if !t.DataOnly { - err := t.printHeader(w) - if err != nil { - return err - } - } - - if len(t.BackgroundStr) == 0 { - t.BackgroundStr = " " - } - - if len(t.BorderStr) == 0 { - t.BorderStr = " " - } - - writer := NewWriter(w, "-", t.BackgroundStr, t.BorderStr) - rowCount := len(t.Rows) - for _, section := range t.Sections { - rowCount += len(section.Rows) - } - - rows := t.AsRows() - - if t.Transpose { - var newRows [][]Value - - headerVals := buildHeaderVals(t) - - for i, row := range rows { - for j, val := range row { - if t.Header[j].Hidden { - continue - } - - newRows = append(newRows, []Value{headerVals[j], val}) - } - - if i < (len(t.Rows) - 1) { - newRows = append(newRows, []Value{ - EmptyValue{}, - EmptyValue{}, - }) - } - } - - rows = newRows - t.Header = []Header{ - {Hidden: t.DataOnly}, - {Hidden: false}, - } - } else { - if !t.DataOnly && len(t.Header) > 0 { - writer.Write(t.Header, buildHeaderVals(t)) - } - } - - for _, row := range rows { - writer.Write(t.Header, row) - } - - err := writer.Flush() - if err != nil { - return err - } - - if !t.DataOnly { - err = t.printFooter(w, rowCount) - } - - return err -} - -func (t Table) AddColumn(header string, values []Value) Table { - // @todo string -> Header? - t.Header = append(t.Header, NewHeader(header)) - - for i, row := range t.Rows { - row = append(row, values[i]) - t.Rows[i] = row - } - - return t -} - -func buildHeaderVals(t Table) []Value { - var headerVals []Value - - if len(t.Header) > 0 { - for _, h := range t.Header { - headerVals = append(headerVals, ValueFmt{ - V: ValueString{h.Title}, - Func: t.HeaderFormatFunc, - }) - } - } - - return headerVals -} - -func (t Table) printHeader(w io.Writer) error { - if len(t.Title) > 0 { - _, err := fmt.Fprintf(w, "%s\n\n", t.Title) - if err != nil { - return err - } - } - - return nil -} - -func (t Table) printFooter(w io.Writer, num int) error { - if len(t.Notes) > 0 { - _, err := fmt.Fprintf(w, "\n") - if err != nil { - return err - } - - for _, n := range t.Notes { - _, err := fmt.Fprintf(w, "%s\n", n) - if err != nil { - return err - } - } - } - - if len(t.Header) > 0 && strings.TrimSpace(t.Content) != "" { - _, err := fmt.Fprintf(w, "\n%d %s\n", num, t.Content) - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/table/values.go b/vendor/github.com/cppforlife/go-cli-ui/ui/table/values.go deleted file mode 100644 index d61fdcc89..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/table/values.go +++ /dev/null @@ -1,174 +0,0 @@ -package table - -import ( - "fmt" - "io" - "reflect" - "strconv" - "strings" - "time" - - "gopkg.in/yaml.v2" -) - -func NewValueString(s string) ValueString { return ValueString{S: s} } - -func (t ValueString) String() string { return t.S } -func (t ValueString) Value() Value { return t } - -func (t ValueString) Compare(other Value) int { - otherS := other.(ValueString).S - switch { - case t.S == otherS: - return 0 - case t.S < otherS: - return -1 - default: - return 1 - } -} - -func (t EmptyValue) String() string { return "" } -func (t EmptyValue) Value() Value { return t } -func (t EmptyValue) Compare(Value) int { return 0 } - -func NewValueStrings(s []string) ValueStrings { return ValueStrings{S: s} } - -func (t ValueStrings) String() string { return strings.Join(t.S, "\n") } -func (t ValueStrings) Value() Value 
{ return t } - -func (t ValueStrings) Compare(other Value) int { - otherS := other.(ValueStrings).S - switch { - case len(t.S) == len(otherS): - return 0 - case len(t.S) < len(otherS): - return -1 - default: - return 1 - } -} - -func NewValueInt(i int) ValueInt { return ValueInt{I: i} } - -func (t ValueInt) String() string { return strconv.Itoa(t.I) } -func (t ValueInt) Value() Value { return t } - -func (t ValueInt) Compare(other Value) int { - otherI := other.(ValueInt).I - switch { - case t.I == otherI: - return 0 - case t.I < otherI: - return -1 - default: - return 1 - } -} - -func NewValueTime(t time.Time) ValueTime { return ValueTime{T: t} } - -func (t ValueTime) String() string { - if t.T.IsZero() { - return "" - } - return t.T.Format(time.RFC3339) -} -func (t ValueTime) Value() Value { return t } - -func (t ValueTime) Compare(other Value) int { - otherT := other.(ValueTime).T - switch { - case t.T.Equal(otherT): - return 0 - case t.T.Before(otherT): - return -1 - default: - return 1 - } -} - -func NewValueBool(b bool) ValueBool { return ValueBool{B: b} } - -func (t ValueBool) String() string { return fmt.Sprintf("%t", t.B) } -func (t ValueBool) Value() Value { return t } - -func (t ValueBool) Compare(other Value) int { - otherB := other.(ValueBool).B - switch { - case t.B == otherB: - return 0 - case t.B == false && otherB == true: - return -1 - default: - return 1 - } -} - -func NewValueError(e error) ValueError { return ValueError{E: e} } - -func (t ValueError) String() string { - if t.E != nil { - return t.E.Error() - } - return "" -} - -func NewValueInterface(i interface{}) ValueInterface { return ValueInterface{I: i} } - -func (t ValueInterface) String() string { - if t.I == nil { - return "" - } - - val := reflect.ValueOf(t.I) - - if val.Kind() == reflect.Map && val.Len() == 0 { - return "" - } else if val.Kind() == reflect.Slice && val.Len() == 0 { - return "" - } - - bytes, err := yaml.Marshal(t.I) - if err != nil { - return fmt.Sprintf(" : %#v", t.I) - } - - return strings.TrimSpace(string(bytes)) -} -func (t ValueInterface) Value() Value { return t } -func (t ValueInterface) Compare(other Value) int { panic("Never called") } - -func (t ValueError) Value() Value { return t } -func (t ValueError) Compare(other Value) int { panic("Never called") } - -func (t ValueNone) String() string { return "" } -func (t ValueNone) Value() Value { return t } -func (t ValueNone) Compare(other Value) int { panic("Never called") } - -func NewValueFmt(v Value, error bool) ValueFmt { return ValueFmt{V: v, Error: error} } - -func (t ValueFmt) String() string { return t.V.String() } -func (t ValueFmt) Value() Value { return t.V } -func (t ValueFmt) Compare(other Value) int { panic("Never called") } - -func (t ValueFmt) Fprintf(w io.Writer, pattern string, rest ...interface{}) (int, error) { - if t.Func == nil { - return fmt.Fprintf(w, pattern, rest...) 
- } - return fmt.Fprintf(w, "%s", t.Func(pattern, rest...)) -} - -func NewValueSuffix(v Value, s string) ValueSuffix { return ValueSuffix{V: v, Suffix: s} } - -func (t ValueSuffix) String() string { - str := t.V.String() - if len(str) > 0 { - return str + t.Suffix - } - - return "" -} - -func (t ValueSuffix) Value() Value { return t.V } -func (t ValueSuffix) Compare(other Value) int { panic("Never called") } diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/table/writer.go b/vendor/github.com/cppforlife/go-cli-ui/ui/table/writer.go deleted file mode 100644 index d0d204d49..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/table/writer.go +++ /dev/null @@ -1,159 +0,0 @@ -package table - -import ( - "fmt" - "io" - "reflect" - "strings" -) - -type Writer struct { - w io.Writer - emptyStr string - bgStr string - borderStr string - - rows []writerRow - widths map[int]int -} - -type writerCell struct { - Value Value - String string - IsEmpty bool -} - -type writerRow struct { - Values []writerCell - IsSpacer bool -} - -type hasCustomWriter interface { - Fprintf(io.Writer, string, ...interface{}) (int, error) -} - -func NewWriter(w io.Writer, emptyStr, bgStr, borderStr string) *Writer { - return &Writer{ - w: w, - emptyStr: emptyStr, - bgStr: bgStr, - borderStr: borderStr, - widths: map[int]int{}, - } -} - -func (w *Writer) Write(headers []Header, vals []Value) { - rowsToAdd := 1 - colsWithRows := [][]writerCell{} - - visibleHeaderIndex := 0 - for i, val := range vals { - if len(headers) > 0 && headers[i].Hidden { - continue - } - - var rowsInCol []writerCell - - cleanStr := strings.Replace(val.String(), "\r", "", -1) - lines := strings.Split(cleanStr, "\n") - - if len(lines) == 1 && lines[0] == "" { - cell := writerCell{Value: val, String: w.emptyStr} - - if reflect.TypeOf(val) == reflect.TypeOf(EmptyValue{}) { - cell.IsEmpty = true - } - rowsInCol = append(rowsInCol, cell) - } else { - for _, line := range lines { - cell := writerCell{Value: val, String: line} - if reflect.TypeOf(val) == reflect.TypeOf(EmptyValue{}) { - cell.IsEmpty = true - } - rowsInCol = append(rowsInCol, cell) - } - } - - rowsInColLen := len(rowsInCol) - - for _, cell := range rowsInCol { - if len(cell.String) > w.widths[visibleHeaderIndex] { - w.widths[visibleHeaderIndex] = len(cell.String) - } - } - - colsWithRows = append(colsWithRows, rowsInCol) - - if rowsInColLen > rowsToAdd { - rowsToAdd = rowsInColLen - } - - visibleHeaderIndex++ - } - - for i := 0; i < rowsToAdd; i++ { - var row writerRow - - rowIsSeparator := true - for _, col := range colsWithRows { - if i < len(col) { - row.Values = append(row.Values, col[i]) - if !col[i].IsEmpty { - rowIsSeparator = false - } - } else { - row.Values = append(row.Values, writerCell{}) - } - } - row.IsSpacer = rowIsSeparator - - w.rows = append(w.rows, row) - } -} - -func (w *Writer) Flush() error { - for _, row := range w.rows { - if row.IsSpacer { - _, err := fmt.Fprintln(w.w) - if err != nil { - return err - } - continue - } - - lastColIdx := len(row.Values) - 1 - for colIdx, col := range row.Values { - if customWriter, ok := col.Value.(hasCustomWriter); ok { - _, err := customWriter.Fprintf(w.w, "%s", col.String) - if err != nil { - return err - } - } else { - _, err := fmt.Fprintf(w.w, "%s", col.String) - if err != nil { - return err - } - } - - paddingSize := w.widths[colIdx] - len(col.String) - if colIdx == lastColIdx { - _, err := fmt.Fprintf(w.w, w.borderStr) - if err != nil { - return err - } - } else { - _, err := fmt.Fprintf(w.w, strings.Repeat(w.bgStr, 
paddingSize)+w.borderStr) - if err != nil { - return err - } - } - } - - _, err := fmt.Fprintln(w.w) - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/cppforlife/go-cli-ui/ui/ui.go b/vendor/github.com/cppforlife/go-cli-ui/ui/ui.go deleted file mode 100644 index 15196f669..000000000 --- a/vendor/github.com/cppforlife/go-cli-ui/ui/ui.go +++ /dev/null @@ -1,157 +0,0 @@ -package ui - -import ( - "errors" - "fmt" - "io" - "os" - - "github.com/mattn/go-isatty" - "github.com/vito/go-interact/interact" - - . "github.com/cppforlife/go-cli-ui/ui/table" -) - -type WriterUI struct { - outWriter io.Writer - errWriter io.Writer - logger ExternalLogger - logTag string -} - -func NewConsoleUI(logger ExternalLogger) *WriterUI { - return NewWriterUI(os.Stdout, os.Stderr, logger) -} - -func NewWriterUI(outWriter, errWriter io.Writer, logger ExternalLogger) *WriterUI { - return &WriterUI{ - outWriter: outWriter, - errWriter: errWriter, - - logTag: "ui", - logger: logger, - } -} - -func (ui *WriterUI) IsTTY() bool { - file, ok := ui.outWriter.(*os.File) - - return ok && isatty.IsTerminal(file.Fd()) -} - -// ErrorLinef starts and ends a text error line -func (ui *WriterUI) ErrorLinef(pattern string, args ...interface{}) { - message := fmt.Sprintf(pattern, args...) - _, err := fmt.Fprintln(ui.errWriter, message) - if err != nil { - ui.logger.Error(ui.logTag, "UI.ErrorLinef failed (message='%s'): %s", message, err) - } -} - -// Printlnf starts and ends a text line -func (ui *WriterUI) PrintLinef(pattern string, args ...interface{}) { - message := fmt.Sprintf(pattern, args...) - _, err := fmt.Fprintln(ui.outWriter, message) - if err != nil { - ui.logger.Error(ui.logTag, "UI.PrintLinef failed (message='%s'): %s", message, err) - } -} - -// PrintBeginf starts a text line -func (ui *WriterUI) BeginLinef(pattern string, args ...interface{}) { - message := fmt.Sprintf(pattern, args...) - _, err := fmt.Fprint(ui.outWriter, message) - if err != nil { - ui.logger.Error(ui.logTag, "UI.BeginLinef failed (message='%s'): %s", message, err) - } -} - -// PrintEndf ends a text line -func (ui *WriterUI) EndLinef(pattern string, args ...interface{}) { - message := fmt.Sprintf(pattern, args...) 
- _, err := fmt.Fprintln(ui.outWriter, message) - if err != nil { - ui.logger.Error(ui.logTag, "UI.EndLinef failed (message='%s'): %s", message, err) - } -} - -func (ui *WriterUI) PrintBlock(block []byte) { - _, err := ui.outWriter.Write(block) - if err != nil { - ui.logger.Error(ui.logTag, "UI.PrintBlock failed (message='%s'): %s", block, err) - } -} - -func (ui *WriterUI) PrintErrorBlock(block string) { - _, err := fmt.Fprint(ui.outWriter, block) - if err != nil { - ui.logger.Error(ui.logTag, "UI.PrintErrorBlock failed (message='%s'): %s", block, err) - } -} - -func (ui *WriterUI) PrintTable(table Table) { - err := table.Print(ui.outWriter) - if err != nil { - ui.logger.Error(ui.logTag, "UI.PrintTable failed: %s", err) - } -} - -func (ui *WriterUI) AskForText(label string) (string, error) { - var text string - - err := interact.NewInteraction(label).Resolve(&text) - if err != nil { - return "", fmt.Errorf("Asking for text: %s", err) - } - - return text, nil -} - -func (ui *WriterUI) AskForChoice(label string, options []string) (int, error) { - var choices []interact.Choice - - for i, opt := range options { - choices = append(choices, interact.Choice{Display: opt, Value: i}) - } - - var chosen int - - err := interact.NewInteraction(label, choices...).Resolve(&chosen) - if err != nil { - return 0, fmt.Errorf("Asking for choice: %s", err) - } - - return chosen, nil -} - -func (ui *WriterUI) AskForPassword(label string) (string, error) { - var password interact.Password - - err := interact.NewInteraction(label).Resolve(&password) - if err != nil { - return "", fmt.Errorf("Asking for password: %s", err) - } - - return string(password), nil -} - -func (ui *WriterUI) AskForConfirmation() error { - falseByDefault := false - - err := interact.NewInteraction("Continue?").Resolve(&falseByDefault) - if err != nil { - return fmt.Errorf("Asking for confirmation: %s", err) - } - - if falseByDefault == false { - return errors.New("Stopped") - } - - return nil -} - -func (ui *WriterUI) IsInteractive() bool { - return true -} - -func (ui *WriterUI) Flush() {} diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md deleted file mode 100644 index 5f16dd140..000000000 --- a/vendor/github.com/hashicorp/go-version/CHANGELOG.md +++ /dev/null @@ -1,45 +0,0 @@ -# 1.6.0 (June 28, 2022) - -FEATURES: - -- Add `Prerelease` function to `Constraint` to return true if the version includes a prerelease field ([#100](https://github.com/hashicorp/go-version/pull/100)) - -# 1.5.0 (May 18, 2022) - -FEATURES: - -- Use `encoding` `TextMarshaler` & `TextUnmarshaler` instead of JSON equivalents ([#95](https://github.com/hashicorp/go-version/pull/95)) -- Add JSON handlers to allow parsing from/to JSON ([#93](https://github.com/hashicorp/go-version/pull/93)) - -# 1.4.0 (January 5, 2022) - -FEATURES: - - - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87)) - - `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88)) - -# 1.3.0 (March 31, 2021) - -Please note that CHANGELOG.md does not exist in the source code prior to this release. 
- -FEATURES: - - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85)) - -# 1.2.1 (June 17, 2020) - -BUG FIXES: - - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73)) - -# 1.2.0 (April 23, 2019) - -FEATURES: - - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53)) - -# 1.1.0 (Jan 07, 2019) - -FEATURES: - - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45)) - -# 1.0.0 (August 24, 2018) - -Initial release. diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE deleted file mode 100644 index c33dcc7c9..000000000 --- a/vendor/github.com/hashicorp/go-version/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. 
For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. 
Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. 
Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md deleted file mode 100644 index 4d2505090..000000000 --- a/vendor/github.com/hashicorp/go-version/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# Versioning Library for Go -[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/main) -[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) - -go-version is a library for parsing versions and version constraints, -and verifying versions against a set of constraints. go-version -can sort a collection of versions properly, handles prerelease/beta -versions, can increment versions, etc. - -Versions used with go-version must follow [SemVer](http://semver.org/). - -## Installation and Usage - -Package documentation can be found on -[GoDoc](http://godoc.org/github.com/hashicorp/go-version). 
- -Installation can be done with a normal `go get`: - -``` -$ go get github.com/hashicorp/go-version -``` - -#### Version Parsing and Comparison - -```go -v1, err := version.NewVersion("1.2") -v2, err := version.NewVersion("1.5+metadata") - -// Comparison example. There is also GreaterThan, Equal, and just -// a simple Compare that returns an int allowing easy >=, <=, etc. -if v1.LessThan(v2) { - fmt.Printf("%s is less than %s", v1, v2) -} -``` - -#### Version Constraints - -```go -v1, err := version.NewVersion("1.2") - -// Constraints example. -constraints, err := version.NewConstraint(">= 1.0, < 1.4") -if constraints.Check(v1) { - fmt.Printf("%s satisfies constraints %s", v1, constraints) -} -``` - -#### Version Sorting - -```go -versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} -versions := make([]*version.Version, len(versionsRaw)) -for i, raw := range versionsRaw { - v, _ := version.NewVersion(raw) - versions[i] = v -} - -// After this, the versions are properly sorted -sort.Sort(version.Collection(versions)) -``` - -## Issues and Contributing - -If you find an issue with this library, please report an issue. If you'd -like, we welcome any contributions. Fork this library and submit a pull -request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go deleted file mode 100644 index da5d1aca1..000000000 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ /dev/null @@ -1,296 +0,0 @@ -package version - -import ( - "fmt" - "reflect" - "regexp" - "sort" - "strings" -) - -// Constraint represents a single constraint for a version, such as -// ">= 1.0". -type Constraint struct { - f constraintFunc - op operator - check *Version - original string -} - -func (c *Constraint) Equals(con *Constraint) bool { - return c.op == con.op && c.check.Equal(con.check) -} - -// Constraints is a slice of constraints. We make a custom type so that -// we can add methods to it. -type Constraints []*Constraint - -type constraintFunc func(v, c *Version) bool - -var constraintOperators map[string]constraintOperation - -type constraintOperation struct { - op operator - f constraintFunc -} - -var constraintRegexp *regexp.Regexp - -func init() { - constraintOperators = map[string]constraintOperation{ - "": {op: equal, f: constraintEqual}, - "=": {op: equal, f: constraintEqual}, - "!=": {op: notEqual, f: constraintNotEqual}, - ">": {op: greaterThan, f: constraintGreaterThan}, - "<": {op: lessThan, f: constraintLessThan}, - ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, - "<=": {op: lessThanEqual, f: constraintLessThanEqual}, - "~>": {op: pessimistic, f: constraintPessimistic}, - } - - ops := make([]string, 0, len(constraintOperators)) - for k := range constraintOperators { - ops = append(ops, regexp.QuoteMeta(k)) - } - - constraintRegexp = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - strings.Join(ops, "|"), - VersionRegexpRaw)) -} - -// NewConstraint will parse one or more constraints from the given -// constraint string. The string must be a comma-separated list of -// constraints. -func NewConstraint(v string) (Constraints, error) { - vs := strings.Split(v, ",") - result := make([]*Constraint, len(vs)) - for i, single := range vs { - c, err := parseSingle(single) - if err != nil { - return nil, err - } - - result[i] = c - } - - return Constraints(result), nil -} - -// MustConstraints is a helper that wraps a call to a function -// returning (Constraints, error) and panics if error is non-nil. 
-func MustConstraints(c Constraints, err error) Constraints { - if err != nil { - panic(err) - } - - return c -} - -// Check tests if a version satisfies all the constraints. -func (cs Constraints) Check(v *Version) bool { - for _, c := range cs { - if !c.Check(v) { - return false - } - } - - return true -} - -// Equals compares Constraints with other Constraints -// for equality. This may not represent logical equivalence -// of compared constraints. -// e.g. even though '>0.1,>0.2' is logically equivalent -// to '>0.2' it is *NOT* treated as equal. -// -// Missing operator is treated as equal to '=', whitespaces -// are ignored and constraints are sorted before comaparison. -func (cs Constraints) Equals(c Constraints) bool { - if len(cs) != len(c) { - return false - } - - // make copies to retain order of the original slices - left := make(Constraints, len(cs)) - copy(left, cs) - sort.Stable(left) - right := make(Constraints, len(c)) - copy(right, c) - sort.Stable(right) - - // compare sorted slices - for i, con := range left { - if !con.Equals(right[i]) { - return false - } - } - - return true -} - -func (cs Constraints) Len() int { - return len(cs) -} - -func (cs Constraints) Less(i, j int) bool { - if cs[i].op < cs[j].op { - return true - } - if cs[i].op > cs[j].op { - return false - } - - return cs[i].check.LessThan(cs[j].check) -} - -func (cs Constraints) Swap(i, j int) { - cs[i], cs[j] = cs[j], cs[i] -} - -// Returns the string format of the constraints -func (cs Constraints) String() string { - csStr := make([]string, len(cs)) - for i, c := range cs { - csStr[i] = c.String() - } - - return strings.Join(csStr, ",") -} - -// Check tests if a constraint is validated by the given version. -func (c *Constraint) Check(v *Version) bool { - return c.f(v, c.check) -} - -// Prerelease returns true if the version underlying this constraint -// contains a prerelease field. -func (c *Constraint) Prerelease() bool { - return len(c.check.Prerelease()) > 0 -} - -func (c *Constraint) String() string { - return c.original -} - -func parseSingle(v string) (*Constraint, error) { - matches := constraintRegexp.FindStringSubmatch(v) - if matches == nil { - return nil, fmt.Errorf("Malformed constraint: %s", v) - } - - check, err := NewVersion(matches[2]) - if err != nil { - return nil, err - } - - cop := constraintOperators[matches[1]] - - return &Constraint{ - f: cop.f, - op: cop.op, - check: check, - original: v, - }, nil -} - -func prereleaseCheck(v, c *Version) bool { - switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { - case cPre && vPre: - // A constraint with a pre-release can only match a pre-release version - // with the same base segments. - return reflect.DeepEqual(c.Segments64(), v.Segments64()) - - case !cPre && vPre: - // A constraint without a pre-release can only match a version without a - // pre-release. 
- return false - - case cPre && !vPre: - // OK, except with the pessimistic operator - case !cPre && !vPre: - // OK - } - return true -} - -//------------------------------------------------------------------- -// Constraint functions -//------------------------------------------------------------------- - -type operator rune - -const ( - equal operator = '=' - notEqual operator = '≠' - greaterThan operator = '>' - lessThan operator = '<' - greaterThanEqual operator = '≥' - lessThanEqual operator = '≤' - pessimistic operator = '~' -) - -func constraintEqual(v, c *Version) bool { - return v.Equal(c) -} - -func constraintNotEqual(v, c *Version) bool { - return !v.Equal(c) -} - -func constraintGreaterThan(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) == 1 -} - -func constraintLessThan(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) == -1 -} - -func constraintGreaterThanEqual(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) >= 0 -} - -func constraintLessThanEqual(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) <= 0 -} - -func constraintPessimistic(v, c *Version) bool { - // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases - if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { - return false - } - - // If the version being checked is naturally less than the constraint, then there - // is no way for the version to be valid against the constraint - if v.LessThan(c) { - return false - } - // We'll use this more than once, so grab the length now so it's a little cleaner - // to write the later checks - cs := len(c.segments) - - // If the version being checked has less specificity than the constraint, then there - // is no way for the version to be valid against the constraint - if cs > len(v.segments) { - return false - } - - // Check the segments in the constraint against those in the version. If the version - // being checked, at any point, does not have the same values in each index of the - // constraints segments, then it cannot be valid against the constraint. - for i := 0; i < c.si-1; i++ { - if v.segments[i] != c.segments[i] { - return false - } - } - - // Check the last part of the segment in the constraint. If the version segment at - // this index is less than the constraints segment at this index, then it cannot - // be valid against the constraint - if c.segments[cs-1] > v.segments[cs-1] { - return false - } - - // If nothing has rejected the version by now, it's valid - return true -} diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go deleted file mode 100644 index e87df6990..000000000 --- a/vendor/github.com/hashicorp/go-version/version.go +++ /dev/null @@ -1,407 +0,0 @@ -package version - -import ( - "bytes" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" -) - -// The compiled regular expression used to test the validity of a version. -var ( - versionRegexp *regexp.Regexp - semverRegexp *regexp.Regexp -) - -// The raw regular expression string used for testing the validity -// of a version. 
-const ( - VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + - `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + - `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + - `?` - - // SemverRegexpRaw requires a separator between version and prerelease - SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + - `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + - `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + - `?` -) - -// Version represents a single version. -type Version struct { - metadata string - pre string - segments []int64 - si int - original string -} - -func init() { - versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") - semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") -} - -// NewVersion parses the given version and returns a new -// Version. -func NewVersion(v string) (*Version, error) { - return newVersion(v, versionRegexp) -} - -// NewSemver parses the given version and returns a new -// Version that adheres strictly to SemVer specs -// https://semver.org/ -func NewSemver(v string) (*Version, error) { - return newVersion(v, semverRegexp) -} - -func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { - matches := pattern.FindStringSubmatch(v) - if matches == nil { - return nil, fmt.Errorf("Malformed version: %s", v) - } - segmentsStr := strings.Split(matches[1], ".") - segments := make([]int64, len(segmentsStr)) - for i, str := range segmentsStr { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return nil, fmt.Errorf( - "Error parsing version: %s", err) - } - - segments[i] = val - } - - // Even though we could support more than three segments, if we - // got less than three, pad it with 0s. This is to cover the basic - // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum - for i := len(segments); i < 3; i++ { - segments = append(segments, 0) - } - - pre := matches[7] - if pre == "" { - pre = matches[4] - } - - return &Version{ - metadata: matches[10], - pre: pre, - segments: segments, - si: len(segmentsStr), - original: v, - }, nil -} - -// Must is a helper that wraps a call to a function returning (*Version, error) -// and panics if error is non-nil. -func Must(v *Version, err error) *Version { - if err != nil { - panic(err) - } - - return v -} - -// Compare compares this version to another version. This -// returns -1, 0, or 1 if this version is smaller, equal, -// or larger than the other version, respectively. -// -// If you want boolean results, use the LessThan, Equal, -// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. 
-func (v *Version) Compare(other *Version) int { - // A quick, efficient equality check - if v.String() == other.String() { - return 0 - } - - segmentsSelf := v.Segments64() - segmentsOther := other.Segments64() - - // If the segments are the same, we must compare on prerelease info - if reflect.DeepEqual(segmentsSelf, segmentsOther) { - preSelf := v.Prerelease() - preOther := other.Prerelease() - if preSelf == "" && preOther == "" { - return 0 - } - if preSelf == "" { - return 1 - } - if preOther == "" { - return -1 - } - - return comparePrereleases(preSelf, preOther) - } - - // Get the highest specificity (hS), or if they're equal, just use segmentSelf length - lenSelf := len(segmentsSelf) - lenOther := len(segmentsOther) - hS := lenSelf - if lenSelf < lenOther { - hS = lenOther - } - // Compare the segments - // Because a constraint could have more/less specificity than the version it's - // checking, we need to account for a lopsided or jagged comparison - for i := 0; i < hS; i++ { - if i > lenSelf-1 { - // This means Self had the lower specificity - // Check to see if the remaining segments in Other are all zeros - if !allZero(segmentsOther[i:]) { - // if not, it means that Other has to be greater than Self - return -1 - } - break - } else if i > lenOther-1 { - // this means Other had the lower specificity - // Check to see if the remaining segments in Self are all zeros - - if !allZero(segmentsSelf[i:]) { - //if not, it means that Self has to be greater than Other - return 1 - } - break - } - lhs := segmentsSelf[i] - rhs := segmentsOther[i] - if lhs == rhs { - continue - } else if lhs < rhs { - return -1 - } - // Otherwis, rhs was > lhs, they're not equal - return 1 - } - - // if we got this far, they're equal - return 0 -} - -func allZero(segs []int64) bool { - for _, s := range segs { - if s != 0 { - return false - } - } - return true -} - -func comparePart(preSelf string, preOther string) int { - if preSelf == preOther { - return 0 - } - - var selfInt int64 - selfNumeric := true - selfInt, err := strconv.ParseInt(preSelf, 10, 64) - if err != nil { - selfNumeric = false - } - - var otherInt int64 - otherNumeric := true - otherInt, err = strconv.ParseInt(preOther, 10, 64) - if err != nil { - otherNumeric = false - } - - // if a part is empty, we use the other to decide - if preSelf == "" { - if otherNumeric { - return -1 - } - return 1 - } - - if preOther == "" { - if selfNumeric { - return 1 - } - return -1 - } - - if selfNumeric && !otherNumeric { - return -1 - } else if !selfNumeric && otherNumeric { - return 1 - } else if !selfNumeric && !otherNumeric && preSelf > preOther { - return 1 - } else if selfInt > otherInt { - return 1 - } - - return -1 -} - -func comparePrereleases(v string, other string) int { - // the same pre release! 
- if v == other { - return 0 - } - - // split both pre releases for analyse their parts - selfPreReleaseMeta := strings.Split(v, ".") - otherPreReleaseMeta := strings.Split(other, ".") - - selfPreReleaseLen := len(selfPreReleaseMeta) - otherPreReleaseLen := len(otherPreReleaseMeta) - - biggestLen := otherPreReleaseLen - if selfPreReleaseLen > otherPreReleaseLen { - biggestLen = selfPreReleaseLen - } - - // loop for parts to find the first difference - for i := 0; i < biggestLen; i = i + 1 { - partSelfPre := "" - if i < selfPreReleaseLen { - partSelfPre = selfPreReleaseMeta[i] - } - - partOtherPre := "" - if i < otherPreReleaseLen { - partOtherPre = otherPreReleaseMeta[i] - } - - compare := comparePart(partSelfPre, partOtherPre) - // if parts are equals, continue the loop - if compare != 0 { - return compare - } - } - - return 0 -} - -// Core returns a new version constructed from only the MAJOR.MINOR.PATCH -// segments of the version, without prerelease or metadata. -func (v *Version) Core() *Version { - segments := v.Segments64() - segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2]) - return Must(NewVersion(segmentsOnly)) -} - -// Equal tests if two versions are equal. -func (v *Version) Equal(o *Version) bool { - if v == nil || o == nil { - return v == o - } - - return v.Compare(o) == 0 -} - -// GreaterThan tests if this version is greater than another version. -func (v *Version) GreaterThan(o *Version) bool { - return v.Compare(o) > 0 -} - -// GreaterThanOrEqual tests if this version is greater than or equal to another version. -func (v *Version) GreaterThanOrEqual(o *Version) bool { - return v.Compare(o) >= 0 -} - -// LessThan tests if this version is less than another version. -func (v *Version) LessThan(o *Version) bool { - return v.Compare(o) < 0 -} - -// LessThanOrEqual tests if this version is less than or equal to another version. -func (v *Version) LessThanOrEqual(o *Version) bool { - return v.Compare(o) <= 0 -} - -// Metadata returns any metadata that was part of the version -// string. -// -// Metadata is anything that comes after the "+" in the version. -// For example, with "1.2.3+beta", the metadata is "beta". -func (v *Version) Metadata() string { - return v.metadata -} - -// Prerelease returns any prerelease data that is part of the version, -// or blank if there is no prerelease data. -// -// Prerelease information is anything that comes after the "-" in the -// version (but before any metadata). For example, with "1.2.3-beta", -// the prerelease information is "beta". -func (v *Version) Prerelease() string { - return v.pre -} - -// Segments returns the numeric segments of the version as a slice of ints. -// -// This excludes any metadata or pre-release information. For example, -// for a version "1.2.3-beta", segments will return a slice of -// 1, 2, 3. -func (v *Version) Segments() []int { - segmentSlice := make([]int, len(v.segments)) - for i, v := range v.segments { - segmentSlice[i] = int(v) - } - return segmentSlice -} - -// Segments64 returns the numeric segments of the version as a slice of int64s. -// -// This excludes any metadata or pre-release information. For example, -// for a version "1.2.3-beta", segments will return a slice of -// 1, 2, 3. -func (v *Version) Segments64() []int64 { - result := make([]int64, len(v.segments)) - copy(result, v.segments) - return result -} - -// String returns the full version string included pre-release -// and metadata information. 
-// -// This value is rebuilt according to the parsed segments and other -// information. Therefore, ambiguities in the version string such as -// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and -// missing parts (1.0 => 1.0.0) will be made into a canonicalized form -// as shown in the parenthesized examples. -func (v *Version) String() string { - var buf bytes.Buffer - fmtParts := make([]string, len(v.segments)) - for i, s := range v.segments { - // We can ignore err here since we've pre-parsed the values in segments - str := strconv.FormatInt(s, 10) - fmtParts[i] = str - } - fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) - if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) - } - if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) - } - - return buf.String() -} - -// Original returns the original parsed version as-is, including any -// potential whitespace, `v` prefix, etc. -func (v *Version) Original() string { - return v.original -} - -// UnmarshalText implements encoding.TextUnmarshaler interface. -func (v *Version) UnmarshalText(b []byte) error { - temp, err := NewVersion(string(b)) - if err != nil { - return err - } - - *v = *temp - - return nil -} - -// MarshalText implements encoding.TextMarshaler interface. -func (v *Version) MarshalText() ([]byte, error) { - return []byte(v.String()), nil -} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go deleted file mode 100644 index cc888d43e..000000000 --- a/vendor/github.com/hashicorp/go-version/version_collection.go +++ /dev/null @@ -1,17 +0,0 @@ -package version - -// Collection is a type that implements the sort.Interface interface -// so that versions can be sorted. -type Collection []*Version - -func (v Collection) Len() int { - return len(v) -} - -func (v Collection) Less(i, j int) bool { - return v[i].LessThan(v[j]) -} - -func (v Collection) Swap(i, j int) { - v[i], v[j] = v[j], v[i] -} diff --git a/vendor/github.com/k14s/starlark-go/LICENSE b/vendor/github.com/k14s/starlark-go/LICENSE deleted file mode 100644 index a6609a143..000000000 --- a/vendor/github.com/k14s/starlark-go/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright (c) 2017 The Bazel Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the - distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/k14s/starlark-go/internal/compile/compile.go b/vendor/github.com/k14s/starlark-go/internal/compile/compile.go deleted file mode 100644 index 3d0e6879f..000000000 --- a/vendor/github.com/k14s/starlark-go/internal/compile/compile.go +++ /dev/null @@ -1,1903 +0,0 @@ -// Package compile defines the Starlark bytecode compiler. -// It is an internal package of the Starlark interpreter and is not directly accessible to clients. -// -// The compiler generates byte code with optional uint32 operands for a -// virtual machine with the following components: -// - a program counter, which is an index into the byte code array. -// - an operand stack, whose maximum size is computed for each function by the compiler. -// - an stack of active iterators. -// - an array of local variables. -// The number of local variables and their indices are computed by the resolver. -// Locals (possibly including parameters) that are shared with nested functions -// are 'cells': their locals array slot will contain a value of type 'cell', -// an indirect value in a box that is explicitly read/updated by instructions. -// - an array of free variables, for nested functions. -// Free variables are a subset of the ancestors' cell variables. -// As with locals and cells, these are computed by the resolver. -// - an array of global variables, shared among all functions in the same module. -// All elements are initially nil. -// - two maps of predeclared and universal identifiers. -// -// Each function has a line number table that maps each program counter -// offset to a source position, including the column number. -// -// Operands, logically uint32s, are encoded using little-endian 7-bit -// varints, the top bit indicating that more bytes follow. -// -package compile // import "github.com/k14s/starlark-go/internal/compile" - -import ( - "bytes" - "fmt" - "log" - "os" - "path/filepath" - "strconv" - "sync" - - "github.com/k14s/starlark-go/resolve" - "github.com/k14s/starlark-go/syntax" -) - -// Disassemble causes the assembly code for each function -// to be printed to stderr as it is generated. -var Disassemble = false - -const debug = false // make code generation verbose, for debugging the compiler - -// Increment this to force recompilation of saved bytecode files. -const Version = 10 - -type Opcode uint8 - -// "x DUP x x" is a "stack picture" that describes the state of the -// stack before and after execution of the instruction. -// -// OP indicates an immediate operand that is an index into the -// specified table: locals, names, freevars, constants. 
-const ( - NOP Opcode = iota // - NOP - - - // stack operations - DUP // x DUP x x - DUP2 // x y DUP2 x y x y - POP // x POP - - EXCH // x y EXCH y x - - // binary comparisons - // (order must match Token) - LT - GT - GE - LE - EQL - NEQ - - // binary arithmetic - // (order must match Token) - PLUS - MINUS - STAR - SLASH - SLASHSLASH - PERCENT - AMP - PIPE - CIRCUMFLEX - LTLT - GTGT - - IN - - // unary operators - UPLUS // x UPLUS x - UMINUS // x UMINUS -x - TILDE // x TILDE ~x - - NONE // - NONE None - TRUE // - TRUE True - FALSE // - FALSE False - MANDATORY // - MANDATORY Mandatory [sentinel value for required kwonly args] - - ITERPUSH // iterable ITERPUSH - [pushes the iterator stack] - ITERPOP // - ITERPOP - [pops the iterator stack] - NOT // value NOT bool - RETURN // value RETURN - - SETINDEX // a i new SETINDEX - - INDEX // a i INDEX elem - SETDICT // dict key value SETDICT - - SETDICTUNIQ // dict key value SETDICTUNIQ - - APPEND // list elem APPEND - - SLICE // x lo hi step SLICE slice - INPLACE_ADD // x y INPLACE_ADD z where z is x+y or x.extend(y) - MAKEDICT // - MAKEDICT dict - SETCELL // value cell SETCELL - - CELL // cell CELL value - - // --- opcodes with an argument must go below this line --- - - // control flow - JMP // - JMP - - CJMP // cond CJMP - - ITERJMP // - ITERJMP elem (and fall through) [acts on topmost iterator] - // or: - ITERJMP - (and jump) - - CONSTANT // - CONSTANT value - MAKETUPLE // x1 ... xn MAKETUPLE tuple - MAKELIST // x1 ... xn MAKELIST list - MAKEFUNC // defaults+freevars MAKEFUNC fn - LOAD // from1 ... fromN module LOAD v1 ... vN - SETLOCAL // value SETLOCAL - - SETGLOBAL // value SETGLOBAL - - LOCAL // - LOCAL value - FREE // - FREE cell - GLOBAL // - GLOBAL value - PREDECLARED // - PREDECLARED value - UNIVERSAL // - UNIVERSAL value - ATTR // x ATTR y y = x.name - SETFIELD // x y SETFIELD - x.name = y - UNPACK // iterable UNPACK vn ... v1 - - // n>>8 is #positional args and n&0xff is #named args (pairs). - CALL // fn positional named CALL result - CALL_VAR // fn positional named *args CALL_VAR result - CALL_KW // fn positional named **kwargs CALL_KW result - CALL_VAR_KW // fn positional named *args **kwargs CALL_VAR_KW result - - OpcodeArgMin = JMP - OpcodeMax = CALL_VAR_KW -) - -// TODO(adonovan): add dynamic checks for missing opcodes in the tables below. 
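(Aside, not part of this diff: the package comment in the deleted compile.go above notes that operands are little-endian 7-bit varints whose top bit marks continuation. The standalone sketch below, with hypothetical helper names putVarint/getVarint, mirrors the addUint32/argLen helpers that appear later in this deleted file, minus the NOP padding those use for patchable jump operands.)

package main

import "fmt"

// putVarint appends x as a little-endian base-128 varint: the low 7 bits of
// each byte carry data, and the high bit says another byte follows.
func putVarint(code []byte, x uint32) []byte {
	for x >= 0x80 {
		code = append(code, byte(x)|0x80)
		x >>= 7
	}
	return append(code, byte(x))
}

// getVarint decodes one varint and reports how many bytes it consumed.
func getVarint(code []byte) (uint32, int) {
	var x uint32
	for i, b := range code {
		x |= uint32(b&0x7f) << (7 * uint(i))
		if b < 0x80 {
			return x, i + 1
		}
	}
	return x, len(code) // truncated input; a real decoder would report an error
}

func main() {
	buf := putVarint(nil, 300)
	fmt.Printf("% x\n", buf) // ac 02
	v, n := getVarint(buf)
	fmt.Println(v, n) // 300 2
}

(Encoding 300 costs two bytes, and any operand below 128 costs one, which is why most instructions stay compact.)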
- -var opcodeNames = [...]string{ - AMP: "amp", - APPEND: "append", - ATTR: "attr", - CALL: "call", - CALL_KW: "call_kw ", - CALL_VAR: "call_var", - CALL_VAR_KW: "call_var_kw", - CELL: "cell", - CIRCUMFLEX: "circumflex", - CJMP: "cjmp", - CONSTANT: "constant", - DUP2: "dup2", - DUP: "dup", - EQL: "eql", - EXCH: "exch", - FALSE: "false", - FREE: "free", - GE: "ge", - GLOBAL: "global", - GT: "gt", - GTGT: "gtgt", - IN: "in", - INDEX: "index", - INPLACE_ADD: "inplace_add", - ITERJMP: "iterjmp", - ITERPOP: "iterpop", - ITERPUSH: "iterpush", - JMP: "jmp", - LE: "le", - LOAD: "load", - LOCAL: "local", - LT: "lt", - LTLT: "ltlt", - MAKEDICT: "makedict", - MAKEFUNC: "makefunc", - MAKELIST: "makelist", - MAKETUPLE: "maketuple", - MANDATORY: "mandatory", - MINUS: "minus", - NEQ: "neq", - NONE: "none", - NOP: "nop", - NOT: "not", - PERCENT: "percent", - PIPE: "pipe", - PLUS: "plus", - POP: "pop", - PREDECLARED: "predeclared", - RETURN: "return", - SETCELL: "setcell", - SETDICT: "setdict", - SETDICTUNIQ: "setdictuniq", - SETFIELD: "setfield", - SETGLOBAL: "setglobal", - SETINDEX: "setindex", - SETLOCAL: "setlocal", - SLASH: "slash", - SLASHSLASH: "slashslash", - SLICE: "slice", - STAR: "star", - TILDE: "tilde", - TRUE: "true", - UMINUS: "uminus", - UNIVERSAL: "universal", - UNPACK: "unpack", - UPLUS: "uplus", -} - -const variableStackEffect = 0x7f - -// stackEffect records the effect on the size of the operand stack of -// each kind of instruction. For some instructions this requires computation. -var stackEffect = [...]int8{ - AMP: -1, - APPEND: -2, - ATTR: 0, - CALL: variableStackEffect, - CALL_KW: variableStackEffect, - CALL_VAR: variableStackEffect, - CALL_VAR_KW: variableStackEffect, - CELL: 0, - CIRCUMFLEX: -1, - CJMP: -1, - CONSTANT: +1, - DUP2: +2, - DUP: +1, - EQL: -1, - FALSE: +1, - FREE: +1, - GE: -1, - GLOBAL: +1, - GT: -1, - GTGT: -1, - IN: -1, - INDEX: -1, - INPLACE_ADD: -1, - ITERJMP: variableStackEffect, - ITERPOP: 0, - ITERPUSH: -1, - JMP: 0, - LE: -1, - LOAD: -1, - LOCAL: +1, - LT: -1, - LTLT: -1, - MAKEDICT: +1, - MAKEFUNC: 0, - MAKELIST: variableStackEffect, - MAKETUPLE: variableStackEffect, - MANDATORY: +1, - MINUS: -1, - NEQ: -1, - NONE: +1, - NOP: 0, - NOT: 0, - PERCENT: -1, - PIPE: -1, - PLUS: -1, - POP: -1, - PREDECLARED: +1, - RETURN: -1, - SETCELL: -2, - SETDICT: -3, - SETDICTUNIQ: -3, - SETFIELD: -2, - SETGLOBAL: -1, - SETINDEX: -3, - SETLOCAL: -1, - SLASH: -1, - SLASHSLASH: -1, - SLICE: -3, - STAR: -1, - TRUE: +1, - UMINUS: 0, - UNIVERSAL: +1, - UNPACK: variableStackEffect, - UPLUS: 0, -} - -func (op Opcode) String() string { - if op < OpcodeMax { - if name := opcodeNames[op]; name != "" { - return name - } - } - return fmt.Sprintf("illegal op (%d)", op) -} - -// A Program is a Starlark file in executable form. -// -// Programs are serialized by the Program.Encode method, -// which must be updated whenever this declaration is changed. -type Program struct { - Loads []Binding // name (really, string) and position of each load stmt - Names []string // names of attributes and predeclared variables - Constants []interface{} // = string | int64 | float64 | *big.Int - Functions []*Funcode - Globals []Binding // for error messages and tracing - Toplevel *Funcode // module initialization function -} - -// A Funcode is the code of a compiled Starlark function. -// -// Funcodes are serialized by the encoder.function method, -// which must be updated whenever this declaration is changed. 
-type Funcode struct { - Prog *Program - Pos syntax.Position // position of def or lambda token - Name string // name of this function - Doc string // docstring of this function - Code []byte // the byte code - pclinetab []uint16 // mapping from pc to linenum - Locals []Binding // locals, parameters first - Cells []int // indices of Locals that require cells - Freevars []Binding // for tracing - MaxStack int - NumParams int - NumKwonlyParams int - HasVarargs, HasKwargs bool - - // -- transient state -- - - lntOnce sync.Once - lnt []pclinecol // decoded line number table -} - -type pclinecol struct { - pc uint32 - line, col int32 -} - -// A Binding is the name and position of a binding identifier. -type Binding struct { - Name string - Pos syntax.Position -} - -// A pcomp holds the compiler state for a Program. -type pcomp struct { - prog *Program // what we're building - - names map[string]uint32 - constants map[interface{}]uint32 - functions map[*Funcode]uint32 -} - -// An fcomp holds the compiler state for a Funcode. -type fcomp struct { - fn *Funcode // what we're building - - pcomp *pcomp - pos syntax.Position // current position of generated code - loops []loop - block *block -} - -type loop struct { - break_, continue_ *block -} - -type block struct { - insns []insn - - // If the last insn is a RETURN, jmp and cjmp are nil. - // If the last insn is a CJMP or ITERJMP, - // cjmp and jmp are the "true" and "false" successors. - // Otherwise, jmp is the sole successor. - jmp, cjmp *block - - initialstack int // for stack depth computation - - // Used during encoding - index int // -1 => not encoded yet - addr uint32 -} - -type insn struct { - op Opcode - arg uint32 - line, col int32 -} - -// Position returns the source position for program counter pc. -func (fn *Funcode) Position(pc uint32) syntax.Position { - fn.lntOnce.Do(fn.decodeLNT) - - // Binary search to find last LNT entry not greater than pc. - // To avoid dynamic dispatch, this is a specialization of - // sort.Search using this predicate: - // !(i < len(fn.lnt)-1 && fn.lnt[i+1].pc <= pc) - n := len(fn.lnt) - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) - if !(h >= n-1 || fn.lnt[h+1].pc > pc) { - i = h + 1 - } else { - j = h - } - } - - var line, col int32 - if i < n { - line = fn.lnt[i].line - col = fn.lnt[i].col - } - - pos := fn.Pos // copy the (annoyingly inaccessible) filename - pos.Col = col - pos.Line = line - return pos -} - -// decodeLNT decodes the line number table and populates fn.lnt. -// It is called at most once. -func (fn *Funcode) decodeLNT() { - // Conceptually the table contains rows of the form - // (pc uint32, line int32, col int32), sorted by pc. - // We use a delta encoding, since the differences - // between successive pc, line, and column values - // are typically small and positive (though line and - // especially column differences may be negative). - // The delta encoding starts from - // {pc: 0, line: fn.Pos.Line, col: fn.Pos.Col}. - // - // Each entry is packed into one or more 16-bit values: - // Δpc uint4 - // Δline int5 - // Δcol int6 - // incomplete uint1 - // The top 4 bits are the unsigned delta pc. - // The next 5 bits are the signed line number delta. - // The next 6 bits are the signed column number delta. - // The bottom bit indicates that more rows follow because - // one of the deltas was maxed out. - // These field widths were chosen from a sample of real programs, - // and allow >97% of rows to be encoded in a single uint16. 
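(Aside, not part of the change: the field widths described just above pack one pc-line-table row into a uint16 as Δpc:4, Δline:5, Δcol:6, incomplete:1. Below is a self-contained worked example using an assumed sample value 0x317E, which unpacks to Δpc=3, Δline=+2, Δcol=-1 with the incomplete bit clear; the decodeLNT loop that follows applies the same unpacking across the whole table.)

package main

import "fmt"

func main() {
	x := uint16(0x317E) // assumed sample entry, not taken from real bytecode
	deltaPC := uint32(x) >> 12                      // top 4 bits, unsigned
	deltaLine := int32((int16(x) << 4) >> (16 - 5)) // next 5 bits, sign-extended
	deltaCol := int32((int16(x) << 9) >> (16 - 6))  // next 6 bits, sign-extended
	incomplete := x & 1                             // low bit: more rows follow
	fmt.Println(deltaPC, deltaLine, deltaCol, incomplete) // 3 2 -1 0
}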
- - fn.lnt = make([]pclinecol, 0, len(fn.pclinetab)) // a minor overapproximation - entry := pclinecol{ - pc: 0, - line: fn.Pos.Line, - col: fn.Pos.Col, - } - for _, x := range fn.pclinetab { - entry.pc += uint32(x) >> 12 - entry.line += int32((int16(x) << 4) >> (16 - 5)) // sign extend Δline - entry.col += int32((int16(x) << 9) >> (16 - 6)) // sign extend Δcol - if (x & 1) == 0 { - fn.lnt = append(fn.lnt, entry) - } - } -} - -// bindings converts resolve.Bindings to compiled form. -func bindings(bindings []*resolve.Binding) []Binding { - res := make([]Binding, len(bindings)) - for i, bind := range bindings { - res[i].Name = bind.First.Name - res[i].Pos = bind.First.NamePos - } - return res -} - -// Expr compiles an expression to a program whose toplevel function evaluates it. -func Expr(expr syntax.Expr, name string, locals []*resolve.Binding) *Program { - pos := syntax.Start(expr) - stmts := []syntax.Stmt{&syntax.ReturnStmt{Result: expr}} - return File(stmts, pos, name, locals, nil) -} - -// File compiles the statements of a file into a program. -func File(stmts []syntax.Stmt, pos syntax.Position, name string, locals, globals []*resolve.Binding) *Program { - pcomp := &pcomp{ - prog: &Program{ - Globals: bindings(globals), - }, - names: make(map[string]uint32), - constants: make(map[interface{}]uint32), - functions: make(map[*Funcode]uint32), - } - pcomp.prog.Toplevel = pcomp.function(name, pos, stmts, locals, nil) - - return pcomp.prog -} - -func (pcomp *pcomp) function(name string, pos syntax.Position, stmts []syntax.Stmt, locals, freevars []*resolve.Binding) *Funcode { - fcomp := &fcomp{ - pcomp: pcomp, - pos: pos, - fn: &Funcode{ - Prog: pcomp.prog, - Pos: pos, - Name: name, - Doc: docStringFromBody(stmts), - Locals: bindings(locals), - Freevars: bindings(freevars), - }, - } - - // Record indices of locals that require cells. - for i, local := range locals { - if local.Scope == resolve.Cell { - fcomp.fn.Cells = append(fcomp.fn.Cells, i) - } - } - - if debug { - fmt.Fprintf(os.Stderr, "start function(%s @ %s)\n", name, pos) - } - - // Convert AST to a CFG of instructions. - entry := fcomp.newBlock() - fcomp.block = entry - fcomp.stmts(stmts) - if fcomp.block != nil { - fcomp.emit(NONE) - fcomp.emit(RETURN) - } - - var oops bool // something bad happened - - setinitialstack := func(b *block, depth int) { - if b.initialstack == -1 { - b.initialstack = depth - } else if b.initialstack != depth { - fmt.Fprintf(os.Stderr, "%d: setinitialstack: depth mismatch: %d vs %d\n", - b.index, b.initialstack, depth) - oops = true - } - } - - // Linearize the CFG: - // compute order, address, and initial - // stack depth of each reachable block. - var pc uint32 - var blocks []*block - var maxstack int - var visit func(b *block) - visit = func(b *block) { - if b.index >= 0 { - return // already visited - } - b.index = len(blocks) - b.addr = pc - blocks = append(blocks, b) - - stack := b.initialstack - if debug { - fmt.Fprintf(os.Stderr, "%s block %d: (stack = %d)\n", name, b.index, stack) - } - var cjmpAddr *uint32 - var isiterjmp int - for i, insn := range b.insns { - pc++ - - // Compute size of argument. - if insn.op >= OpcodeArgMin { - switch insn.op { - case ITERJMP: - isiterjmp = 1 - fallthrough - case CJMP: - cjmpAddr = &b.insns[i].arg - pc += 4 - default: - pc += uint32(argLen(insn.arg)) - } - } - - // Compute effect on stack. 
- se := insn.stackeffect() - if debug { - fmt.Fprintln(os.Stderr, "\t", insn.op, stack, stack+se) - } - stack += se - if stack < 0 { - fmt.Fprintf(os.Stderr, "After pc=%d: stack underflow\n", pc) - oops = true - } - if stack+isiterjmp > maxstack { - maxstack = stack + isiterjmp - } - } - - if debug { - fmt.Fprintf(os.Stderr, "successors of block %d (start=%d):\n", - b.addr, b.index) - if b.jmp != nil { - fmt.Fprintf(os.Stderr, "jmp to %d\n", b.jmp.index) - } - if b.cjmp != nil { - fmt.Fprintf(os.Stderr, "cjmp to %d\n", b.cjmp.index) - } - } - - // Place the jmp block next. - if b.jmp != nil { - // jump threading (empty cycles are impossible) - for b.jmp.insns == nil { - b.jmp = b.jmp.jmp - } - - setinitialstack(b.jmp, stack+isiterjmp) - if b.jmp.index < 0 { - // Successor is not yet visited: - // place it next and fall through. - visit(b.jmp) - } else { - // Successor already visited; - // explicit backward jump required. - pc += 5 - } - } - - // Then the cjmp block. - if b.cjmp != nil { - // jump threading (empty cycles are impossible) - for b.cjmp.insns == nil { - b.cjmp = b.cjmp.jmp - } - - setinitialstack(b.cjmp, stack) - visit(b.cjmp) - - // Patch the CJMP/ITERJMP, if present. - if cjmpAddr != nil { - *cjmpAddr = b.cjmp.addr - } - } - } - setinitialstack(entry, 0) - visit(entry) - - fn := fcomp.fn - fn.MaxStack = maxstack - - // Emit bytecode (and position table). - if Disassemble { - fmt.Fprintf(os.Stderr, "Function %s: (%d blocks, %d bytes)\n", name, len(blocks), pc) - } - fcomp.generate(blocks, pc) - - if debug { - fmt.Fprintf(os.Stderr, "code=%d maxstack=%d\n", fn.Code, fn.MaxStack) - } - - // Don't panic until we've completed printing of the function. - if oops { - panic("internal error") - } - - if debug { - fmt.Fprintf(os.Stderr, "end function(%s @ %s)\n", name, pos) - } - - return fn -} - -func docStringFromBody(body []syntax.Stmt) string { - if len(body) == 0 { - return "" - } - expr, ok := body[0].(*syntax.ExprStmt) - if !ok { - return "" - } - lit, ok := expr.X.(*syntax.Literal) - if !ok { - return "" - } - if lit.Token != syntax.STRING { - return "" - } - return lit.Value.(string) -} - -func (insn *insn) stackeffect() int { - se := int(stackEffect[insn.op]) - if se == variableStackEffect { - arg := int(insn.arg) - switch insn.op { - case CALL, CALL_KW, CALL_VAR, CALL_VAR_KW: - se = -int(2*(insn.arg&0xff) + insn.arg>>8) - if insn.op != CALL { - se-- - } - if insn.op == CALL_VAR_KW { - se-- - } - case ITERJMP: - // Stack effect differs by successor: - // +1 for jmp/false/ok - // 0 for cjmp/true/exhausted - // Handled specially in caller. - se = 0 - case MAKELIST, MAKETUPLE: - se = 1 - arg - case UNPACK: - se = arg - 1 - default: - panic(insn.op) - } - } - return se -} - -// generate emits the linear instruction stream from the CFG, -// and builds the PC-to-line number table. -func (fcomp *fcomp) generate(blocks []*block, codelen uint32) { - code := make([]byte, 0, codelen) - var pclinetab []uint16 - prev := pclinecol{ - pc: 0, - line: fcomp.fn.Pos.Line, - col: fcomp.fn.Pos.Col, - } - - for _, b := range blocks { - if Disassemble { - fmt.Fprintf(os.Stderr, "%d:\n", b.index) - } - pc := b.addr - for _, insn := range b.insns { - if insn.line != 0 { - // Instruction has a source position. Delta-encode it. - // See Funcode.Position for the encoding. 
- for { - var incomplete uint16 - - // Δpc, uint4 - deltapc := pc - prev.pc - if deltapc > 0x0f { - deltapc = 0x0f - incomplete = 1 - } - prev.pc += deltapc - - // Δline, int5 - deltaline, ok := clip(insn.line-prev.line, -0x10, 0x0f) - if !ok { - incomplete = 1 - } - prev.line += deltaline - - // Δcol, int6 - deltacol, ok := clip(insn.col-prev.col, -0x20, 0x1f) - if !ok { - incomplete = 1 - } - prev.col += deltacol - - entry := uint16(deltapc<<12) | uint16(deltaline&0x1f)<<7 | uint16(deltacol&0x3f)<<1 | incomplete - pclinetab = append(pclinetab, entry) - if incomplete == 0 { - break - } - } - - if Disassemble { - fmt.Fprintf(os.Stderr, "\t\t\t\t\t; %s:%d:%d\n", - filepath.Base(fcomp.fn.Pos.Filename()), insn.line, insn.col) - } - } - if Disassemble { - PrintOp(fcomp.fn, pc, insn.op, insn.arg) - } - code = append(code, byte(insn.op)) - pc++ - if insn.op >= OpcodeArgMin { - if insn.op == CJMP || insn.op == ITERJMP { - code = addUint32(code, insn.arg, 4) // pad arg to 4 bytes - } else { - code = addUint32(code, insn.arg, 0) - } - pc = uint32(len(code)) - } - } - - if b.jmp != nil && b.jmp.index != b.index+1 { - addr := b.jmp.addr - if Disassemble { - fmt.Fprintf(os.Stderr, "\t%d\tjmp\t\t%d\t; block %d\n", - pc, addr, b.jmp.index) - } - code = append(code, byte(JMP)) - code = addUint32(code, addr, 4) - } - } - if len(code) != int(codelen) { - panic("internal error: wrong code length") - } - - fcomp.fn.pclinetab = pclinetab - fcomp.fn.Code = code -} - -// clip returns the value nearest x in the range [min...max], -// and whether it equals x. -func clip(x, min, max int32) (int32, bool) { - if x > max { - return max, false - } else if x < min { - return min, false - } else { - return x, true - } -} - -// addUint32 encodes x as 7-bit little-endian varint. -// TODO(adonovan): opt: steal top two bits of opcode -// to encode the number of complete bytes that follow. -func addUint32(code []byte, x uint32, min int) []byte { - end := len(code) + min - for x >= 0x80 { - code = append(code, byte(x)|0x80) - x >>= 7 - } - code = append(code, byte(x)) - // Pad the operand with NOPs to exactly min bytes. - for len(code) < end { - code = append(code, byte(NOP)) - } - return code -} - -func argLen(x uint32) int { - n := 0 - for x >= 0x80 { - n++ - x >>= 7 - } - return n + 1 -} - -// PrintOp prints an instruction. -// It is provided for debugging. -func PrintOp(fn *Funcode, pc uint32, op Opcode, arg uint32) { - if op < OpcodeArgMin { - fmt.Fprintf(os.Stderr, "\t%d\t%s\n", pc, op) - return - } - - var comment string - switch op { - case CONSTANT: - switch x := fn.Prog.Constants[arg].(type) { - case string: - comment = strconv.Quote(x) - default: - comment = fmt.Sprint(x) - } - case MAKEFUNC: - comment = fn.Prog.Functions[arg].Name - case SETLOCAL, LOCAL: - comment = fn.Locals[arg].Name - case SETGLOBAL, GLOBAL: - comment = fn.Prog.Globals[arg].Name - case ATTR, SETFIELD, PREDECLARED, UNIVERSAL: - comment = fn.Prog.Names[arg] - case FREE: - comment = fn.Freevars[arg].Name - case CALL, CALL_VAR, CALL_KW, CALL_VAR_KW: - comment = fmt.Sprintf("%d pos, %d named", arg>>8, arg&0xff) - default: - // JMP, CJMP, ITERJMP, MAKETUPLE, MAKELIST, LOAD, UNPACK: - // arg is just a number - } - var buf bytes.Buffer - fmt.Fprintf(&buf, "\t%d\t%-10s\t%d", pc, op, arg) - if comment != "" { - fmt.Fprint(&buf, "\t; ", comment) - } - fmt.Fprintln(&buf) - os.Stderr.Write(buf.Bytes()) -} - -// newBlock returns a new block. 
-func (fcomp) newBlock() *block { - return &block{index: -1, initialstack: -1} -} - -// emit emits an instruction to the current block. -func (fcomp *fcomp) emit(op Opcode) { - if op >= OpcodeArgMin { - panic("missing arg: " + op.String()) - } - insn := insn{op: op, line: fcomp.pos.Line, col: fcomp.pos.Col} - fcomp.block.insns = append(fcomp.block.insns, insn) - fcomp.pos.Line = 0 - fcomp.pos.Col = 0 -} - -// emit1 emits an instruction with an immediate operand. -func (fcomp *fcomp) emit1(op Opcode, arg uint32) { - if op < OpcodeArgMin { - panic("unwanted arg: " + op.String()) - } - insn := insn{op: op, arg: arg, line: fcomp.pos.Line, col: fcomp.pos.Col} - fcomp.block.insns = append(fcomp.block.insns, insn) - fcomp.pos.Line = 0 - fcomp.pos.Col = 0 -} - -// jump emits a jump to the specified block. -// On return, the current block is unset. -func (fcomp *fcomp) jump(b *block) { - if b == fcomp.block { - panic("self-jump") // unreachable: Starlark has no arbitrary looping constructs - } - fcomp.block.jmp = b - fcomp.block = nil -} - -// condjump emits a conditional jump (CJMP or ITERJMP) -// to the specified true/false blocks. -// (For ITERJMP, the cases are jmp/f/ok and cjmp/t/exhausted.) -// On return, the current block is unset. -func (fcomp *fcomp) condjump(op Opcode, t, f *block) { - if !(op == CJMP || op == ITERJMP) { - panic("not a conditional jump: " + op.String()) - } - fcomp.emit1(op, 0) // fill in address later - fcomp.block.cjmp = t - fcomp.jump(f) -} - -// nameIndex returns the index of the specified name -// within the name pool, adding it if necessary. -func (pcomp *pcomp) nameIndex(name string) uint32 { - index, ok := pcomp.names[name] - if !ok { - index = uint32(len(pcomp.prog.Names)) - pcomp.names[name] = index - pcomp.prog.Names = append(pcomp.prog.Names, name) - } - return index -} - -// constantIndex returns the index of the specified constant -// within the constant pool, adding it if necessary. -func (pcomp *pcomp) constantIndex(v interface{}) uint32 { - index, ok := pcomp.constants[v] - if !ok { - index = uint32(len(pcomp.prog.Constants)) - pcomp.constants[v] = index - pcomp.prog.Constants = append(pcomp.prog.Constants, v) - } - return index -} - -// functionIndex returns the index of the specified function -// AST the nestedfun pool, adding it if necessary. -func (pcomp *pcomp) functionIndex(fn *Funcode) uint32 { - index, ok := pcomp.functions[fn] - if !ok { - index = uint32(len(pcomp.prog.Functions)) - pcomp.functions[fn] = index - pcomp.prog.Functions = append(pcomp.prog.Functions, fn) - } - return index -} - -// string emits code to push the specified string. -func (fcomp *fcomp) string(s string) { - fcomp.emit1(CONSTANT, fcomp.pcomp.constantIndex(s)) -} - -// setPos sets the current source position. -// It should be called prior to any operation that can fail dynamically. -// All positions are assumed to belong to the same file. -func (fcomp *fcomp) setPos(pos syntax.Position) { - fcomp.pos = pos -} - -// set emits code to store the top-of-stack value -// to the specified local, cell, or global variable. -func (fcomp *fcomp) set(id *syntax.Ident) { - bind := id.Binding.(*resolve.Binding) - switch bind.Scope { - case resolve.Local: - fcomp.emit1(SETLOCAL, uint32(bind.Index)) - case resolve.Cell: - // TODO(adonovan): opt: make a single op for LOCAL, SETCELL. 
- fcomp.emit1(LOCAL, uint32(bind.Index)) - fcomp.emit(SETCELL) - case resolve.Global: - fcomp.emit1(SETGLOBAL, uint32(bind.Index)) - default: - log.Fatalf("%s: set(%s): not global/local/cell (%d)", id.NamePos, id.Name, bind.Scope) - } -} - -// lookup emits code to push the value of the specified variable. -func (fcomp *fcomp) lookup(id *syntax.Ident) { - bind := id.Binding.(*resolve.Binding) - if bind.Scope != resolve.Universal { // (universal lookup can't fail) - fcomp.setPos(id.NamePos) - } - switch bind.Scope { - case resolve.Local: - fcomp.emit1(LOCAL, uint32(bind.Index)) - case resolve.Free: - // TODO(adonovan): opt: make a single op for FREE, CELL. - fcomp.emit1(FREE, uint32(bind.Index)) - fcomp.emit(CELL) - case resolve.Cell: - // TODO(adonovan): opt: make a single op for LOCAL, CELL. - fcomp.emit1(LOCAL, uint32(bind.Index)) - fcomp.emit(CELL) - case resolve.Global: - fcomp.emit1(GLOBAL, uint32(bind.Index)) - case resolve.Predeclared: - fcomp.emit1(PREDECLARED, fcomp.pcomp.nameIndex(id.Name)) - case resolve.Universal: - fcomp.emit1(UNIVERSAL, fcomp.pcomp.nameIndex(id.Name)) - default: - log.Fatalf("%s: compiler.lookup(%s): scope = %d", id.NamePos, id.Name, bind.Scope) - } -} - -func (fcomp *fcomp) stmts(stmts []syntax.Stmt) { - for _, stmt := range stmts { - fcomp.stmt(stmt) - } -} - -func (fcomp *fcomp) stmt(stmt syntax.Stmt) { - switch stmt := stmt.(type) { - case *syntax.ExprStmt: - if _, ok := stmt.X.(*syntax.Literal); ok { - // Opt: don't compile doc comments only to pop them. - return - } - fcomp.expr(stmt.X) - fcomp.emit(POP) - - case *syntax.BranchStmt: - // Resolver invariant: break/continue appear only within loops. - switch stmt.Token { - case syntax.PASS: - // no-op - case syntax.BREAK: - b := fcomp.loops[len(fcomp.loops)-1].break_ - fcomp.jump(b) - fcomp.block = fcomp.newBlock() // dead code - case syntax.CONTINUE: - b := fcomp.loops[len(fcomp.loops)-1].continue_ - fcomp.jump(b) - fcomp.block = fcomp.newBlock() // dead code - } - - case *syntax.IfStmt: - // Keep consistent with CondExpr. - t := fcomp.newBlock() - f := fcomp.newBlock() - done := fcomp.newBlock() - - fcomp.ifelse(stmt.Cond, t, f) - - fcomp.block = t - fcomp.stmts(stmt.True) - fcomp.jump(done) - - fcomp.block = f - fcomp.stmts(stmt.False) - fcomp.jump(done) - - fcomp.block = done - - case *syntax.AssignStmt: - switch stmt.Op { - case syntax.EQ: - // simple assignment: x = y - fcomp.expr(stmt.RHS) - fcomp.assign(stmt.OpPos, stmt.LHS) - - case syntax.PLUS_EQ, - syntax.MINUS_EQ, - syntax.STAR_EQ, - syntax.SLASH_EQ, - syntax.SLASHSLASH_EQ, - syntax.PERCENT_EQ, - syntax.AMP_EQ, - syntax.PIPE_EQ, - syntax.CIRCUMFLEX_EQ, - syntax.LTLT_EQ, - syntax.GTGT_EQ: - // augmented assignment: x += y - - var set func() - - // Evaluate "address" of x exactly once to avoid duplicate side-effects. - switch lhs := unparen(stmt.LHS).(type) { - case *syntax.Ident: - // x = ... - fcomp.lookup(lhs) - set = func() { - fcomp.set(lhs) - } - - case *syntax.IndexExpr: - // x[y] = ... - fcomp.expr(lhs.X) - fcomp.expr(lhs.Y) - fcomp.emit(DUP2) - fcomp.setPos(lhs.Lbrack) - fcomp.emit(INDEX) - set = func() { - fcomp.setPos(lhs.Lbrack) - fcomp.emit(SETINDEX) - } - - case *syntax.DotExpr: - // x.f = ... 
- fcomp.expr(lhs.X) - fcomp.emit(DUP) - name := fcomp.pcomp.nameIndex(lhs.Name.Name) - fcomp.setPos(lhs.Dot) - fcomp.emit1(ATTR, name) - set = func() { - fcomp.setPos(lhs.Dot) - fcomp.emit1(SETFIELD, name) - } - - default: - panic(lhs) - } - - fcomp.expr(stmt.RHS) - - if stmt.Op == syntax.PLUS_EQ { - // Allow the runtime to optimize list += iterable. - fcomp.setPos(stmt.OpPos) - fcomp.emit(INPLACE_ADD) - } else { - fcomp.binop(stmt.OpPos, stmt.Op-syntax.PLUS_EQ+syntax.PLUS) - } - set() - } - - case *syntax.DefStmt: - fcomp.function(stmt.Function.(*resolve.Function)) - fcomp.set(stmt.Name) - - case *syntax.ForStmt: - // Keep consistent with ForClause. - head := fcomp.newBlock() - body := fcomp.newBlock() - tail := fcomp.newBlock() - - fcomp.expr(stmt.X) - fcomp.setPos(stmt.For) - fcomp.emit(ITERPUSH) - fcomp.jump(head) - - fcomp.block = head - fcomp.condjump(ITERJMP, tail, body) - - fcomp.block = body - fcomp.assign(stmt.For, stmt.Vars) - fcomp.loops = append(fcomp.loops, loop{break_: tail, continue_: head}) - fcomp.stmts(stmt.Body) - fcomp.loops = fcomp.loops[:len(fcomp.loops)-1] - fcomp.jump(head) - - fcomp.block = tail - fcomp.emit(ITERPOP) - - case *syntax.WhileStmt: - head := fcomp.newBlock() - body := fcomp.newBlock() - done := fcomp.newBlock() - - fcomp.jump(head) - fcomp.block = head - fcomp.ifelse(stmt.Cond, body, done) - - fcomp.block = body - fcomp.loops = append(fcomp.loops, loop{break_: done, continue_: head}) - fcomp.stmts(stmt.Body) - fcomp.loops = fcomp.loops[:len(fcomp.loops)-1] - fcomp.jump(head) - - fcomp.block = done - - case *syntax.ReturnStmt: - if stmt.Result != nil { - fcomp.expr(stmt.Result) - } else { - fcomp.emit(NONE) - } - fcomp.emit(RETURN) - fcomp.block = fcomp.newBlock() // dead code - - case *syntax.LoadStmt: - for i := range stmt.From { - fcomp.string(stmt.From[i].Name) - } - module := stmt.Module.Value.(string) - fcomp.pcomp.prog.Loads = append(fcomp.pcomp.prog.Loads, Binding{ - Name: module, - Pos: stmt.Module.TokenPos, - }) - fcomp.string(module) - fcomp.setPos(stmt.Load) - fcomp.emit1(LOAD, uint32(len(stmt.From))) - for i := range stmt.To { - fcomp.set(stmt.To[len(stmt.To)-1-i]) - } - - default: - start, _ := stmt.Span() - log.Fatalf("%s: exec: unexpected statement %T", start, stmt) - } -} - -// assign implements lhs = rhs for arbitrary expressions lhs. -// RHS is on top of stack, consumed. 
-func (fcomp *fcomp) assign(pos syntax.Position, lhs syntax.Expr) { - switch lhs := lhs.(type) { - case *syntax.ParenExpr: - // (lhs) = rhs - fcomp.assign(pos, lhs.X) - - case *syntax.Ident: - // x = rhs - fcomp.set(lhs) - - case *syntax.TupleExpr: - // x, y = rhs - fcomp.assignSequence(pos, lhs.List) - - case *syntax.ListExpr: - // [x, y] = rhs - fcomp.assignSequence(pos, lhs.List) - - case *syntax.IndexExpr: - // x[y] = rhs - fcomp.expr(lhs.X) - fcomp.emit(EXCH) - fcomp.expr(lhs.Y) - fcomp.emit(EXCH) - fcomp.setPos(lhs.Lbrack) - fcomp.emit(SETINDEX) - - case *syntax.DotExpr: - // x.f = rhs - fcomp.expr(lhs.X) - fcomp.emit(EXCH) - fcomp.setPos(lhs.Dot) - fcomp.emit1(SETFIELD, fcomp.pcomp.nameIndex(lhs.Name.Name)) - - default: - panic(lhs) - } -} - -func (fcomp *fcomp) assignSequence(pos syntax.Position, lhs []syntax.Expr) { - fcomp.setPos(pos) - fcomp.emit1(UNPACK, uint32(len(lhs))) - for i := range lhs { - fcomp.assign(pos, lhs[i]) - } -} - -func (fcomp *fcomp) expr(e syntax.Expr) { - switch e := e.(type) { - case *syntax.ParenExpr: - fcomp.expr(e.X) - - case *syntax.Ident: - fcomp.lookup(e) - - case *syntax.Literal: - // e.Value is int64, float64, *bigInt, or string. - fcomp.emit1(CONSTANT, fcomp.pcomp.constantIndex(e.Value)) - - case *syntax.ListExpr: - for _, x := range e.List { - fcomp.expr(x) - } - fcomp.emit1(MAKELIST, uint32(len(e.List))) - - case *syntax.CondExpr: - // Keep consistent with IfStmt. - t := fcomp.newBlock() - f := fcomp.newBlock() - done := fcomp.newBlock() - - fcomp.ifelse(e.Cond, t, f) - - fcomp.block = t - fcomp.expr(e.True) - fcomp.jump(done) - - fcomp.block = f - fcomp.expr(e.False) - fcomp.jump(done) - - fcomp.block = done - - case *syntax.IndexExpr: - fcomp.expr(e.X) - fcomp.expr(e.Y) - fcomp.setPos(e.Lbrack) - fcomp.emit(INDEX) - - case *syntax.SliceExpr: - fcomp.setPos(e.Lbrack) - fcomp.expr(e.X) - if e.Lo != nil { - fcomp.expr(e.Lo) - } else { - fcomp.emit(NONE) - } - if e.Hi != nil { - fcomp.expr(e.Hi) - } else { - fcomp.emit(NONE) - } - if e.Step != nil { - fcomp.expr(e.Step) - } else { - fcomp.emit(NONE) - } - fcomp.emit(SLICE) - - case *syntax.Comprehension: - if e.Curly { - fcomp.emit(MAKEDICT) - } else { - fcomp.emit1(MAKELIST, 0) - } - fcomp.comprehension(e, 0) - - case *syntax.TupleExpr: - fcomp.tuple(e.List) - - case *syntax.DictExpr: - fcomp.emit(MAKEDICT) - for _, entry := range e.List { - entry := entry.(*syntax.DictEntry) - fcomp.emit(DUP) - fcomp.expr(entry.Key) - fcomp.expr(entry.Value) - fcomp.setPos(entry.Colon) - fcomp.emit(SETDICTUNIQ) - } - - case *syntax.UnaryExpr: - fcomp.expr(e.X) - fcomp.setPos(e.OpPos) - switch e.Op { - case syntax.MINUS: - fcomp.emit(UMINUS) - case syntax.PLUS: - fcomp.emit(UPLUS) - case syntax.NOT: - fcomp.emit(NOT) - case syntax.TILDE: - fcomp.emit(TILDE) - default: - log.Fatalf("%s: unexpected unary op: %s", e.OpPos, e.Op) - } - - case *syntax.BinaryExpr: - switch e.Op { - // short-circuit operators - // TODO(adonovan): use ifelse to simplify conditions. 
- case syntax.OR: - // x or y => if x then x else y - done := fcomp.newBlock() - y := fcomp.newBlock() - - fcomp.expr(e.X) - fcomp.emit(DUP) - fcomp.condjump(CJMP, done, y) - - fcomp.block = y - fcomp.emit(POP) // discard X - fcomp.expr(e.Y) - fcomp.jump(done) - - fcomp.block = done - - case syntax.AND: - // x and y => if x then y else x - done := fcomp.newBlock() - y := fcomp.newBlock() - - fcomp.expr(e.X) - fcomp.emit(DUP) - fcomp.condjump(CJMP, y, done) - - fcomp.block = y - fcomp.emit(POP) // discard X - fcomp.expr(e.Y) - fcomp.jump(done) - - fcomp.block = done - - case syntax.PLUS: - fcomp.plus(e) - - default: - // all other strict binary operator (includes comparisons) - fcomp.expr(e.X) - fcomp.expr(e.Y) - fcomp.binop(e.OpPos, e.Op) - } - - case *syntax.DotExpr: - fcomp.expr(e.X) - fcomp.setPos(e.Dot) - fcomp.emit1(ATTR, fcomp.pcomp.nameIndex(e.Name.Name)) - - case *syntax.CallExpr: - fcomp.call(e) - - case *syntax.LambdaExpr: - fcomp.function(e.Function.(*resolve.Function)) - - default: - start, _ := e.Span() - log.Fatalf("%s: unexpected expr %T", start, e) - } -} - -type summand struct { - x syntax.Expr - plusPos syntax.Position -} - -// plus emits optimized code for ((a+b)+...)+z that avoids naive -// quadratic behavior for strings, tuples, and lists, -// and folds together adjacent literals of the same type. -func (fcomp *fcomp) plus(e *syntax.BinaryExpr) { - // Gather all the right operands of the left tree of plusses. - // A tree (((a+b)+c)+d) becomes args=[a +b +c +d]. - args := make([]summand, 0, 2) // common case: 2 operands - for plus := e; ; { - args = append(args, summand{unparen(plus.Y), plus.OpPos}) - left := unparen(plus.X) - x, ok := left.(*syntax.BinaryExpr) - if !ok || x.Op != syntax.PLUS { - args = append(args, summand{x: left}) - break - } - plus = x - } - // Reverse args to syntactic order. - for i, n := 0, len(args)/2; i < n; i++ { - j := len(args) - 1 - i - args[i], args[j] = args[j], args[i] - } - - // Fold sums of adjacent literals of the same type: ""+"", []+[], ()+(). - out := args[:0] // compact in situ - for i := 0; i < len(args); { - j := i + 1 - if code := addable(args[i].x); code != 0 { - for j < len(args) && addable(args[j].x) == code { - j++ - } - if j > i+1 { - args[i].x = add(code, args[i:j]) - } - } - out = append(out, args[i]) - i = j - } - args = out - - // Emit code for an n-ary sum (n > 0). - fcomp.expr(args[0].x) - for _, summand := range args[1:] { - fcomp.expr(summand.x) - fcomp.setPos(summand.plusPos) - fcomp.emit(PLUS) - } - - // If len(args) > 2, use of an accumulator instead of a chain of - // PLUS operations may be more efficient. - // However, no gain was measured on a workload analogous to Bazel loading; - // TODO(adonovan): opt: re-evaluate on a Bazel analysis-like workload. - // - // We cannot use a single n-ary SUM operation - // a b c SUM<3> - // because we need to report a distinct error for each - // individual '+' operation, so three additional operations are - // needed: - // - // ACCSTART => create buffer and append to it - // ACCUM => append to buffer - // ACCEND => get contents of buffer - // - // For string, list, and tuple values, the interpreter can - // optimize these operations by using a mutable buffer. - // For all other types, ACCSTART and ACCEND would behave like - // the identity function and ACCUM behaves like PLUS. - // ACCUM must correctly support user-defined operations - // such as list+foo. 
- // - // fcomp.emit(ACCSTART) - // for _, summand := range args[1:] { - // fcomp.expr(summand.x) - // fcomp.setPos(summand.plusPos) - // fcomp.emit(ACCUM) - // } - // fcomp.emit(ACCEND) -} - -// addable reports whether e is a statically addable -// expression: a [s]tring, [l]ist, or [t]uple. -func addable(e syntax.Expr) rune { - switch e := e.(type) { - case *syntax.Literal: - // TODO(adonovan): opt: support INT/FLOAT/BIGINT constant folding. - switch e.Token { - case syntax.STRING: - return 's' - } - case *syntax.ListExpr: - return 'l' - case *syntax.TupleExpr: - return 't' - } - return 0 -} - -// add returns an expression denoting the sum of args, -// which are all addable values of the type indicated by code. -// The resulting syntax is degenerate, lacking position, etc. -func add(code rune, args []summand) syntax.Expr { - switch code { - case 's': - var buf bytes.Buffer - for _, arg := range args { - buf.WriteString(arg.x.(*syntax.Literal).Value.(string)) - } - return &syntax.Literal{Token: syntax.STRING, Value: buf.String()} - case 'l': - var elems []syntax.Expr - for _, arg := range args { - elems = append(elems, arg.x.(*syntax.ListExpr).List...) - } - return &syntax.ListExpr{List: elems} - case 't': - var elems []syntax.Expr - for _, arg := range args { - elems = append(elems, arg.x.(*syntax.TupleExpr).List...) - } - return &syntax.TupleExpr{List: elems} - } - panic(code) -} - -func unparen(e syntax.Expr) syntax.Expr { - if p, ok := e.(*syntax.ParenExpr); ok { - return unparen(p.X) - } - return e -} - -func (fcomp *fcomp) binop(pos syntax.Position, op syntax.Token) { - // TODO(adonovan): simplify by assuming syntax and compiler constants align. - fcomp.setPos(pos) - switch op { - // arithmetic - case syntax.PLUS: - fcomp.emit(PLUS) - case syntax.MINUS: - fcomp.emit(MINUS) - case syntax.STAR: - fcomp.emit(STAR) - case syntax.SLASH: - fcomp.emit(SLASH) - case syntax.SLASHSLASH: - fcomp.emit(SLASHSLASH) - case syntax.PERCENT: - fcomp.emit(PERCENT) - case syntax.AMP: - fcomp.emit(AMP) - case syntax.PIPE: - fcomp.emit(PIPE) - case syntax.CIRCUMFLEX: - fcomp.emit(CIRCUMFLEX) - case syntax.LTLT: - fcomp.emit(LTLT) - case syntax.GTGT: - fcomp.emit(GTGT) - case syntax.IN: - fcomp.emit(IN) - case syntax.NOT_IN: - fcomp.emit(IN) - fcomp.emit(NOT) - - // comparisons - case syntax.EQL, - syntax.NEQ, - syntax.GT, - syntax.LT, - syntax.LE, - syntax.GE: - fcomp.emit(Opcode(op-syntax.EQL) + EQL) - - default: - log.Fatalf("%s: unexpected binary op: %s", pos, op) - } -} - -func (fcomp *fcomp) call(call *syntax.CallExpr) { - // TODO(adonovan): opt: Use optimized path for calling methods - // of built-ins: x.f(...) to avoid materializing a closure. - // if dot, ok := call.Fcomp.(*syntax.DotExpr); ok { - // fcomp.expr(dot.X) - // fcomp.args(call) - // fcomp.emit1(CALL_ATTR, fcomp.name(dot.Name.Name)) - // return - // } - - // usual case - fcomp.expr(call.Fn) - op, arg := fcomp.args(call) - fcomp.setPos(call.Lparen) - fcomp.emit1(op, arg) -} - -// args emits code to push a tuple of positional arguments -// and a tuple of named arguments containing alternating keys and values. -// Either or both tuples may be empty (TODO(adonovan): optimize). -func (fcomp *fcomp) args(call *syntax.CallExpr) (op Opcode, arg uint32) { - var callmode int - // Compute the number of each kind of parameter. 
- var p, n int // number of positional, named arguments - var varargs, kwargs syntax.Expr - for _, arg := range call.Args { - if binary, ok := arg.(*syntax.BinaryExpr); ok && binary.Op == syntax.EQ { - - // named argument (name, value) - fcomp.string(binary.X.(*syntax.Ident).Name) - fcomp.expr(binary.Y) - n++ - continue - } - if unary, ok := arg.(*syntax.UnaryExpr); ok { - if unary.Op == syntax.STAR { - callmode |= 1 - varargs = unary.X - continue - } else if unary.Op == syntax.STARSTAR { - callmode |= 2 - kwargs = unary.X - continue - } - } - - // positional argument - fcomp.expr(arg) - p++ - } - - // Python2 and Python3 both permit named arguments - // to appear both before and after a *args argument: - // f(1, 2, x=3, *[4], y=5, **dict(z=6)) - // - // They also differ in their evaluation order: - // Python2: 1 2 3 5 4 6 (*args and **kwargs evaluated last) - // Python3: 1 2 4 3 5 6 (positional args evaluated before named args) - // Starlark-in-Java historically used a third order: - // Lexical: 1 2 3 4 5 6 (all args evaluated left-to-right) - // - // After discussion in github.com/bazelbuild/starlark#13, the - // spec now requires Starlark to statically reject named - // arguments after *args (e.g. y=5), and to use Python2-style - // evaluation order. This is both easy to implement and - // consistent with lexical order: - // - // f(1, 2, x=3, *[4], **dict(z=6)) # 1 2 3 4 6 - - // *args - if varargs != nil { - fcomp.expr(varargs) - } - - // **kwargs - if kwargs != nil { - fcomp.expr(kwargs) - } - - // TODO(adonovan): avoid this with a more flexible encoding. - if p >= 256 || n >= 256 { - // resolve already checked this; should be unreachable - panic("too many arguments in call") - } - - return CALL + Opcode(callmode), uint32(p<<8 | n) -} - -func (fcomp *fcomp) tuple(elems []syntax.Expr) { - for _, elem := range elems { - fcomp.expr(elem) - } - fcomp.emit1(MAKETUPLE, uint32(len(elems))) -} - -func (fcomp *fcomp) comprehension(comp *syntax.Comprehension, clauseIndex int) { - if clauseIndex == len(comp.Clauses) { - fcomp.emit(DUP) // accumulator - if comp.Curly { - // dict: {k:v for ...} - // Parser ensures that body is of form k:v. - // Python-style set comprehensions {body for vars in x} - // are not supported. - entry := comp.Body.(*syntax.DictEntry) - fcomp.expr(entry.Key) - fcomp.expr(entry.Value) - fcomp.setPos(entry.Colon) - fcomp.emit(SETDICT) - } else { - // list: [body for vars in x] - fcomp.expr(comp.Body) - fcomp.emit(APPEND) - } - return - } - - clause := comp.Clauses[clauseIndex] - switch clause := clause.(type) { - case *syntax.IfClause: - t := fcomp.newBlock() - done := fcomp.newBlock() - fcomp.ifelse(clause.Cond, t, done) - - fcomp.block = t - fcomp.comprehension(comp, clauseIndex+1) - fcomp.jump(done) - - fcomp.block = done - return - - case *syntax.ForClause: - // Keep consistent with ForStmt. - head := fcomp.newBlock() - body := fcomp.newBlock() - tail := fcomp.newBlock() - - fcomp.expr(clause.X) - fcomp.setPos(clause.For) - fcomp.emit(ITERPUSH) - fcomp.jump(head) - - fcomp.block = head - fcomp.condjump(ITERJMP, tail, body) - - fcomp.block = body - fcomp.assign(clause.For, clause.Vars) - fcomp.comprehension(comp, clauseIndex+1) - fcomp.jump(head) - - fcomp.block = tail - fcomp.emit(ITERPOP) - return - } - - start, _ := clause.Span() - log.Fatalf("%s: unexpected comprehension clause %T", start, clause) -} - -func (fcomp *fcomp) function(f *resolve.Function) { - // Evaluation of the defaults may fail, so record the position. 
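(Aside, not part of the change: the args helper above returns the CALL operand with the positional-argument count in the high bits and the number of named (key, value) pairs in the low byte, as the opcode comments earlier describe. A tiny sketch for a hypothetical call f(1, 2, x=3):)

package main

import "fmt"

func main() {
	// Hypothetical call f(1, 2, x=3): two positional arguments and one
	// named pair, packed the same way args() packs its return value.
	p, n := 2, 1
	arg := uint32(p<<8 | n)
	fmt.Printf("operand %#x -> %d positional, %d named\n", arg, arg>>8, arg&0xff)
	// operand 0x201 -> 2 positional, 1 named
}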
- fcomp.setPos(f.Pos) - - // To reduce allocation, we emit a combined tuple - // for the defaults and the freevars. - // The function knows where to split it at run time. - - // Generate tuple of parameter defaults. For: - // def f(p1, p2=dp2, p3=dp3, *, k1, k2=dk2, k3, **kwargs) - // the tuple is: - // (dp2, dp3, MANDATORY, dk2, MANDATORY). - ndefaults := 0 - seenStar := false - for _, param := range f.Params { - switch param := param.(type) { - case *syntax.BinaryExpr: - fcomp.expr(param.Y) - ndefaults++ - case *syntax.UnaryExpr: - seenStar = true // * or *args (also **kwargs) - case *syntax.Ident: - if seenStar { - fcomp.emit(MANDATORY) - ndefaults++ - } - } - } - - // Capture the cells of the function's - // free variables from the lexical environment. - for _, freevar := range f.FreeVars { - // Don't call fcomp.lookup because we want - // the cell itself, not its content. - switch freevar.Scope { - case resolve.Free: - fcomp.emit1(FREE, uint32(freevar.Index)) - case resolve.Cell: - fcomp.emit1(LOCAL, uint32(freevar.Index)) - } - } - - fcomp.emit1(MAKETUPLE, uint32(ndefaults+len(f.FreeVars))) - - funcode := fcomp.pcomp.function(f.Name, f.Pos, f.Body, f.Locals, f.FreeVars) - - if debug { - // TODO(adonovan): do compilations sequentially not as a tree, - // to make the log easier to read. - // Simplify by identifying Toplevel and functionIndex 0. - fmt.Fprintf(os.Stderr, "resuming %s @ %s\n", fcomp.fn.Name, fcomp.pos) - } - - // def f(a, *, b=1) has only 2 parameters. - numParams := len(f.Params) - if f.NumKwonlyParams > 0 && !f.HasVarargs { - numParams-- - } - - funcode.NumParams = numParams - funcode.NumKwonlyParams = f.NumKwonlyParams - funcode.HasVarargs = f.HasVarargs - funcode.HasKwargs = f.HasKwargs - fcomp.emit1(MAKEFUNC, fcomp.pcomp.functionIndex(funcode)) -} - -// ifelse emits a Boolean control flow decision. -// On return, the current block is unset. -func (fcomp *fcomp) ifelse(cond syntax.Expr, t, f *block) { - switch cond := cond.(type) { - case *syntax.UnaryExpr: - if cond.Op == syntax.NOT { - // if not x then goto t else goto f - // => - // if x then goto f else goto t - fcomp.ifelse(cond.X, f, t) - return - } - - case *syntax.BinaryExpr: - switch cond.Op { - case syntax.AND: - // if x and y then goto t else goto f - // => - // if x then ifelse(y, t, f) else goto f - fcomp.expr(cond.X) - y := fcomp.newBlock() - fcomp.condjump(CJMP, y, f) - - fcomp.block = y - fcomp.ifelse(cond.Y, t, f) - return - - case syntax.OR: - // if x or y then goto t else goto f - // => - // if x then goto t else ifelse(y, t, f) - fcomp.expr(cond.X) - y := fcomp.newBlock() - fcomp.condjump(CJMP, t, y) - - fcomp.block = y - fcomp.ifelse(cond.Y, t, f) - return - case syntax.NOT_IN: - // if x not in y then goto t else goto f - // => - // if x in y then goto f else goto t - copy := *cond - copy.Op = syntax.IN - fcomp.expr(©) - fcomp.condjump(CJMP, f, t) - return - } - } - - // general case - fcomp.expr(cond) - fcomp.condjump(CJMP, t, f) -} diff --git a/vendor/github.com/k14s/starlark-go/internal/compile/serial.go b/vendor/github.com/k14s/starlark-go/internal/compile/serial.go deleted file mode 100644 index 5dced7ca8..000000000 --- a/vendor/github.com/k14s/starlark-go/internal/compile/serial.go +++ /dev/null @@ -1,389 +0,0 @@ -package compile - -// This file defines functions to read and write a compile.Program to a file. -// -// It is the client's responsibility to avoid version skew between the -// compiler used to produce a file and the interpreter that consumes it. 
-// The version number is provided as a constant. -// Incompatible protocol changes should also increment the version number. -// -// Encoding -// -// Program: -// "sky!" [4]byte # magic number -// str uint32le # offset of section -// version varint # must match Version -// filename string -// numloads varint -// loads []Ident -// numnames varint -// names []string -// numconsts varint -// consts []Constant -// numglobals varint -// globals []Ident -// toplevel Funcode -// numfuncs varint -// funcs []Funcode -// []byte # concatenation of all referenced strings -// EOF -// -// Funcode: -// id Ident -// code []byte -// pclinetablen varint -// pclinetab []varint -// numlocals varint -// locals []Ident -// numcells varint -// cells []int -// numfreevars varint -// freevar []Ident -// maxstack varint -// numparams varint -// numkwonlyparams varint -// hasvarargs varint (0 or 1) -// haskwargs varint (0 or 1) -// -// Ident: -// filename string -// line, col varint -// -// Constant: # type data -// type varint # 0=string string -// data ... # 1=int varint -// # 2=float varint (bits as uint64) -// # 3=bigint string (decimal ASCII text) -// -// The encoding starts with a four-byte magic number. -// The next four bytes are a little-endian uint32 -// that provides the offset of the string section -// at the end of the file, which contains the ordered -// concatenation of all strings referenced by the -// program. This design permits the decoder to read -// the first and second parts of the file into different -// memory allocations: the first (the encoded program) -// is transient, but the second (the strings) persists -// for the life of the Program. -// -// Within the encoded program, all strings are referred -// to by their length. As the encoder and decoder process -// the entire file sequentially, they are in lock step, -// so the start offset of each string is implicit. -// -// Program.Code is represented as a []byte slice to permit -// modification when breakpoints are set. All other strings -// are represented as strings. They all (unsafely) share the -// same backing byte slice. -// -// Aside from the str field, all integers are encoded as varints. - -import ( - "encoding/binary" - "fmt" - "math" - "math/big" - debugpkg "runtime/debug" - "unsafe" - - "github.com/k14s/starlark-go/syntax" -) - -const magic = "!sky" - -// Encode encodes a compiled Starlark program. -func (prog *Program) Encode() []byte { - var e encoder - e.p = append(e.p, magic...) - e.p = append(e.p, "????"...) // string data offset; filled in later - e.int(Version) - e.string(prog.Toplevel.Pos.Filename()) - e.bindings(prog.Loads) - e.int(len(prog.Names)) - for _, name := range prog.Names { - e.string(name) - } - e.int(len(prog.Constants)) - for _, c := range prog.Constants { - switch c := c.(type) { - case string: - e.int(0) - e.string(c) - case int64: - e.int(1) - e.int64(c) - case float64: - e.int(2) - e.uint64(math.Float64bits(c)) - case *big.Int: - e.int(3) - e.string(c.Text(10)) - } - } - e.bindings(prog.Globals) - e.function(prog.Toplevel) - e.int(len(prog.Functions)) - for _, fn := range prog.Functions { - e.function(fn) - } - - // Patch in the offset of the string data section. - binary.LittleEndian.PutUint32(e.p[4:8], uint32(len(e.p))) - - return append(e.p, e.s...) 
-} - -type encoder struct { - p []byte // encoded program - s []byte // strings - tmp [binary.MaxVarintLen64]byte -} - -func (e *encoder) int(x int) { - e.int64(int64(x)) -} - -func (e *encoder) int64(x int64) { - n := binary.PutVarint(e.tmp[:], x) - e.p = append(e.p, e.tmp[:n]...) -} - -func (e *encoder) uint64(x uint64) { - n := binary.PutUvarint(e.tmp[:], x) - e.p = append(e.p, e.tmp[:n]...) -} - -func (e *encoder) string(s string) { - e.int(len(s)) - e.s = append(e.s, s...) -} - -func (e *encoder) bytes(b []byte) { - e.int(len(b)) - e.s = append(e.s, b...) -} - -func (e *encoder) binding(bind Binding) { - e.string(bind.Name) - e.int(int(bind.Pos.Line)) - e.int(int(bind.Pos.Col)) -} - -func (e *encoder) bindings(binds []Binding) { - e.int(len(binds)) - for _, bind := range binds { - e.binding(bind) - } -} - -func (e *encoder) function(fn *Funcode) { - e.binding(Binding{fn.Name, fn.Pos}) - e.string(fn.Doc) - e.bytes(fn.Code) - e.int(len(fn.pclinetab)) - for _, x := range fn.pclinetab { - e.int64(int64(x)) - } - e.bindings(fn.Locals) - e.int(len(fn.Cells)) - for _, index := range fn.Cells { - e.int(index) - } - e.bindings(fn.Freevars) - e.int(fn.MaxStack) - e.int(fn.NumParams) - e.int(fn.NumKwonlyParams) - e.int(b2i(fn.HasVarargs)) - e.int(b2i(fn.HasKwargs)) -} - -func b2i(b bool) int { - if b { - return 1 - } else { - return 0 - } -} - -// DecodeProgram decodes a compiled Starlark program from data. -func DecodeProgram(data []byte) (_ *Program, err error) { - if len(data) < len(magic) { - return nil, fmt.Errorf("not a compiled module: no magic number") - } - if got := string(data[:4]); got != magic { - return nil, fmt.Errorf("not a compiled module: got magic number %q, want %q", - got, magic) - } - defer func() { - if x := recover(); x != nil { - debugpkg.PrintStack() - err = fmt.Errorf("internal error while decoding program: %v", x) - } - }() - - offset := binary.LittleEndian.Uint32(data[4:8]) - d := decoder{ - p: data[8:offset], - s: append([]byte(nil), data[offset:]...), // allocate a copy, which will persist - } - - if v := d.int(); v != Version { - return nil, fmt.Errorf("version mismatch: read %d, want %d", v, Version) - } - - filename := d.string() - d.filename = &filename - - loads := d.bindings() - - names := make([]string, d.int()) - for i := range names { - names[i] = d.string() - } - - // constants - constants := make([]interface{}, d.int()) - for i := range constants { - var c interface{} - switch d.int() { - case 0: - c = d.string() - case 1: - c = d.int64() - case 2: - c = math.Float64frombits(d.uint64()) - case 3: - c, _ = new(big.Int).SetString(d.string(), 10) - } - constants[i] = c - } - - globals := d.bindings() - toplevel := d.function() - funcs := make([]*Funcode, d.int()) - for i := range funcs { - funcs[i] = d.function() - } - - prog := &Program{ - Loads: loads, - Names: names, - Constants: constants, - Globals: globals, - Functions: funcs, - Toplevel: toplevel, - } - toplevel.Prog = prog - for _, f := range funcs { - f.Prog = prog - } - - if len(d.p)+len(d.s) > 0 { - return nil, fmt.Errorf("internal error: unconsumed data during decoding") - } - - return prog, nil -} - -type decoder struct { - p []byte // encoded program - s []byte // strings - filename *string // (indirect to avoid keeping decoder live) -} - -func (d *decoder) int() int { - return int(d.int64()) -} - -func (d *decoder) int64() int64 { - x, len := binary.Varint(d.p[:]) - d.p = d.p[len:] - return x -} - -func (d *decoder) uint64() uint64 { - x, len := binary.Uvarint(d.p[:]) - d.p = d.p[len:] - 
return x -} - -func (d *decoder) string() (s string) { - if slice := d.bytes(); len(slice) > 0 { - // Avoid a memory allocation for each string - // by unsafely aliasing slice. - type string struct { - data *byte - len int - } - ptr := (*string)(unsafe.Pointer(&s)) - ptr.data = &slice[0] - ptr.len = len(slice) - } - return s -} - -func (d *decoder) bytes() []byte { - len := d.int() - r := d.s[:len:len] - d.s = d.s[len:] - return r -} - -func (d *decoder) binding() Binding { - name := d.string() - line := int32(d.int()) - col := int32(d.int()) - return Binding{Name: name, Pos: syntax.MakePosition(d.filename, line, col)} -} - -func (d *decoder) bindings() []Binding { - bindings := make([]Binding, d.int()) - for i := range bindings { - bindings[i] = d.binding() - } - return bindings -} - -func (d *decoder) ints() []int { - ints := make([]int, d.int()) - for i := range ints { - ints[i] = d.int() - } - return ints -} - -func (d *decoder) bool() bool { return d.int() != 0 } - -func (d *decoder) function() *Funcode { - id := d.binding() - doc := d.string() - code := d.bytes() - pclinetab := make([]uint16, d.int()) - for i := range pclinetab { - pclinetab[i] = uint16(d.int()) - } - locals := d.bindings() - cells := d.ints() - freevars := d.bindings() - maxStack := d.int() - numParams := d.int() - numKwonlyParams := d.int() - hasVarargs := d.int() != 0 - hasKwargs := d.int() != 0 - return &Funcode{ - // Prog is filled in later. - Pos: id.Pos, - Name: id.Name, - Doc: doc, - Code: code, - pclinetab: pclinetab, - Locals: locals, - Cells: cells, - Freevars: freevars, - MaxStack: maxStack, - NumParams: numParams, - NumKwonlyParams: numKwonlyParams, - HasVarargs: hasVarargs, - HasKwargs: hasKwargs, - } -} diff --git a/vendor/github.com/k14s/starlark-go/internal/spell/spell.go b/vendor/github.com/k14s/starlark-go/internal/spell/spell.go deleted file mode 100644 index 7739fabaa..000000000 --- a/vendor/github.com/k14s/starlark-go/internal/spell/spell.go +++ /dev/null @@ -1,115 +0,0 @@ -// Package spell file defines a simple spelling checker for use in attribute errors -// such as "no such field .foo; did you mean .food?". -package spell - -import ( - "strings" - "unicode" -) - -// Nearest returns the element of candidates -// nearest to x using the Levenshtein metric, -// or "" if none were promising. -func Nearest(x string, candidates []string) string { - // Ignore underscores and case when matching. - fold := func(s string) string { - return strings.Map(func(r rune) rune { - if r == '_' { - return -1 - } - return unicode.ToLower(r) - }, s) - } - - x = fold(x) - - var best string - bestD := (len(x) + 1) / 2 // allow up to 50% typos - for _, c := range candidates { - d := levenshtein(x, fold(c), bestD) - if d < bestD { - bestD = d - best = c - } - } - return best -} - -// levenshtein returns the non-negative Levenshtein edit distance -// between the byte strings x and y. -// -// If the computed distance exceeds max, -// the function may return early with an approximate value > max. -func levenshtein(x, y string, max int) int { - // This implementation is derived from one by Laurent Le Brun in - // Bazel that uses the single-row space efficiency trick - // described at bitbucket.org/clearer/iosifovich. - - // Let x be the shorter string. - if len(x) > len(y) { - x, y = y, x - } - - // Remove common prefix. 
- for i := 0; i < len(x); i++ { - if x[i] != y[i] { - x = x[i:] - y = y[i:] - break - } - } - if x == "" { - return len(y) - } - - if d := abs(len(x) - len(y)); d > max { - return d // excessive length divergence - } - - row := make([]int, len(y)+1) - for i := range row { - row[i] = i - } - - for i := 1; i <= len(x); i++ { - row[0] = i - best := i - prev := i - 1 - for j := 1; j <= len(y); j++ { - a := prev + b2i(x[i-1] != y[j-1]) // substitution - b := 1 + row[j-1] // deletion - c := 1 + row[j] // insertion - k := min(a, min(b, c)) - prev, row[j] = row[j], k - best = min(best, k) - } - if best > max { - return best - } - } - return row[len(y)] -} - -func b2i(b bool) int { - if b { - return 1 - } else { - return 0 - } -} - -func min(x, y int) int { - if x < y { - return x - } else { - return y - } -} - -func abs(x int) int { - if x >= 0 { - return x - } else { - return -x - } -} diff --git a/vendor/github.com/k14s/starlark-go/resolve/binding.go b/vendor/github.com/k14s/starlark-go/resolve/binding.go deleted file mode 100644 index 3e1b52699..000000000 --- a/vendor/github.com/k14s/starlark-go/resolve/binding.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package resolve - -import "github.com/k14s/starlark-go/syntax" - -// This file defines resolver data types saved in the syntax tree. -// We cannot guarantee API stability for these types -// as they are closely tied to the implementation. - -// A Binding contains resolver information about an identifer. -// The resolver populates the Binding field of each syntax.Identifier. -// The Binding ties together all identifiers that denote the same variable. -type Binding struct { - Scope Scope - - // Index records the index into the enclosing - // - {DefStmt,File}.Locals, if Scope==Local - // - DefStmt.FreeVars, if Scope==Free - // - File.Globals, if Scope==Global. - // It is zero if Scope is Predeclared, Universal, or Undefined. - Index int - - First *syntax.Ident // first binding use (iff Scope==Local/Free/Global) -} - -// The Scope of Binding indicates what kind of scope it has. -type Scope uint8 - -const ( - Undefined Scope = iota // name is not defined - Local // name is local to its function or file - Cell // name is function-local but shared with a nested function - Free // name is cell of some enclosing function - Global // name is global to module - Predeclared // name is predeclared for this module (e.g. glob) - Universal // name is universal (e.g. len) -) - -var scopeNames = [...]string{ - Undefined: "undefined", - Local: "local", - Cell: "cell", - Free: "free", - Global: "global", - Predeclared: "predeclared", - Universal: "universal", -} - -func (scope Scope) String() string { return scopeNames[scope] } - -// A Module contains resolver information about a file. -// The resolver populates the Module field of each syntax.File. -type Module struct { - Locals []*Binding // the file's (comprehension-)local variables - Globals []*Binding // the file's global variables -} - -// A Function contains resolver information about a named or anonymous function. -// The resolver populates the Function field of each syntax.DefStmt and syntax.LambdaExpr. 
-type Function struct { - Pos syntax.Position // of DEF or LAMBDA - Name string // name of def, or "lambda" - Params []syntax.Expr // param = ident | ident=expr | * | *ident | **ident - Body []syntax.Stmt // contains synthetic 'return expr' for lambda - - HasVarargs bool // whether params includes *args (convenience) - HasKwargs bool // whether params includes **kwargs (convenience) - NumKwonlyParams int // number of keyword-only optional parameters - Locals []*Binding // this function's local/cell variables, parameters first - FreeVars []*Binding // enclosing cells to capture in closure -} diff --git a/vendor/github.com/k14s/starlark-go/resolve/resolve.go b/vendor/github.com/k14s/starlark-go/resolve/resolve.go deleted file mode 100644 index 96828e76a..000000000 --- a/vendor/github.com/k14s/starlark-go/resolve/resolve.go +++ /dev/null @@ -1,959 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package resolve defines a name-resolution pass for Starlark abstract -// syntax trees. -// -// The resolver sets the Locals and FreeVars arrays of each DefStmt and -// the LocalIndex field of each syntax.Ident that refers to a local or -// free variable. It also sets the Locals array of a File for locals -// bound by top-level comprehensions and load statements. -// Identifiers for global variables do not get an index. -package resolve // import "github.com/k14s/starlark-go/resolve" - -// All references to names are statically resolved. Names may be -// predeclared, global, or local to a function or file. -// File-local variables include those bound by top-level comprehensions -// and by load statements. ("Top-level" means "outside of any function".) -// The resolver maps each global name to a small integer and each local -// name to a small integer; these integers enable a fast and compact -// representation of globals and locals in the evaluator. -// -// As an optimization, the resolver classifies each predeclared name as -// either universal (e.g. None, len) or per-module (e.g. glob in Bazel's -// build language), enabling the evaluator to share the representation -// of the universal environment across all modules. -// -// The lexical environment is a tree of blocks with the file block at -// its root. The file's child blocks may be of two kinds: functions -// and comprehensions, and these may have further children of either -// kind. -// -// Python-style resolution requires multiple passes because a name is -// determined to be local to a function only if the function contains a -// "binding" use of it; similarly, a name is determined to be global (as -// opposed to predeclared) if the module contains a top-level binding use. -// Unlike ordinary top-level assignments, the bindings created by load -// statements are local to the file block. -// A non-binding use may lexically precede the binding to which it is resolved. -// In the first pass, we inspect each function, recording in -// 'uses' each identifier and the environment block in which it occurs. -// If a use of a name is binding, such as a function parameter or -// assignment, we add the name to the block's bindings mapping and add a -// local variable to the enclosing function. -// -// As we finish resolving each function, we inspect all the uses within -// that function and discard ones that were found to be function-local. 
The -// remaining ones must be either free (local to some lexically enclosing -// function), or top-level (global, predeclared, or file-local), but we cannot tell -// which until we have finished inspecting the outermost enclosing -// function. At that point, we can distinguish local from top-level names -// (and this is when Python would compute free variables). -// -// However, Starlark additionally requires that all references to global -// names are satisfied by some declaration in the current module; -// Starlark permits a function to forward-reference a global or file-local -// that has not -// been declared yet so long as it is declared before the end of the -// module. So, instead of re-resolving the unresolved references after -// each top-level function, we defer this until the end of the module -// and ensure that all such references are satisfied by some definition. -// -// At the end of the module, we visit each of the nested function blocks -// in bottom-up order, doing a recursive lexical lookup for each -// unresolved name. If the name is found to be local to some enclosing -// function, we must create a DefStmt.FreeVar (capture) parameter for -// each intervening function. We enter these synthetic bindings into -// the bindings map so that we create at most one freevar per name. If -// the name was not local, we check that it was defined at module level. -// -// We resolve all uses of locals in the module (due to load statements -// and comprehensions) in a similar way and compute the file's set of -// local variables. -// -// Starlark enforces that all global names are assigned at most once on -// all control flow paths by forbidding if/else statements and loops at -// top level. A global may be used before it is defined, leading to a -// dynamic error. However, the AllowGlobalReassign flag (really: allow -// top-level reassign) makes the resolver allow multiple to a variable -// at top-level. It also allows if-, for-, and while-loops at top-level, -// which in turn may make the evaluator dynamically assign multiple -// values to a variable at top-level. (These two roles should be separated.) - -import ( - "fmt" - "log" - "sort" - "strings" - - "github.com/k14s/starlark-go/internal/spell" - "github.com/k14s/starlark-go/syntax" -) - -const debug = false -const doesnt = "this Starlark dialect does not " - -// global options -// These features are either not standard Starlark (yet), or deprecated -// features of the BUILD language, so we put them behind flags. -var ( - AllowNestedDef = false // allow def statements within function bodies - AllowLambda = false // allow lambda expressions - AllowFloat = false // allow floating point literals, the 'float' built-in, and x / y - AllowSet = false // allow the 'set' built-in - AllowGlobalReassign = false // allow reassignment to top-level names; also, allow if/for/while at top-level - AllowRecursion = false // allow while statements and recursive functions - AllowBitwise = true // obsolete; bitwise operations (&, |, ^, ~, <<, and >>) are always enabled - LoadBindsGlobally = false // load creates global not file-local bindings (deprecated) -) - -// File resolves the specified file and records information about the -// module in file.Module. -// -// The isPredeclared and isUniversal predicates report whether a name is -// a pre-declared identifier (visible in the current module) or a -// universal identifier (visible in every module). 
-// Clients should typically pass predeclared.Has for the first and -// starlark.Universe.Has for the second, where predeclared is the -// module's StringDict of predeclared names and starlark.Universe is the -// standard set of built-ins. -// The isUniverse predicate is supplied a parameter to avoid a cyclic -// dependency upon starlark.Universe, not because users should ever need -// to redefine it. -func File(file *syntax.File, isPredeclared, isUniversal func(name string) bool) error { - r := newResolver(isPredeclared, isUniversal) - r.stmts(file.Stmts) - - r.env.resolveLocalUses() - - // At the end of the module, resolve all non-local variable references, - // computing closures. - // Function bodies may contain forward references to later global declarations. - r.resolveNonLocalUses(r.env) - - file.Module = &Module{ - Locals: r.moduleLocals, - Globals: r.moduleGlobals, - } - - if len(r.errors) > 0 { - return r.errors - } - return nil -} - -// Expr resolves the specified expression. -// It returns the local variables bound within the expression. -// -// The isPredeclared and isUniversal predicates behave as for the File function. -func Expr(expr syntax.Expr, isPredeclared, isUniversal func(name string) bool) ([]*Binding, error) { - r := newResolver(isPredeclared, isUniversal) - r.expr(expr) - r.env.resolveLocalUses() - r.resolveNonLocalUses(r.env) // globals & universals - if len(r.errors) > 0 { - return nil, r.errors - } - return r.moduleLocals, nil -} - -// An ErrorList is a non-empty list of resolver error messages. -type ErrorList []Error // len > 0 - -func (e ErrorList) Error() string { return e[0].Error() } - -// An Error describes the nature and position of a resolver error. -type Error struct { - Pos syntax.Position - Msg string -} - -func (e Error) Error() string { return e.Pos.String() + ": " + e.Msg } - -func newResolver(isPredeclared, isUniversal func(name string) bool) *resolver { - file := new(block) - return &resolver{ - file: file, - env: file, - isPredeclared: isPredeclared, - isUniversal: isUniversal, - globals: make(map[string]*Binding), - predeclared: make(map[string]*Binding), - } -} - -type resolver struct { - // env is the current local environment: - // a linked list of blocks, innermost first. - // The tail of the list is the file block. - env *block - file *block // file block (contains load bindings) - - // moduleLocals contains the local variables of the module - // (due to load statements and comprehensions outside any function). - // moduleGlobals contains the global variables of the module. - moduleLocals []*Binding - moduleGlobals []*Binding - - // globals maps each global name in the module to its binding. - // predeclared does the same for predeclared and universal names. - globals map[string]*Binding - predeclared map[string]*Binding - - // These predicates report whether a name is - // pre-declared, either in this module or universally. - isPredeclared, isUniversal func(name string) bool - - loops int // number of enclosing for loops - - errors ErrorList -} - -// container returns the innermost enclosing "container" block: -// a function (function != nil) or file (function == nil). -// Container blocks accumulate local variable bindings. 
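For orientation, the parse-then-resolve sequence that the File doc comment above describes looks roughly like this when written against the vendored fork being removed (a sketch, not code from this repository):

package main

import (
	"log"

	"github.com/k14s/starlark-go/resolve"
	"github.com/k14s/starlark-go/starlark"
	"github.com/k14s/starlark-go/syntax"
)

func main() {
	predeclared := starlark.StringDict{"greeting": starlark.String("hello")}

	f, err := syntax.Parse("example.star", "msg = greeting + ', world'", 0)
	if err != nil {
		log.Fatal(err)
	}

	// resolve.File records its results in f.Module; the two predicates tell it
	// which names are module-level predeclared vs. universal built-ins.
	if err := resolve.File(f, predeclared.Has, starlark.Universe.Has); err != nil {
		log.Fatal(err) // an ErrorList of resolver errors
	}

	mod := f.Module.(*resolve.Module)
	log.Printf("globals=%d file-locals=%d", len(mod.Globals), len(mod.Locals))
}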
-func (r *resolver) container() *block { - for b := r.env; ; b = b.parent { - if b.function != nil || b == r.file { - return b - } - } -} - -func (r *resolver) push(b *block) { - r.env.children = append(r.env.children, b) - b.parent = r.env - r.env = b -} - -func (r *resolver) pop() { r.env = r.env.parent } - -type block struct { - parent *block // nil for file block - - // In the file (root) block, both these fields are nil. - function *Function // only for function blocks - comp *syntax.Comprehension // only for comprehension blocks - - // bindings maps a name to its binding. - // A local binding has an index into its innermost enclosing container's locals array. - // A free binding has an index into its innermost enclosing function's freevars array. - bindings map[string]*Binding - - // children records the child blocks of the current one. - children []*block - - // uses records all identifiers seen in this container (function or file), - // and a reference to the environment in which they appear. - // As we leave each container block, we resolve them, - // so that only free and global ones remain. - // At the end of each top-level function we compute closures. - uses []use -} - -func (b *block) bind(name string, bind *Binding) { - if b.bindings == nil { - b.bindings = make(map[string]*Binding) - } - b.bindings[name] = bind -} - -func (b *block) String() string { - if b.function != nil { - return "function block at " + fmt.Sprint(b.function.Pos) - } - if b.comp != nil { - return "comprehension block at " + fmt.Sprint(b.comp.Span()) - } - return "file block" -} - -func (r *resolver) errorf(posn syntax.Position, format string, args ...interface{}) { - r.errors = append(r.errors, Error{posn, fmt.Sprintf(format, args...)}) -} - -// A use records an identifier and the environment in which it appears. -type use struct { - id *syntax.Ident - env *block -} - -// bind creates a binding for id: a global (not file-local) -// binding at top-level, a local binding otherwise. -// At top-level, it reports an error if a global or file-local -// binding already exists, unless AllowGlobalReassign. -// It sets id.Binding to the binding (whether old or new), -// and returns whether a binding already existed. -func (r *resolver) bind(id *syntax.Ident) bool { - // Binding outside any local (comprehension/function) block? - if r.env == r.file { - bind, ok := r.file.bindings[id.Name] - if !ok { - bind, ok = r.globals[id.Name] - if !ok { - // first global binding of this name - bind = &Binding{ - First: id, - Scope: Global, - Index: len(r.moduleGlobals), - } - r.globals[id.Name] = bind - r.moduleGlobals = append(r.moduleGlobals, bind) - } - } - if ok && !AllowGlobalReassign { - r.errorf(id.NamePos, "cannot reassign %s %s declared at %s", - bind.Scope, id.Name, bind.First.NamePos) - } - id.Binding = bind - return ok - } - - return r.bindLocal(id) -} - -func (r *resolver) bindLocal(id *syntax.Ident) bool { - // Mark this name as local to current block. - // Assign it a new local (positive) index in the current container. 
- _, ok := r.env.bindings[id.Name] - if !ok { - var locals *[]*Binding - if fn := r.container().function; fn != nil { - locals = &fn.Locals - } else { - locals = &r.moduleLocals - } - bind := &Binding{ - First: id, - Scope: Local, - Index: len(*locals), - } - r.env.bind(id.Name, bind) - *locals = append(*locals, bind) - } - - r.use(id) - return ok -} - -func (r *resolver) use(id *syntax.Ident) { - use := use{id, r.env} - - // The spec says that if there is a global binding of a name - // then all references to that name in that block refer to the - // global, even if the use precedes the def---just as for locals. - // For example, in this code, - // - // print(len); len=1; print(len) - // - // both occurrences of len refer to the len=1 binding, which - // completely shadows the predeclared len function. - // - // The rationale for these semantics, which differ from Python, - // is that the static meaning of len (a reference to a global) - // does not change depending on where it appears in the file. - // Of course, its dynamic meaning does change, from an error - // into a valid reference, so it's not clear these semantics - // have any practical advantage. - // - // In any case, the Bazel implementation lags behind the spec - // and follows Python behavior, so the first use of len refers - // to the predeclared function. This typically used in a BUILD - // file that redefines a predeclared name half way through, - // for example: - // - // proto_library(...) # built-in rule - // load("myproto.bzl", "proto_library") - // proto_library(...) # user-defined rule - // - // We will piggyback support for the legacy semantics on the - // AllowGlobalReassign flag, which is loosely related and also - // required for Bazel. - if AllowGlobalReassign && r.env == r.file { - r.useToplevel(use) - return - } - - b := r.container() - b.uses = append(b.uses, use) -} - -// useToplevel resolves use.id as a reference to a name visible at top-level. -// The use.env field captures the original environment for error reporting. -func (r *resolver) useToplevel(use use) (bind *Binding) { - id := use.id - - if prev, ok := r.file.bindings[id.Name]; ok { - // use of load-defined name in file block - bind = prev - } else if prev, ok := r.globals[id.Name]; ok { - // use of global declared by module - bind = prev - } else if prev, ok := r.predeclared[id.Name]; ok { - // repeated use of predeclared or universal - bind = prev - } else if r.isPredeclared(id.Name) { - // use of pre-declared name - bind = &Binding{Scope: Predeclared} - r.predeclared[id.Name] = bind // save it - } else if r.isUniversal(id.Name) { - // use of universal name - if !AllowFloat && id.Name == "float" { - r.errorf(id.NamePos, doesnt+"support floating point") - } - if !AllowSet && id.Name == "set" { - r.errorf(id.NamePos, doesnt+"support sets") - } - bind = &Binding{Scope: Universal} - r.predeclared[id.Name] = bind // save it - } else { - bind = &Binding{Scope: Undefined} - var hint string - if n := r.spellcheck(use); n != "" { - hint = fmt.Sprintf(" (did you mean %s?)", n) - } - r.errorf(id.NamePos, "undefined: %s%s", id.Name, hint) - } - id.Binding = bind - return bind -} - -// spellcheck returns the most likely misspelling of -// the name use.id in the environment use.env. 
-func (r *resolver) spellcheck(use use) string { - var names []string - - // locals - for b := use.env; b != nil; b = b.parent { - for name := range b.bindings { - names = append(names, name) - } - } - - // globals - // - // We have no way to enumerate predeclared/universe, - // which includes prior names in the REPL session. - for _, bind := range r.moduleGlobals { - names = append(names, bind.First.Name) - } - - sort.Strings(names) - return spell.Nearest(use.id.Name, names) -} - -// resolveLocalUses is called when leaving a container (function/module) -// block. It resolves all uses of locals/cells within that block. -func (b *block) resolveLocalUses() { - unresolved := b.uses[:0] - for _, use := range b.uses { - if bind := lookupLocal(use); bind != nil && (bind.Scope == Local || bind.Scope == Cell) { - use.id.Binding = bind - } else { - unresolved = append(unresolved, use) - } - } - b.uses = unresolved -} - -func (r *resolver) stmts(stmts []syntax.Stmt) { - for _, stmt := range stmts { - r.stmt(stmt) - } -} - -func (r *resolver) stmt(stmt syntax.Stmt) { - switch stmt := stmt.(type) { - case *syntax.ExprStmt: - r.expr(stmt.X) - - case *syntax.BranchStmt: - if r.loops == 0 && (stmt.Token == syntax.BREAK || stmt.Token == syntax.CONTINUE) { - r.errorf(stmt.TokenPos, "%s not in a loop", stmt.Token) - } - - case *syntax.IfStmt: - if !AllowGlobalReassign && r.container().function == nil { - r.errorf(stmt.If, "if statement not within a function") - } - r.expr(stmt.Cond) - r.stmts(stmt.True) - r.stmts(stmt.False) - - case *syntax.AssignStmt: - r.expr(stmt.RHS) - isAugmented := stmt.Op != syntax.EQ - r.assign(stmt.LHS, isAugmented) - - case *syntax.DefStmt: - if !AllowNestedDef && r.container().function != nil { - r.errorf(stmt.Def, doesnt+"support nested def") - } - r.bind(stmt.Name) - fn := &Function{ - Name: stmt.Name.Name, - Pos: stmt.Def, - Params: stmt.Params, - Body: stmt.Body, - } - stmt.Function = fn - r.function(fn, stmt.Def) - - case *syntax.ForStmt: - if !AllowGlobalReassign && r.container().function == nil { - r.errorf(stmt.For, "for loop not within a function") - } - r.expr(stmt.X) - const isAugmented = false - r.assign(stmt.Vars, isAugmented) - r.loops++ - r.stmts(stmt.Body) - r.loops-- - - case *syntax.WhileStmt: - if !AllowRecursion { - r.errorf(stmt.While, doesnt+"support while loops") - } - if !AllowGlobalReassign && r.container().function == nil { - r.errorf(stmt.While, "while loop not within a function") - } - r.expr(stmt.Cond) - r.loops++ - r.stmts(stmt.Body) - r.loops-- - - case *syntax.ReturnStmt: - if r.container().function == nil { - r.errorf(stmt.Return, "return statement not within a function") - } - if stmt.Result != nil { - r.expr(stmt.Result) - } - - case *syntax.LoadStmt: - if r.container().function != nil { - r.errorf(stmt.Load, "load statement within a function") - } - - for i, from := range stmt.From { - if from.Name == "" { - r.errorf(from.NamePos, "load: empty identifier") - continue - } - if from.Name[0] == '_' { - r.errorf(from.NamePos, "load: names with leading underscores are not exported: %s", from.Name) - } - - id := stmt.To[i] - if LoadBindsGlobally { - r.bind(id) - } else if r.bindLocal(id) && !AllowGlobalReassign { - // "Global" in AllowGlobalReassign is a misnomer for "toplevel". - // Sadly we can't report the previous declaration - // as id.Binding may not be set yet. 
- r.errorf(id.NamePos, "cannot reassign top-level %s", id.Name) - } - } - - default: - log.Fatalf("unexpected stmt %T", stmt) - } -} - -func (r *resolver) assign(lhs syntax.Expr, isAugmented bool) { - switch lhs := lhs.(type) { - case *syntax.Ident: - // x = ... - r.bind(lhs) - - case *syntax.IndexExpr: - // x[i] = ... - r.expr(lhs.X) - r.expr(lhs.Y) - - case *syntax.DotExpr: - // x.f = ... - r.expr(lhs.X) - - case *syntax.TupleExpr: - // (x, y) = ... - if len(lhs.List) == 0 { - r.errorf(syntax.Start(lhs), "can't assign to ()") - } - if isAugmented { - r.errorf(syntax.Start(lhs), "can't use tuple expression in augmented assignment") - } - for _, elem := range lhs.List { - r.assign(elem, isAugmented) - } - - case *syntax.ListExpr: - // [x, y, z] = ... - if len(lhs.List) == 0 { - r.errorf(syntax.Start(lhs), "can't assign to []") - } - if isAugmented { - r.errorf(syntax.Start(lhs), "can't use list expression in augmented assignment") - } - for _, elem := range lhs.List { - r.assign(elem, isAugmented) - } - - case *syntax.ParenExpr: - r.assign(lhs.X, isAugmented) - - default: - name := strings.ToLower(strings.TrimPrefix(fmt.Sprintf("%T", lhs), "*syntax.")) - r.errorf(syntax.Start(lhs), "can't assign to %s", name) - } -} - -func (r *resolver) expr(e syntax.Expr) { - switch e := e.(type) { - case *syntax.Ident: - r.use(e) - - case *syntax.Literal: - if !AllowFloat && e.Token == syntax.FLOAT { - r.errorf(e.TokenPos, doesnt+"support floating point") - } - - case *syntax.ListExpr: - for _, x := range e.List { - r.expr(x) - } - - case *syntax.CondExpr: - r.expr(e.Cond) - r.expr(e.True) - r.expr(e.False) - - case *syntax.IndexExpr: - r.expr(e.X) - r.expr(e.Y) - - case *syntax.DictEntry: - r.expr(e.Key) - r.expr(e.Value) - - case *syntax.SliceExpr: - r.expr(e.X) - if e.Lo != nil { - r.expr(e.Lo) - } - if e.Hi != nil { - r.expr(e.Hi) - } - if e.Step != nil { - r.expr(e.Step) - } - - case *syntax.Comprehension: - // The 'in' operand of the first clause (always a ForClause) - // is resolved in the outer block; consider: [x for x in x]. - clause := e.Clauses[0].(*syntax.ForClause) - r.expr(clause.X) - - // A list/dict comprehension defines a new lexical block. - // Locals defined within the block will be allotted - // distinct slots in the locals array of the innermost - // enclosing container (function/module) block. 
- r.push(&block{comp: e}) - - const isAugmented = false - r.assign(clause.Vars, isAugmented) - - for _, clause := range e.Clauses[1:] { - switch clause := clause.(type) { - case *syntax.IfClause: - r.expr(clause.Cond) - case *syntax.ForClause: - r.assign(clause.Vars, isAugmented) - r.expr(clause.X) - } - } - r.expr(e.Body) // body may be *DictEntry - r.pop() - - case *syntax.TupleExpr: - for _, x := range e.List { - r.expr(x) - } - - case *syntax.DictExpr: - for _, entry := range e.List { - entry := entry.(*syntax.DictEntry) - r.expr(entry.Key) - r.expr(entry.Value) - } - - case *syntax.UnaryExpr: - r.expr(e.X) - - case *syntax.BinaryExpr: - if !AllowFloat && e.Op == syntax.SLASH { - r.errorf(e.OpPos, doesnt+"support floating point (use //)") - } - r.expr(e.X) - r.expr(e.Y) - - case *syntax.DotExpr: - r.expr(e.X) - // ignore e.Name - - case *syntax.CallExpr: - r.expr(e.Fn) - var seenVarargs, seenKwargs bool - var seenName map[string]bool - var n, p int - for _, arg := range e.Args { - pos, _ := arg.Span() - if unop, ok := arg.(*syntax.UnaryExpr); ok && unop.Op == syntax.STARSTAR { - // **kwargs - if seenKwargs { - r.errorf(pos, "multiple **kwargs not allowed") - } - seenKwargs = true - r.expr(arg) - } else if ok && unop.Op == syntax.STAR { - // *args - if seenKwargs { - r.errorf(pos, "*args may not follow **kwargs") - } else if seenVarargs { - r.errorf(pos, "multiple *args not allowed") - } - seenVarargs = true - r.expr(arg) - } else if binop, ok := arg.(*syntax.BinaryExpr); ok && binop.Op == syntax.EQ { - // k=v - n++ - if seenKwargs { - r.errorf(pos, "argument may not follow **kwargs") - } - x := binop.X.(*syntax.Ident) - if seenName[x.Name] { - r.errorf(x.NamePos, "keyword argument %s repeated", x.Name) - } else { - if seenName == nil { - seenName = make(map[string]bool) - } - seenName[x.Name] = true - } - r.expr(binop.Y) - } else { - // positional argument - p++ - if seenVarargs { - r.errorf(pos, "argument may not follow *args") - } else if seenKwargs { - r.errorf(pos, "argument may not follow **kwargs") - } else if len(seenName) > 0 { - r.errorf(pos, "positional argument may not follow named") - } - r.expr(arg) - } - } - - // Fail gracefully if compiler-imposed limit is exceeded. - if p >= 256 { - pos, _ := e.Span() - r.errorf(pos, "%v positional arguments in call, limit is 255", p) - } - if n >= 256 { - pos, _ := e.Span() - r.errorf(pos, "%v keyword arguments in call, limit is 255", n) - } - - case *syntax.LambdaExpr: - if !AllowLambda { - r.errorf(e.Lambda, doesnt+"support lambda") - } - fn := &Function{ - Name: "lambda", - Pos: e.Lambda, - Params: e.Params, - Body: []syntax.Stmt{&syntax.ReturnStmt{Result: e.Body}}, - } - e.Function = fn - r.function(fn, e.Lambda) - - case *syntax.ParenExpr: - r.expr(e.X) - - default: - log.Fatalf("unexpected expr %T", e) - } -} - -func (r *resolver) function(function *Function, pos syntax.Position) { - // Resolve defaults in enclosing environment. - for _, param := range function.Params { - if binary, ok := param.(*syntax.BinaryExpr); ok { - r.expr(binary.Y) - } - } - - // Enter function block. - b := &block{function: function} - r.push(b) - - var seenOptional bool - var star *syntax.UnaryExpr // * or *args param - var starStar *syntax.Ident // **kwargs ident - var numKwonlyParams int - for _, param := range function.Params { - switch param := param.(type) { - case *syntax.Ident: - // e.g. 
x - if starStar != nil { - r.errorf(param.NamePos, "required parameter may not follow **%s", starStar.Name) - } else if star != nil { - numKwonlyParams++ - } else if seenOptional { - r.errorf(param.NamePos, "required parameter may not follow optional") - } - if r.bind(param) { - r.errorf(param.NamePos, "duplicate parameter: %s", param.Name) - } - - case *syntax.BinaryExpr: - // e.g. y=dflt - if starStar != nil { - r.errorf(param.OpPos, "optional parameter may not follow **%s", starStar.Name) - } else if star != nil { - numKwonlyParams++ - } - if id := param.X.(*syntax.Ident); r.bind(id) { - r.errorf(param.OpPos, "duplicate parameter: %s", id.Name) - } - seenOptional = true - - case *syntax.UnaryExpr: - // * or *args or **kwargs - if param.Op == syntax.STAR { - if starStar != nil { - r.errorf(param.OpPos, "* parameter may not follow **%s", starStar.Name) - } else if star != nil { - r.errorf(param.OpPos, "multiple * parameters not allowed") - } else { - star = param - } - } else { - if starStar != nil { - r.errorf(param.OpPos, "multiple ** parameters not allowed") - } - starStar = param.X.(*syntax.Ident) - } - } - } - - // Bind the *args and **kwargs parameters at the end, - // so that regular parameters a/b/c are contiguous and - // there is no hole for the "*": - // def f(a, b, *args, c=0, **kwargs) - // def f(a, b, *, c=0, **kwargs) - if star != nil { - if id, _ := star.X.(*syntax.Ident); id != nil { - // *args - if r.bind(id) { - r.errorf(id.NamePos, "duplicate parameter: %s", id.Name) - } - function.HasVarargs = true - } else if numKwonlyParams == 0 { - r.errorf(star.OpPos, "bare * must be followed by keyword-only parameters") - } - } - if starStar != nil { - if r.bind(starStar) { - r.errorf(starStar.NamePos, "duplicate parameter: %s", starStar.Name) - } - function.HasKwargs = true - } - - function.NumKwonlyParams = numKwonlyParams - r.stmts(function.Body) - - // Resolve all uses of this function's local vars, - // and keep just the remaining uses of free/global vars. - b.resolveLocalUses() - - // Leave function block. - r.pop() - - // References within the function body to globals are not - // resolved until the end of the module. -} - -func (r *resolver) resolveNonLocalUses(b *block) { - // First resolve inner blocks. - for _, child := range b.children { - r.resolveNonLocalUses(child) - } - for _, use := range b.uses { - use.id.Binding = r.lookupLexical(use, use.env) - } -} - -// lookupLocal looks up an identifier within its immediately enclosing function. -func lookupLocal(use use) *Binding { - for env := use.env; env != nil; env = env.parent { - if bind, ok := env.bindings[use.id.Name]; ok { - if bind.Scope == Free { - // shouldn't exist till later - log.Fatalf("%s: internal error: %s, %v", use.id.NamePos, use.id.Name, bind) - } - return bind // found - } - if env.function != nil { - break - } - } - return nil // not found in this function -} - -// lookupLexical looks up an identifier use.id within its lexically enclosing environment. -// The use.env field captures the original environment for error reporting. -func (r *resolver) lookupLexical(use use, env *block) (bind *Binding) { - if debug { - fmt.Printf("lookupLexical %s in %s = ...\n", use.id.Name, env) - defer func() { fmt.Printf("= %v\n", bind) }() - } - - // Is this the file block? - if env == r.file { - return r.useToplevel(use) // file-local, global, predeclared, or not found - } - - // Defined in this block? - bind, ok := env.bindings[use.id.Name] - if !ok { - // Defined in parent block? 
- bind = r.lookupLexical(use, env.parent) - if env.function != nil && (bind.Scope == Local || bind.Scope == Free || bind.Scope == Cell) { - // Found in parent block, which belongs to enclosing function. - // Add the parent's binding to the function's freevars, - // and add a new 'free' binding to the inner function's block, - // and turn the parent's local into cell. - if bind.Scope == Local { - bind.Scope = Cell - } - index := len(env.function.FreeVars) - env.function.FreeVars = append(env.function.FreeVars, bind) - bind = &Binding{ - First: bind.First, - Scope: Free, - Index: index, - } - if debug { - fmt.Printf("creating freevar %v in function at %s: %s\n", - len(env.function.FreeVars), env.function.Pos, use.id.Name) - } - } - - // Memoize, to avoid duplicate free vars - // and redundant global (failing) lookups. - env.bind(use.id.Name, bind) - } - return bind -} diff --git a/vendor/github.com/k14s/starlark-go/starlark/debug.go b/vendor/github.com/k14s/starlark-go/starlark/debug.go deleted file mode 100644 index 4f3e1379b..000000000 --- a/vendor/github.com/k14s/starlark-go/starlark/debug.go +++ /dev/null @@ -1,42 +0,0 @@ -package starlark - -import "github.com/k14s/starlark-go/syntax" - -// This file defines an experimental API for the debugging tools. -// Some of these declarations expose details of internal packages. -// (The debugger makes liberal use of exported fields of unexported types.) -// Breaking changes may occur without notice. - -// Local returns the value of the i'th local variable. -// It may be nil if not yet assigned. -// -// Local may be called only for frames whose Callable is a *Function (a -// function defined by Starlark source code), and only while the frame -// is active; it will panic otherwise. -// -// This function is provided only for debugging tools. -// -// THIS API IS EXPERIMENTAL AND MAY CHANGE WITHOUT NOTICE. -func (fr *frame) Local(i int) Value { return fr.locals[i] } - -// DebugFrame is the debugger API for a frame of the interpreter's call stack. -// -// Most applications have no need for this API; use CallFrame instead. -// -// Clients must not retain a DebugFrame nor call any of its methods once -// the current built-in call has returned or execution has resumed -// after a breakpoint as this may have unpredictable effects, including -// but not limited to retention of object that would otherwise be garbage. -type DebugFrame interface { - Callable() Callable // returns the frame's function - Local(i int) Value // returns the value of the (Starlark) frame's ith local variable - Position() syntax.Position // returns the current position of execution in this frame -} - -// DebugFrame returns the debugger interface for -// the specified frame of the interpreter's call stack. -// Frame numbering is as for Thread.CallFrame. -// -// This function is intended for use in debugging tools. -// Most applications should have no need for it; use CallFrame instead. -func (thread *Thread) DebugFrame(depth int) DebugFrame { return thread.frameAt(depth) } diff --git a/vendor/github.com/k14s/starlark-go/starlark/empty.s b/vendor/github.com/k14s/starlark-go/starlark/empty.s deleted file mode 100644 index 3b8216999..000000000 --- a/vendor/github.com/k14s/starlark-go/starlark/empty.s +++ /dev/null @@ -1,3 +0,0 @@ -// The presence of this file allows the package to use the -// "go:linkname" hack to call non-exported functions in the -// Go runtime, such as hardware-accelerated string hashing. 
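The "go:linkname hack" that empty.s enables is the usual pattern of declaring a body-less Go function and binding it to an unexported runtime symbol; the empty assembly file is what makes the compiler accept the missing body. A sketch of the general technique only; the exact runtime symbol the deleted package bound to is an assumption here:

package fasthash

import (
	_ "unsafe" // required for go:linkname
)

// runtimeStringHash is bound by the linker to the runtime's unexported string
// hash (assumed symbol name, shown only to illustrate the pattern). Because it
// has no body, the package must also contain an empty .s file, which is the
// role empty.s played above.
//
//go:linkname runtimeStringHash runtime.stringHash
func runtimeStringHash(s string, seed uintptr) uintptr

// HashString exposes the (possibly hardware-accelerated) runtime string hash.
func HashString(s string) uintptr {
	return runtimeStringHash(s, 0)
}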
diff --git a/vendor/github.com/k14s/starlark-go/starlark/eval.go b/vendor/github.com/k14s/starlark-go/starlark/eval.go deleted file mode 100644 index 2935a9925..000000000 --- a/vendor/github.com/k14s/starlark-go/starlark/eval.go +++ /dev/null @@ -1,1442 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "math" - "math/big" - "sort" - "strings" - "time" - "unicode" - "unicode/utf8" - - "github.com/k14s/starlark-go/internal/compile" - "github.com/k14s/starlark-go/internal/spell" - "github.com/k14s/starlark-go/resolve" - "github.com/k14s/starlark-go/syntax" -) - -// A Thread contains the state of a Starlark thread, -// such as its call stack and thread-local storage. -// The Thread is threaded throughout the evaluator. -type Thread struct { - // Name is an optional name that describes the thread, for debugging. - Name string - - // stack is the stack of (internal) call frames. - stack []*frame - - // Print is the client-supplied implementation of the Starlark - // 'print' function. If nil, fmt.Fprintln(os.Stderr, msg) is - // used instead. - Print func(thread *Thread, msg string) - - // Load is the client-supplied implementation of module loading. - // Repeated calls with the same module name must return the same - // module environment or error. - // The error message need not include the module name. - // - // See example_test.go for some example implementations of Load. - Load func(thread *Thread, module string) (StringDict, error) - - // locals holds arbitrary "thread-local" Go values belonging to the client. - // They are accessible to the client but not to any Starlark program. - locals map[string]interface{} - - // proftime holds the accumulated execution time since the last profile event. - proftime time.Duration -} - -// SetLocal sets the thread-local value associated with the specified key. -// It must not be called after execution begins. -func (thread *Thread) SetLocal(key string, value interface{}) { - if thread.locals == nil { - thread.locals = make(map[string]interface{}) - } - thread.locals[key] = value -} - -// Local returns the thread-local value associated with the specified key. -func (thread *Thread) Local(key string) interface{} { - return thread.locals[key] -} - -// CallFrame returns a copy of the specified frame of the callstack. -// It should only be used in built-ins called from Starlark code. -// Depth 0 means the frame of the built-in itself, 1 is its caller, and so on. -// -// It is equivalent to CallStack().At(depth), but more efficient. -func (thread *Thread) CallFrame(depth int) CallFrame { - return thread.frameAt(depth).asCallFrame() -} - -func (thread *Thread) frameAt(depth int) *frame { - return thread.stack[len(thread.stack)-1-depth] -} - -// CallStack returns a new slice containing the thread's stack of call frames. -func (thread *Thread) CallStack() CallStack { - frames := make([]CallFrame, len(thread.stack)) - for i, fr := range thread.stack { - frames[i] = fr.asCallFrame() - } - return frames -} - -// CallStackDepth returns the number of frames in the current call stack. -func (thread *Thread) CallStackDepth() int { return len(thread.stack) } - -// A StringDict is a mapping from names to values, and represents -// an environment such as the global variables of a module. -// It is not a true starlark.Value. 
-type StringDict map[string]Value - -// Keys returns a new sorted slice of d's keys. -func (d StringDict) Keys() []string { - names := make([]string, 0, len(d)) - for name := range d { - names = append(names, name) - } - sort.Strings(names) - return names -} - -func (d StringDict) String() string { - buf := new(strings.Builder) - buf.WriteByte('{') - sep := "" - for _, name := range d.Keys() { - buf.WriteString(sep) - buf.WriteString(name) - buf.WriteString(": ") - writeValue(buf, d[name], nil) - sep = ", " - } - buf.WriteByte('}') - return buf.String() -} - -func (d StringDict) Freeze() { - for _, v := range d { - v.Freeze() - } -} - -// Has reports whether the dictionary contains the specified key. -func (d StringDict) Has(key string) bool { _, ok := d[key]; return ok } - -// A frame records a call to a Starlark function (including module toplevel) -// or a built-in function or method. -type frame struct { - callable Callable // current function (or toplevel) or built-in - pc uint32 // program counter (Starlark frames only) - locals []Value // local variables (Starlark frames only) - spanStart int64 // start time of current profiler span -} - -// Position returns the source position of the current point of execution in this frame. -func (fr *frame) Position() syntax.Position { - switch c := fr.callable.(type) { - case *Function: - // Starlark function - return c.funcode.Position(fr.pc) - case callableWithPosition: - // If a built-in Callable defines - // a Position method, use it. - return c.Position() - } - return syntax.MakePosition(&builtinFilename, 0, 0) -} - -var builtinFilename = "" - -// Function returns the frame's function or built-in. -func (fr *frame) Callable() Callable { return fr.callable } - -// A CallStack is a stack of call frames, outermost first. -type CallStack []CallFrame - -// At returns a copy of the frame at depth i. -// At(0) returns the topmost frame. -func (stack CallStack) At(i int) CallFrame { return stack[len(stack)-1-i] } - -// Pop removes and returns the topmost frame. -func (stack *CallStack) Pop() CallFrame { - last := len(*stack) - 1 - top := (*stack)[last] - *stack = (*stack)[:last] - return top -} - -// String returns a user-friendly description of the stack. -func (stack CallStack) String() string { - out := new(strings.Builder) - fmt.Fprintf(out, "Traceback (most recent call last):\n") - for _, fr := range stack { - fmt.Fprintf(out, " %s: in %s\n", fr.Pos, fr.Name) - } - return out.String() -} - -// An EvalError is a Starlark evaluation error and -// a copy of the thread's stack at the moment of the error. -type EvalError struct { - Msg string - CallStack CallStack -} - -// A CallFrame represents the function name and current -// position of execution of an enclosing call frame. -type CallFrame struct { - Name string - Pos syntax.Position -} - -func (fr *frame) asCallFrame() CallFrame { - return CallFrame{ - Name: fr.Callable().Name(), - Pos: fr.Position(), - } -} - -func (thread *Thread) evalError(err error) *EvalError { - return &EvalError{ - Msg: err.Error(), - CallStack: thread.CallStack(), - } -} - -func (e *EvalError) Error() string { return e.Msg } - -// Backtrace returns a user-friendly error message describing the stack -// of calls that led to this error. -func (e *EvalError) Backtrace() string { - return fmt.Sprintf("%sError: %s", e.CallStack, e.Msg) -} - -// A Program is a compiled Starlark program. -// -// Programs are immutable, and contain no Values. 
-// A Program may be created by parsing a source file (see SourceProgram) -// or by loading a previously saved compiled program (see CompiledProgram). -type Program struct { - compiled *compile.Program -} - -// CompilerVersion is the version number of the protocol for compiled -// files. Applications must not run programs compiled by one version -// with an interpreter at another version, and should thus incorporate -// the compiler version into the cache key when reusing compiled code. -const CompilerVersion = compile.Version - -// Filename returns the name of the file from which this program was loaded. -func (prog *Program) Filename() string { return prog.compiled.Toplevel.Pos.Filename() } - -func (prog *Program) String() string { return prog.Filename() } - -// NumLoads returns the number of load statements in the compiled program. -func (prog *Program) NumLoads() int { return len(prog.compiled.Loads) } - -// Load(i) returns the name and position of the i'th module directly -// loaded by this one, where 0 <= i < NumLoads(). -// The name is unresolved---exactly as it appears in the source. -func (prog *Program) Load(i int) (string, syntax.Position) { - id := prog.compiled.Loads[i] - return id.Name, id.Pos -} - -// WriteTo writes the compiled module to the specified output stream. -func (prog *Program) Write(out io.Writer) error { - data := prog.compiled.Encode() - _, err := out.Write(data) - return err -} - -// ExecFile parses, resolves, and executes a Starlark file in the -// specified global environment, which may be modified during execution. -// -// Thread is the state associated with the Starlark thread. -// -// The filename and src parameters are as for syntax.Parse: -// filename is the name of the file to execute, -// and the name that appears in error messages; -// src is an optional source of bytes to use -// instead of filename. -// -// predeclared defines the predeclared names specific to this module. -// Execution does not modify this dictionary, though it may mutate -// its values. -// -// If ExecFile fails during evaluation, it returns an *EvalError -// containing a backtrace. -func ExecFile(thread *Thread, filename string, src interface{}, predeclared StringDict) (StringDict, error) { - // Parse, resolve, and compile a Starlark source file. - _, mod, err := SourceProgram(filename, src, predeclared.Has) - if err != nil { - return nil, err - } - - g, err := mod.Init(thread, predeclared) - g.Freeze() - return g, err -} - -// SourceProgram produces a new program by parsing, resolving, -// and compiling a Starlark source file. -// On success, it returns the parsed file and the compiled program. -// The filename and src parameters are as for syntax.Parse. -// -// The isPredeclared predicate reports whether a name is -// a pre-declared identifier of the current module. -// Its typical value is predeclared.Has, -// where predeclared is a StringDict of pre-declared values. -func SourceProgram(filename string, src interface{}, isPredeclared func(string) bool) (*syntax.File, *Program, error) { - f, err := syntax.Parse(filename, src, 0) - if err != nil { - return nil, nil, err - } - prog, err := FileProgram(f, isPredeclared) - return f, prog, err -} - -// FileProgram produces a new program by resolving, -// and compiling the Starlark source file syntax tree. -// On success, it returns the compiled program. -// -// Resolving a syntax tree mutates it. -// Do not call FileProgram more than once on the same file. 
-// -// The isPredeclared predicate reports whether a name is -// a pre-declared identifier of the current module. -// Its typical value is predeclared.Has, -// where predeclared is a StringDict of pre-declared values. -func FileProgram(f *syntax.File, isPredeclared func(string) bool) (*Program, error) { - if err := resolve.File(f, isPredeclared, Universe.Has); err != nil { - return nil, err - } - - var pos syntax.Position - if len(f.Stmts) > 0 { - pos = syntax.Start(f.Stmts[0]) - } else { - pos = syntax.MakePosition(&f.Path, 1, 1) - } - - module := f.Module.(*resolve.Module) - compiled := compile.File(f.Stmts, pos, "", module.Locals, module.Globals) - - return &Program{compiled}, nil -} - -// CompiledProgram produces a new program from the representation -// of a compiled program previously saved by Program.Write. -func CompiledProgram(in io.Reader) (*Program, error) { - data, err := ioutil.ReadAll(in) - if err != nil { - return nil, err - } - compiled, err := compile.DecodeProgram(data) - if err != nil { - return nil, err - } - return &Program{compiled}, nil -} - -// Init creates a set of global variables for the program, -// executes the toplevel code of the specified program, -// and returns a new, unfrozen dictionary of the globals. -func (prog *Program) Init(thread *Thread, predeclared StringDict) (StringDict, error) { - toplevel := makeToplevelFunction(prog.compiled, predeclared) - - _, err := Call(thread, toplevel, nil, nil) - - // Convert the global environment to a map. - // We return a (partial) map even in case of error. - return toplevel.Globals(), err -} - -func makeToplevelFunction(prog *compile.Program, predeclared StringDict) *Function { - // Create the Starlark value denoted by each program constant c. - constants := make([]Value, len(prog.Constants)) - for i, c := range prog.Constants { - var v Value - switch c := c.(type) { - case int64: - v = MakeInt64(c) - case *big.Int: - v = MakeBigInt(c) - case string: - v = String(c) - case float64: - v = Float(c) - default: - log.Fatalf("unexpected constant %T: %v", c, c) - } - constants[i] = v - } - - return &Function{ - funcode: prog.Toplevel, - module: &module{ - program: prog, - predeclared: predeclared, - globals: make([]Value, len(prog.Globals)), - constants: constants, - }, - } -} - -// Eval parses, resolves, and evaluates an expression within the -// specified (predeclared) environment. -// -// Evaluation cannot mutate the environment dictionary itself, -// though it may modify variables reachable from the dictionary. -// -// The filename and src parameters are as for syntax.Parse. -// -// If Eval fails during evaluation, it returns an *EvalError -// containing a backtrace. -func Eval(thread *Thread, filename string, src interface{}, env StringDict) (Value, error) { - expr, err := syntax.ParseExpr(filename, src, 0) - if err != nil { - return nil, err - } - f, err := makeExprFunc(expr, env) - if err != nil { - return nil, err - } - return Call(thread, f, nil, nil) -} - -// EvalExpr resolves and evaluates an expression within the -// specified (predeclared) environment. -// Evaluating a comma-separated list of expressions yields a tuple value. -// -// Resolving an expression mutates it. -// Do not call EvalExpr more than once for the same expression. -// -// Evaluation cannot mutate the environment dictionary itself, -// though it may modify variables reachable from the dictionary. -// -// If Eval fails during evaluation, it returns an *EvalError -// containing a backtrace. 
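Thread, StringDict, and ExecFile above are the embedding entry points of the interpreter being un-vendored; typical usage of that (now removed) API was along these lines:

package main

import (
	"fmt"
	"log"

	"github.com/k14s/starlark-go/starlark"
)

func main() {
	thread := &starlark.Thread{
		Name:  "example",
		Print: func(_ *starlark.Thread, msg string) { fmt.Println(msg) },
	}
	predeclared := starlark.StringDict{"greeting": starlark.String("hello")}

	const src = `
message = greeting + ", world"
print(message)
`
	globals, err := starlark.ExecFile(thread, "example.star", src, predeclared)
	if err != nil {
		if evalErr, ok := err.(*starlark.EvalError); ok {
			log.Fatal(evalErr.Backtrace()) // call stack plus message
		}
		log.Fatal(err)
	}
	fmt.Println(globals["message"]) // "hello, world"
}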
-func EvalExpr(thread *Thread, expr syntax.Expr, env StringDict) (Value, error) { - fn, err := makeExprFunc(expr, env) - if err != nil { - return nil, err - } - return Call(thread, fn, nil, nil) -} - -// ExprFunc returns a no-argument function -// that evaluates the expression whose source is src. -func ExprFunc(filename string, src interface{}, env StringDict) (*Function, error) { - expr, err := syntax.ParseExpr(filename, src, 0) - if err != nil { - return nil, err - } - return makeExprFunc(expr, env) -} - -// makeExprFunc returns a no-argument function whose body is expr. -func makeExprFunc(expr syntax.Expr, env StringDict) (*Function, error) { - locals, err := resolve.Expr(expr, env.Has, Universe.Has) - if err != nil { - return nil, err - } - - return makeToplevelFunction(compile.Expr(expr, "", locals), env), nil -} - -// The following functions are primitive operations of the byte code interpreter. - -// list += iterable -func listExtend(x *List, y Iterable) { - if ylist, ok := y.(*List); ok { - // fast path: list += list - x.elems = append(x.elems, ylist.elems...) - } else { - iter := y.Iterate() - defer iter.Done() - var z Value - for iter.Next(&z) { - x.elems = append(x.elems, z) - } - } -} - -// getAttr implements x.dot. -func getAttr(x Value, name string) (Value, error) { - hasAttr, ok := x.(HasAttrs) - if !ok { - return nil, fmt.Errorf("%s has no .%s field or method", x.Type(), name) - } - - var errmsg string - v, err := hasAttr.Attr(name) - if err == nil { - if v != nil { - return v, nil // success - } - // (nil, nil) => generic error - errmsg = fmt.Sprintf("%s has no .%s field or method", x.Type(), name) - } else if nsa, ok := err.(NoSuchAttrError); ok { - errmsg = string(nsa) - } else { - return nil, err // return error as is - } - - // add spelling hint - if n := spell.Nearest(name, hasAttr.AttrNames()); n != "" { - errmsg = fmt.Sprintf("%s (did you mean .%s?)", errmsg, n) - } - - return nil, fmt.Errorf("%s", errmsg) -} - -// setField implements x.name = y. -func setField(x Value, name string, y Value) error { - if x, ok := x.(HasSetField); ok { - err := x.SetField(name, y) - if _, ok := err.(NoSuchAttrError); ok { - // No such field: check spelling. - if n := spell.Nearest(name, x.AttrNames()); n != "" { - err = fmt.Errorf("%s (did you mean .%s?)", err, n) - } - } - return err - } - - return fmt.Errorf("can't assign to .%s field of %s", name, x.Type()) -} - -// getIndex implements x[y]. -func getIndex(x, y Value) (Value, error) { - switch x := x.(type) { - case Mapping: // dict - z, found, err := x.Get(y) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("key %v not in %s", y, x.Type()) - } - return z, nil - - case Indexable: // string, list, tuple - n := x.Len() - i, err := AsInt32(y) - if err != nil { - return nil, fmt.Errorf("%s index: %s", x.Type(), err) - } - origI := i - if i < 0 { - i += n - } - if i < 0 || i >= n { - return nil, outOfRange(origI, n, x) - } - return x.Index(i), nil - } - return nil, fmt.Errorf("unhandled index operation %s[%s]", x.Type(), y.Type()) -} - -func outOfRange(i, n int, x Value) error { - if n == 0 { - return fmt.Errorf("index %d out of range: empty %s", i, x.Type()) - } else { - return fmt.Errorf("%s index %d out of range [%d:%d]", x.Type(), i, -n, n-1) - } -} - -// setIndex implements x[y] = z. 
-func setIndex(x, y, z Value) error { - switch x := x.(type) { - case HasSetKey: - if err := x.SetKey(y, z); err != nil { - return err - } - - case HasSetIndex: - n := x.Len() - i, err := AsInt32(y) - if err != nil { - return err - } - origI := i - if i < 0 { - i += n - } - if i < 0 || i >= n { - return outOfRange(origI, n, x) - } - return x.SetIndex(i, z) - - default: - return fmt.Errorf("%s value does not support item assignment", x.Type()) - } - return nil -} - -// Unary applies a unary operator (+, -, ~, not) to its operand. -func Unary(op syntax.Token, x Value) (Value, error) { - // The NOT operator is not customizable. - if op == syntax.NOT { - return !x.Truth(), nil - } - - // Int, Float, and user-defined types - if x, ok := x.(HasUnary); ok { - // (nil, nil) => unhandled - y, err := x.Unary(op) - if y != nil || err != nil { - return y, err - } - } - - return nil, fmt.Errorf("unknown unary op: %s %s", op, x.Type()) -} - -// Binary applies a strict binary operator (not AND or OR) to its operands. -// For equality tests or ordered comparisons, use Compare instead. -func Binary(op syntax.Token, x, y Value) (Value, error) { - switch op { - case syntax.PLUS: - switch x := x.(type) { - case String: - if y, ok := y.(String); ok { - return x + y, nil - } - case Int: - switch y := y.(type) { - case Int: - return x.Add(y), nil - case Float: - return x.Float() + y, nil - } - case Float: - switch y := y.(type) { - case Float: - return x + y, nil - case Int: - return x + y.Float(), nil - } - case *List: - if y, ok := y.(*List); ok { - z := make([]Value, 0, x.Len()+y.Len()) - z = append(z, x.elems...) - z = append(z, y.elems...) - return NewList(z), nil - } - case Tuple: - if y, ok := y.(Tuple); ok { - z := make(Tuple, 0, len(x)+len(y)) - z = append(z, x...) - z = append(z, y...) 
- return z, nil - } - } - - case syntax.MINUS: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - return x.Sub(y), nil - case Float: - return x.Float() - y, nil - } - case Float: - switch y := y.(type) { - case Float: - return x - y, nil - case Int: - return x - y.Float(), nil - } - } - - case syntax.STAR: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - return x.Mul(y), nil - case Float: - return x.Float() * y, nil - case String: - return stringRepeat(y, x) - case *List: - elems, err := tupleRepeat(Tuple(y.elems), x) - if err != nil { - return nil, err - } - return NewList(elems), nil - case Tuple: - return tupleRepeat(y, x) - } - case Float: - switch y := y.(type) { - case Float: - return x * y, nil - case Int: - return x * y.Float(), nil - } - case String: - if y, ok := y.(Int); ok { - return stringRepeat(x, y) - } - case *List: - if y, ok := y.(Int); ok { - elems, err := tupleRepeat(Tuple(x.elems), y) - if err != nil { - return nil, err - } - return NewList(elems), nil - } - case Tuple: - if y, ok := y.(Int); ok { - return tupleRepeat(x, y) - } - - } - - case syntax.SLASH: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - yf := y.Float() - if yf == 0.0 { - return nil, fmt.Errorf("real division by zero") - } - return x.Float() / yf, nil - case Float: - if y == 0.0 { - return nil, fmt.Errorf("real division by zero") - } - return x.Float() / y, nil - } - case Float: - switch y := y.(type) { - case Float: - if y == 0.0 { - return nil, fmt.Errorf("real division by zero") - } - return x / y, nil - case Int: - yf := y.Float() - if yf == 0.0 { - return nil, fmt.Errorf("real division by zero") - } - return x / yf, nil - } - } - - case syntax.SLASHSLASH: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - if y.Sign() == 0 { - return nil, fmt.Errorf("floored division by zero") - } - return x.Div(y), nil - case Float: - if y == 0.0 { - return nil, fmt.Errorf("floored division by zero") - } - return floor((x.Float() / y)), nil - } - case Float: - switch y := y.(type) { - case Float: - if y == 0.0 { - return nil, fmt.Errorf("floored division by zero") - } - return floor(x / y), nil - case Int: - yf := y.Float() - if yf == 0.0 { - return nil, fmt.Errorf("floored division by zero") - } - return floor(x / yf), nil - } - } - - case syntax.PERCENT: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - if y.Sign() == 0 { - return nil, fmt.Errorf("integer modulo by zero") - } - return x.Mod(y), nil - case Float: - if y == 0 { - return nil, fmt.Errorf("float modulo by zero") - } - return x.Float().Mod(y), nil - } - case Float: - switch y := y.(type) { - case Float: - if y == 0.0 { - return nil, fmt.Errorf("float modulo by zero") - } - return Float(math.Mod(float64(x), float64(y))), nil - case Int: - if y.Sign() == 0 { - return nil, fmt.Errorf("float modulo by zero") - } - return x.Mod(y.Float()), nil - } - case String: - return interpolate(string(x), y) - } - - case syntax.NOT_IN: - z, err := Binary(syntax.IN, x, y) - if err != nil { - return nil, err - } - return !z.Truth(), nil - - case syntax.IN: - switch y := y.(type) { - case *List: - for _, elem := range y.elems { - if eq, err := Equal(elem, x); err != nil { - return nil, err - } else if eq { - return True, nil - } - } - return False, nil - case Tuple: - for _, elem := range y { - if eq, err := Equal(elem, x); err != nil { - return nil, err - } else if eq { - return True, nil - } - } - return False, nil - case Mapping: // e.g. 
dict - // Ignore error from Get as we cannot distinguish true - // errors (value cycle, type error) from "key not found". - _, found, _ := y.Get(x) - return Bool(found), nil - case *Set: - ok, err := y.Has(x) - return Bool(ok), err - case String: - needle, ok := x.(String) - if !ok { - return nil, fmt.Errorf("'in ' requires string as left operand, not %s", x.Type()) - } - return Bool(strings.Contains(string(y), string(needle))), nil - case rangeValue: - i, err := NumberToInt(x) - if err != nil { - return nil, fmt.Errorf("'in ' requires integer as left operand, not %s", x.Type()) - } - return Bool(y.contains(i)), nil - } - - case syntax.PIPE: - switch x := x.(type) { - case Int: - if y, ok := y.(Int); ok { - return x.Or(y), nil - } - case *Set: // union - if y, ok := y.(*Set); ok { - iter := Iterate(y) - defer iter.Done() - return x.Union(iter) - } - } - - case syntax.AMP: - switch x := x.(type) { - case Int: - if y, ok := y.(Int); ok { - return x.And(y), nil - } - case *Set: // intersection - if y, ok := y.(*Set); ok { - set := new(Set) - if x.Len() > y.Len() { - x, y = y, x // opt: range over smaller set - } - for _, xelem := range x.elems() { - // Has, Insert cannot fail here. - if found, _ := y.Has(xelem); found { - set.Insert(xelem) - } - } - return set, nil - } - } - - case syntax.CIRCUMFLEX: - switch x := x.(type) { - case Int: - if y, ok := y.(Int); ok { - return x.Xor(y), nil - } - case *Set: // symmetric difference - if y, ok := y.(*Set); ok { - set := new(Set) - for _, xelem := range x.elems() { - if found, _ := y.Has(xelem); !found { - set.Insert(xelem) - } - } - for _, yelem := range y.elems() { - if found, _ := x.Has(yelem); !found { - set.Insert(yelem) - } - } - return set, nil - } - } - - case syntax.LTLT, syntax.GTGT: - if x, ok := x.(Int); ok { - y, err := AsInt32(y) - if err != nil { - return nil, err - } - if y < 0 { - return nil, fmt.Errorf("negative shift count: %v", y) - } - if op == syntax.LTLT { - if y >= 512 { - return nil, fmt.Errorf("shift count too large: %v", y) - } - return x.Lsh(uint(y)), nil - } else { - return x.Rsh(uint(y)), nil - } - } - - default: - // unknown operator - goto unknown - } - - // user-defined types - // (nil, nil) => unhandled - if x, ok := x.(HasBinary); ok { - z, err := x.Binary(op, y, Left) - if z != nil || err != nil { - return z, err - } - } - if y, ok := y.(HasBinary); ok { - z, err := y.Binary(op, x, Right) - if z != nil || err != nil { - return z, err - } - } - - // unsupported operand types -unknown: - return nil, fmt.Errorf("unknown binary op: %s %s %s", x.Type(), op, y.Type()) -} - -// It's always possible to overeat in small bites but we'll -// try to stop someone swallowing the world in one gulp. 
-const maxAlloc = 1 << 30 - -func tupleRepeat(elems Tuple, n Int) (Tuple, error) { - if len(elems) == 0 { - return nil, nil - } - i, err := AsInt32(n) - if err != nil { - return nil, fmt.Errorf("repeat count %s too large", n) - } - if i < 1 { - return nil, nil - } - // Inv: i > 0, len > 0 - sz := len(elems) * i - if sz < 0 || sz >= maxAlloc { // sz < 0 => overflow - return nil, fmt.Errorf("excessive repeat (%d elements)", sz) - } - res := make([]Value, sz) - // copy elems into res, doubling each time - x := copy(res, elems) - for x < len(res) { - copy(res[x:], res[:x]) - x *= 2 - } - return res, nil -} - -func stringRepeat(s String, n Int) (String, error) { - if s == "" { - return "", nil - } - i, err := AsInt32(n) - if err != nil { - return "", fmt.Errorf("repeat count %s too large", n) - } - if i < 1 { - return "", nil - } - // Inv: i > 0, len > 0 - sz := len(s) * i - if sz < 0 || sz >= maxAlloc { // sz < 0 => overflow - return "", fmt.Errorf("excessive repeat (%d elements)", sz) - } - return String(strings.Repeat(string(s), i)), nil -} - -// Call calls the function fn with the specified positional and keyword arguments. -func Call(thread *Thread, fn Value, args Tuple, kwargs []Tuple) (Value, error) { - c, ok := fn.(Callable) - if !ok { - return nil, fmt.Errorf("invalid call of non-function (%s)", fn.Type()) - } - - // Allocate and push a new frame. - var fr *frame - // Optimization: use slack portion of thread.stack - // slice as a freelist of empty frames. - if n := len(thread.stack); n < cap(thread.stack) { - fr = thread.stack[n : n+1][0] - } - if fr == nil { - fr = new(frame) - } - thread.stack = append(thread.stack, fr) // push - - fr.callable = c - - thread.beginProfSpan() - result, err := c.CallInternal(thread, args, kwargs) - thread.endProfSpan() - - // Sanity check: nil is not a valid Starlark value. - if result == nil && err == nil { - err = fmt.Errorf("internal error: nil (not None) returned from %s", fn) - } - - // Always return an EvalError with an accurate frame. - if err != nil { - if _, ok := err.(*EvalError); !ok { - err = thread.evalError(err) - } - } - - *fr = frame{} // clear out any references - thread.stack = thread.stack[:len(thread.stack)-1] // pop - - return result, err -} - -func slice(x, lo, hi, step_ Value) (Value, error) { - sliceable, ok := x.(Sliceable) - if !ok { - return nil, fmt.Errorf("invalid slice operand %s", x.Type()) - } - - n := sliceable.Len() - step := 1 - if step_ != None { - var err error - step, err = AsInt32(step_) - if err != nil { - return nil, fmt.Errorf("got %s for slice step, want int", step_.Type()) - } - if step == 0 { - return nil, fmt.Errorf("zero is not a valid slice step") - } - } - - // TODO(adonovan): opt: preallocate result array. - - var start, end int - if step > 0 { - // positive stride - // default indices are [0:n]. - var err error - start, end, err = indices(lo, hi, n) - if err != nil { - return nil, err - } - - if end < start { - end = start // => empty result - } - } else { - // negative stride - // default indices are effectively [n-1:-1], though to - // get this effect using explicit indices requires - // [n-1:-1-n:-1] because of the treatment of -ve values. 
- start = n - 1 - if err := asIndex(lo, n, &start); err != nil { - return nil, fmt.Errorf("invalid start index: %s", err) - } - if start >= n { - start = n - 1 - } - - end = -1 - if err := asIndex(hi, n, &end); err != nil { - return nil, fmt.Errorf("invalid end index: %s", err) - } - if end < -1 { - end = -1 - } - - if start < end { - start = end // => empty result - } - } - - return sliceable.Slice(start, end, step), nil -} - -// From Hacker's Delight, section 2.8. -func signum64(x int64) int { return int(uint64(x>>63) | uint64(-x)>>63) } -func signum(x int) int { return signum64(int64(x)) } - -// indices converts start_ and end_ to indices in the range [0:len]. -// The start index defaults to 0 and the end index defaults to len. -// An index -len < i < 0 is treated like i+len. -// All other indices outside the range are clamped to the nearest value in the range. -// Beware: start may be greater than end. -// This function is suitable only for slices with positive strides. -func indices(start_, end_ Value, len int) (start, end int, err error) { - start = 0 - if err := asIndex(start_, len, &start); err != nil { - return 0, 0, fmt.Errorf("invalid start index: %s", err) - } - // Clamp to [0:len]. - if start < 0 { - start = 0 - } else if start > len { - start = len - } - - end = len - if err := asIndex(end_, len, &end); err != nil { - return 0, 0, fmt.Errorf("invalid end index: %s", err) - } - // Clamp to [0:len]. - if end < 0 { - end = 0 - } else if end > len { - end = len - } - - return start, end, nil -} - -// asIndex sets *result to the integer value of v, adding len to it -// if it is negative. If v is nil or None, *result is unchanged. -func asIndex(v Value, len int, result *int) error { - if v != nil && v != None { - var err error - *result, err = AsInt32(v) - if err != nil { - return fmt.Errorf("got %s, want int", v.Type()) - } - if *result < 0 { - *result += len - } - } - return nil -} - -// setArgs sets the values of the formal parameters of function fn in -// based on the actual parameter values in args and kwargs. -func setArgs(locals []Value, fn *Function, args Tuple, kwargs []Tuple) error { - - // This is the general schema of a function: - // - // def f(p1, p2=dp2, p3=dp3, *args, k1, k2=dk2, k3, **kwargs) - // - // The p parameters are non-kwonly, and may be specified positionally. - // The k parameters are kwonly, and must be specified by name. - // The defaults tuple is (dp2, dp3, mandatory, dk2, mandatory). - // - // Arguments are processed as follows: - // - positional arguments are bound to a prefix of [p1, p2, p3]. - // - surplus positional arguments are bound to *args. - // - keyword arguments are bound to any of {p1, p2, p3, k1, k2, k3}; - // duplicate bindings are rejected. - // - surplus keyword arguments are bound to **kwargs. - // - defaults are bound to each parameter from p2 to k3 if no value was set. - // default values come from the tuple above. - // It is an error if the tuple entry for an unset parameter is 'mandatory'. - - // Nullary function? - if fn.NumParams() == 0 { - if nactual := len(args) + len(kwargs); nactual > 0 { - return fmt.Errorf("function %s accepts no arguments (%d given)", fn.Name(), nactual) - } - return nil - } - - cond := func(x bool, y, z interface{}) interface{} { - if x { - return y - } - return z - } - - // nparams is the number of ordinary parameters (sans *args and **kwargs). 
- nparams := fn.NumParams() - var kwdict *Dict - if fn.HasKwargs() { - nparams-- - kwdict = new(Dict) - locals[nparams] = kwdict - } - if fn.HasVarargs() { - nparams-- - } - - // nonkwonly is the number of non-kwonly parameters. - nonkwonly := nparams - fn.NumKwonlyParams() - - // Too many positional args? - n := len(args) - if len(args) > nonkwonly { - if !fn.HasVarargs() { - return fmt.Errorf("function %s accepts %s%d positional argument%s (%d given)", - fn.Name(), - cond(len(fn.defaults) > fn.NumKwonlyParams(), "at most ", ""), - nonkwonly, - cond(nonkwonly == 1, "", "s"), - len(args)) - } - n = nonkwonly - } - - // Bind positional arguments to non-kwonly parameters. - for i := 0; i < n; i++ { - locals[i] = args[i] - } - - // Bind surplus positional arguments to *args parameter. - if fn.HasVarargs() { - tuple := make(Tuple, len(args)-n) - for i := n; i < len(args); i++ { - tuple[i-n] = args[i] - } - locals[nparams] = tuple - } - - // Bind keyword arguments to parameters. - paramIdents := fn.funcode.Locals[:nparams] - for _, pair := range kwargs { - k, v := pair[0].(String), pair[1] - if i := findParam(paramIdents, string(k)); i >= 0 { - if locals[i] != nil { - return fmt.Errorf("function %s got multiple values for parameter %s", fn.Name(), k) - } - locals[i] = v - continue - } - if kwdict == nil { - return fmt.Errorf("function %s got an unexpected keyword argument %s", fn.Name(), k) - } - oldlen := kwdict.Len() - kwdict.SetKey(k, v) - if kwdict.Len() == oldlen { - return fmt.Errorf("function %s got multiple values for parameter %s", fn.Name(), k) - } - } - - // Are defaults required? - if n < nparams || fn.NumKwonlyParams() > 0 { - m := nparams - len(fn.defaults) // first default - - // Report errors for missing required arguments. - var missing []string - var i int - for i = n; i < m; i++ { - if locals[i] == nil { - missing = append(missing, paramIdents[i].Name) - } - } - - // Bind default values to parameters. - for ; i < nparams; i++ { - if locals[i] == nil { - dflt := fn.defaults[i-m] - if _, ok := dflt.(mandatory); ok { - missing = append(missing, paramIdents[i].Name) - continue - } - locals[i] = dflt - } - } - - if missing != nil { - return fmt.Errorf("function %s missing %d argument%s (%s)", - fn.Name(), len(missing), cond(len(missing) > 1, "s", ""), strings.Join(missing, ", ")) - } - } - return nil -} - -func findParam(params []compile.Binding, name string) int { - for i, param := range params { - if param.Name == name { - return i - } - } - return -1 -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string-interpolation -func interpolate(format string, x Value) (Value, error) { - buf := new(strings.Builder) - index := 0 - nargs := 1 - if tuple, ok := x.(Tuple); ok { - nargs = len(tuple) - } - for { - i := strings.IndexByte(format, '%') - if i < 0 { - buf.WriteString(format) - break - } - buf.WriteString(format[:i]) - format = format[i+1:] - - if format != "" && format[0] == '%' { - buf.WriteByte('%') - format = format[1:] - continue - } - - var arg Value - if format != "" && format[0] == '(' { - // keyword argument: %(name)s. - format = format[1:] - j := strings.IndexByte(format, ')') - if j < 0 { - return nil, fmt.Errorf("incomplete format key") - } - key := format[:j] - if dict, ok := x.(Mapping); !ok { - return nil, fmt.Errorf("format requires a mapping") - } else if v, found, _ := dict.Get(String(key)); found { - arg = v - } else { - return nil, fmt.Errorf("key not found: %s", key) - } - format = format[j+1:] - } else { - // positional argument: %s. 
- if index >= nargs { - return nil, fmt.Errorf("not enough arguments for format string") - } - if tuple, ok := x.(Tuple); ok { - arg = tuple[index] - } else { - arg = x - } - } - - // NOTE: Starlark does not support any of these optional Python features: - // - optional conversion flags: [#0- +], etc. - // - optional minimum field width (number or *). - // - optional precision (.123 or *) - // - optional length modifier - - // conversion type - if format == "" { - return nil, fmt.Errorf("incomplete format") - } - switch c := format[0]; c { - case 's', 'r': - if str, ok := AsString(arg); ok && c == 's' { - buf.WriteString(str) - } else { - writeValue(buf, arg, nil) - } - case 'd', 'i', 'o', 'x', 'X': - i, err := NumberToInt(arg) - if err != nil { - return nil, fmt.Errorf("%%%c format requires integer: %v", c, err) - } - switch c { - case 'd', 'i': - fmt.Fprintf(buf, "%d", i) - case 'o': - fmt.Fprintf(buf, "%o", i) - case 'x': - fmt.Fprintf(buf, "%x", i) - case 'X': - fmt.Fprintf(buf, "%X", i) - } - case 'e', 'f', 'g', 'E', 'F', 'G': - f, ok := AsFloat(arg) - if !ok { - return nil, fmt.Errorf("%%%c format requires float, not %s", c, arg.Type()) - } - switch c { - case 'e': - fmt.Fprintf(buf, "%e", f) - case 'f': - fmt.Fprintf(buf, "%f", f) - case 'g': - fmt.Fprintf(buf, "%g", f) - case 'E': - fmt.Fprintf(buf, "%E", f) - case 'F': - fmt.Fprintf(buf, "%F", f) - case 'G': - fmt.Fprintf(buf, "%G", f) - } - case 'c': - switch arg := arg.(type) { - case Int: - // chr(int) - r, err := AsInt32(arg) - if err != nil || r < 0 || r > unicode.MaxRune { - return nil, fmt.Errorf("%%c format requires a valid Unicode code point, got %s", arg) - } - buf.WriteRune(rune(r)) - case String: - r, size := utf8.DecodeRuneInString(string(arg)) - if size != len(arg) || len(arg) == 0 { - return nil, fmt.Errorf("%%c format requires a single-character string") - } - buf.WriteRune(r) - default: - return nil, fmt.Errorf("%%c format requires int or single-character string, not %s", arg.Type()) - } - case '%': - buf.WriteByte('%') - default: - return nil, fmt.Errorf("unknown conversion %%%c", c) - } - format = format[1:] - index++ - } - - if index < nargs { - return nil, fmt.Errorf("too many arguments for format string") - } - - return String(buf.String()), nil -} diff --git a/vendor/github.com/k14s/starlark-go/starlark/hashtable.go b/vendor/github.com/k14s/starlark-go/starlark/hashtable.go deleted file mode 100644 index d4250194a..000000000 --- a/vendor/github.com/k14s/starlark-go/starlark/hashtable.go +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -import ( - "fmt" - _ "unsafe" // for go:linkname hack -) - -// hashtable is used to represent Starlark dict and set values. -// It is a hash table whose key/value entries form a doubly-linked list -// in the order the entries were inserted. -type hashtable struct { - table []bucket // len is zero or a power of two - bucket0 [1]bucket // inline allocation for small maps. 
- len uint32 - itercount uint32 // number of active iterators (ignored if frozen) - head *entry // insertion order doubly-linked list; may be nil - tailLink **entry // address of nil link at end of list (perhaps &head) - frozen bool -} - -const bucketSize = 8 - -type bucket struct { - entries [bucketSize]entry - next *bucket // linked list of buckets -} - -type entry struct { - hash uint32 // nonzero => in use - key, value Value - next *entry // insertion order doubly-linked list; may be nil - prevLink **entry // address of link to this entry (perhaps &head) -} - -func (ht *hashtable) init(size int) { - if size < 0 { - panic("size < 0") - } - nb := 1 - for overloaded(size, nb) { - nb = nb << 1 - } - if nb < 2 { - ht.table = ht.bucket0[:1] - } else { - ht.table = make([]bucket, nb) - } - ht.tailLink = &ht.head -} - -func (ht *hashtable) freeze() { - if !ht.frozen { - ht.frozen = true - for i := range ht.table { - for p := &ht.table[i]; p != nil; p = p.next { - for i := range p.entries { - e := &p.entries[i] - if e.hash != 0 { - e.key.Freeze() - e.value.Freeze() - } - } - } - } - } -} - -func (ht *hashtable) insert(k, v Value) error { - if ht.frozen { - return fmt.Errorf("cannot insert into frozen hash table") - } - if ht.itercount > 0 { - return fmt.Errorf("cannot insert into hash table during iteration") - } - if ht.table == nil { - ht.init(1) - } - h, err := k.Hash() - if err != nil { - return err - } - if h == 0 { - h = 1 // zero is reserved - } - -retry: - var insert *entry - - // Inspect each bucket in the bucket list. - p := &ht.table[h&(uint32(len(ht.table)-1))] - for { - for i := range p.entries { - e := &p.entries[i] - if e.hash != h { - if e.hash == 0 { - // Found empty entry; make a note. - insert = e - } - continue - } - if eq, err := Equal(k, e.key); err != nil { - return err // e.g. excessively recursive tuple - } else if !eq { - continue - } - // Key already present; update value. - e.value = v - return nil - } - if p.next == nil { - break - } - p = p.next - } - - // Key not found. p points to the last bucket. - - // Does the number of elements exceed the buckets' load factor? - if overloaded(int(ht.len), len(ht.table)) { - ht.grow() - goto retry - } - - if insert == nil { - // No space in existing buckets. Add a new one to the bucket list. - b := new(bucket) - p.next = b - insert = &b.entries[0] - } - - // Insert key/value pair. - insert.hash = h - insert.key = k - insert.value = v - - // Append entry to doubly-linked list. - insert.prevLink = ht.tailLink - *ht.tailLink = insert - ht.tailLink = &insert.next - - ht.len++ - - return nil -} - -func overloaded(elems, buckets int) bool { - const loadFactor = 6.5 // just a guess - return elems >= bucketSize && float64(elems) >= loadFactor*float64(buckets) -} - -func (ht *hashtable) grow() { - // Double the number of buckets and rehash. - // TODO(adonovan): opt: - // - avoid reentrant calls to ht.insert, and specialize it. - // e.g. we know the calls to Equals will return false since - // there are no duplicates among the old keys. - // - saving the entire hash in the bucket would avoid the need to - // recompute the hash. - // - save the old buckets on a free list. 
- ht.table = make([]bucket, len(ht.table)<<1) - oldhead := ht.head - ht.head = nil - ht.tailLink = &ht.head - ht.len = 0 - for e := oldhead; e != nil; e = e.next { - ht.insert(e.key, e.value) - } - ht.bucket0[0] = bucket{} // clear out unused initial bucket -} - -func (ht *hashtable) lookup(k Value) (v Value, found bool, err error) { - h, err := k.Hash() - if err != nil { - return nil, false, err // unhashable - } - if h == 0 { - h = 1 // zero is reserved - } - if ht.table == nil { - return None, false, nil // empty - } - - // Inspect each bucket in the bucket list. - for p := &ht.table[h&(uint32(len(ht.table)-1))]; p != nil; p = p.next { - for i := range p.entries { - e := &p.entries[i] - if e.hash == h { - if eq, err := Equal(k, e.key); err != nil { - return nil, false, err // e.g. excessively recursive tuple - } else if eq { - return e.value, true, nil // found - } - } - } - } - return None, false, nil // not found -} - -// Items returns all the items in the map (as key/value pairs) in insertion order. -func (ht *hashtable) items() []Tuple { - items := make([]Tuple, 0, ht.len) - array := make([]Value, ht.len*2) // allocate a single backing array - for e := ht.head; e != nil; e = e.next { - pair := Tuple(array[:2:2]) - array = array[2:] - pair[0] = e.key - pair[1] = e.value - items = append(items, pair) - } - return items -} - -func (ht *hashtable) first() (Value, bool) { - if ht.head != nil { - return ht.head.key, true - } - return None, false -} - -func (ht *hashtable) keys() []Value { - keys := make([]Value, 0, ht.len) - for e := ht.head; e != nil; e = e.next { - keys = append(keys, e.key) - } - return keys -} - -func (ht *hashtable) delete(k Value) (v Value, found bool, err error) { - if ht.frozen { - return nil, false, fmt.Errorf("cannot delete from frozen hash table") - } - if ht.itercount > 0 { - return nil, false, fmt.Errorf("cannot delete from hash table during iteration") - } - if ht.table == nil { - return None, false, nil // empty - } - h, err := k.Hash() - if err != nil { - return nil, false, err // unhashable - } - if h == 0 { - h = 1 // zero is reserved - } - - // Inspect each bucket in the bucket list. - for p := &ht.table[h&(uint32(len(ht.table)-1))]; p != nil; p = p.next { - for i := range p.entries { - e := &p.entries[i] - if e.hash == h { - if eq, err := Equal(k, e.key); err != nil { - return nil, false, err - } else if eq { - // Remove e from doubly-linked list. - *e.prevLink = e.next - if e.next == nil { - ht.tailLink = e.prevLink // deletion of last entry - } else { - e.next.prevLink = e.prevLink - } - - v := e.value - *e = entry{} - ht.len-- - return v, true, nil // found - } - } - } - } - - // TODO(adonovan): opt: remove completely empty bucket from bucket list. - - return None, false, nil // not found -} - -func (ht *hashtable) clear() error { - if ht.frozen { - return fmt.Errorf("cannot clear frozen hash table") - } - if ht.itercount > 0 { - return fmt.Errorf("cannot clear hash table during iteration") - } - if ht.table != nil { - for i := range ht.table { - ht.table[i] = bucket{} - } - } - ht.head = nil - ht.tailLink = &ht.head - ht.len = 0 - return nil -} - -// dump is provided as an aid to debugging. 
-func (ht *hashtable) dump() { - fmt.Printf("hashtable %p len=%d head=%p tailLink=%p", - ht, ht.len, ht.head, ht.tailLink) - if ht.tailLink != nil { - fmt.Printf(" *tailLink=%p", *ht.tailLink) - } - fmt.Println() - for j := range ht.table { - fmt.Printf("bucket chain %d\n", j) - for p := &ht.table[j]; p != nil; p = p.next { - fmt.Printf("bucket %p\n", p) - for i := range p.entries { - e := &p.entries[i] - fmt.Printf("\tentry %d @ %p hash=%d key=%v value=%v\n", - i, e, e.hash, e.key, e.value) - fmt.Printf("\t\tnext=%p &next=%p prev=%p", - e.next, &e.next, e.prevLink) - if e.prevLink != nil { - fmt.Printf(" *prev=%p", *e.prevLink) - } - fmt.Println() - } - } - } -} - -func (ht *hashtable) iterate() *keyIterator { - if !ht.frozen { - ht.itercount++ - } - return &keyIterator{ht: ht, e: ht.head} -} - -type keyIterator struct { - ht *hashtable - e *entry -} - -func (it *keyIterator) Next(k *Value) bool { - if it.e != nil { - *k = it.e.key - it.e = it.e.next - return true - } - return false -} - -func (it *keyIterator) Done() { - if !it.ht.frozen { - it.ht.itercount-- - } -} - -// hashString computes the hash of s. -func hashString(s string) uint32 { - if len(s) >= 12 { - // Call the Go runtime's optimized hash implementation, - // which uses the AESENC instruction on amd64 machines. - return uint32(goStringHash(s, 0)) - } - return softHashString(s) -} - -//go:linkname goStringHash runtime.stringHash -func goStringHash(s string, seed uintptr) uintptr - -// softHashString computes the FNV hash of s in software. -func softHashString(s string) uint32 { - var h uint32 - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} diff --git a/vendor/github.com/k14s/starlark-go/starlark/int.go b/vendor/github.com/k14s/starlark-go/starlark/int.go deleted file mode 100644 index cd16ecfac..000000000 --- a/vendor/github.com/k14s/starlark-go/starlark/int.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -import ( - "fmt" - "math" - "math/big" - "strconv" - - "github.com/k14s/starlark-go/syntax" -) - -// Int is the type of a Starlark int. -type Int struct { - // We use only the signed 32 bit range of small to ensure - // that small+small and small*small do not overflow. - - small int64 // minint32 <= small <= maxint32 - big *big.Int // big != nil <=> value is not representable as int32 -} - -// newBig allocates a new big.Int. -func newBig(x int64) *big.Int { - if 0 <= x && int64(big.Word(x)) == x { - // x is guaranteed to fit into a single big.Word. - // Most starlark ints are small, - // but math/big assumes that since you've chosen to use math/big, - // your big.Ints will probably grow, so it over-allocates. - // Avoid that over-allocation by manually constructing a single-word slice. - // See https://golang.org/cl/150999, which will hopefully land in Go 1.13. - return new(big.Int).SetBits([]big.Word{big.Word(x)}) - } - return big.NewInt(x) -} - -// MakeInt returns a Starlark int for the specified signed integer. -func MakeInt(x int) Int { return MakeInt64(int64(x)) } - -// MakeInt64 returns a Starlark int for the specified int64. -func MakeInt64(x int64) Int { - if math.MinInt32 <= x && x <= math.MaxInt32 { - return Int{small: x} - } - return Int{big: newBig(x)} -} - -// MakeUint returns a Starlark int for the specified unsigned integer. 
-func MakeUint(x uint) Int { return MakeUint64(uint64(x)) } - -// MakeUint64 returns a Starlark int for the specified uint64. -func MakeUint64(x uint64) Int { - if x <= math.MaxInt32 { - return Int{small: int64(x)} - } - if uint64(big.Word(x)) == x { - // See comment in newBig for an explanation of this optimization. - return Int{big: new(big.Int).SetBits([]big.Word{big.Word(x)})} - } - return Int{big: new(big.Int).SetUint64(x)} -} - -// MakeBigInt returns a Starlark int for the specified big.Int. -// The caller must not subsequently modify x. -func MakeBigInt(x *big.Int) Int { - if n := x.BitLen(); n < 32 || n == 32 && x.Int64() == math.MinInt32 { - return Int{small: x.Int64()} - } - return Int{big: x} -} - -var ( - zero, one = Int{small: 0}, Int{small: 1} - oneBig = newBig(1) - - _ HasUnary = Int{} -) - -// Unary implements the operations +int, -int, and ~int. -func (i Int) Unary(op syntax.Token) (Value, error) { - switch op { - case syntax.MINUS: - return zero.Sub(i), nil - case syntax.PLUS: - return i, nil - case syntax.TILDE: - return i.Not(), nil - } - return nil, nil -} - -// Int64 returns the value as an int64. -// If it is not exactly representable the result is undefined and ok is false. -func (i Int) Int64() (_ int64, ok bool) { - if i.big != nil { - x, acc := bigintToInt64(i.big) - if acc != big.Exact { - return // inexact - } - return x, true - } - return i.small, true -} - -// BigInt returns the value as a big.Int. -// The returned variable must not be modified by the client. -func (i Int) BigInt() *big.Int { - if i.big != nil { - return i.big - } - return newBig(i.small) -} - -// Uint64 returns the value as a uint64. -// If it is not exactly representable the result is undefined and ok is false. -func (i Int) Uint64() (_ uint64, ok bool) { - if i.big != nil { - x, acc := bigintToUint64(i.big) - if acc != big.Exact { - return // inexact - } - return x, true - } - if i.small < 0 { - return // inexact - } - return uint64(i.small), true -} - -// The math/big API should provide this function. -func bigintToInt64(i *big.Int) (int64, big.Accuracy) { - sign := i.Sign() - if sign > 0 { - if i.Cmp(maxint64) > 0 { - return math.MaxInt64, big.Below - } - } else if sign < 0 { - if i.Cmp(minint64) < 0 { - return math.MinInt64, big.Above - } - } - return i.Int64(), big.Exact -} - -// The math/big API should provide this function. 
-func bigintToUint64(i *big.Int) (uint64, big.Accuracy) { - sign := i.Sign() - if sign > 0 { - if i.BitLen() > 64 { - return math.MaxUint64, big.Below - } - } else if sign < 0 { - return 0, big.Above - } - return i.Uint64(), big.Exact -} - -var ( - minint64 = new(big.Int).SetInt64(math.MinInt64) - maxint64 = new(big.Int).SetInt64(math.MaxInt64) -) - -func (i Int) Format(s fmt.State, ch rune) { - if i.big != nil { - i.big.Format(s, ch) - return - } - newBig(i.small).Format(s, ch) -} -func (i Int) String() string { - if i.big != nil { - return i.big.Text(10) - } - return strconv.FormatInt(i.small, 10) -} -func (i Int) Type() string { return "int" } -func (i Int) Freeze() {} // immutable -func (i Int) Truth() Bool { return i.Sign() != 0 } -func (i Int) Hash() (uint32, error) { - var lo big.Word - if i.big != nil { - lo = i.big.Bits()[0] - } else { - lo = big.Word(i.small) - } - return 12582917 * uint32(lo+3), nil -} -func (x Int) CompareSameType(op syntax.Token, v Value, depth int) (bool, error) { - y := v.(Int) - if x.big != nil || y.big != nil { - return threeway(op, x.BigInt().Cmp(y.BigInt())), nil - } - return threeway(op, signum64(x.small-y.small)), nil -} - -// Float returns the float value nearest i. -func (i Int) Float() Float { - if i.big != nil { - f, _ := new(big.Float).SetInt(i.big).Float64() - return Float(f) - } - return Float(i.small) -} - -func (x Int) Sign() int { - if x.big != nil { - return x.big.Sign() - } - return signum64(x.small) -} - -func (x Int) Add(y Int) Int { - if x.big != nil || y.big != nil { - return MakeBigInt(new(big.Int).Add(x.BigInt(), y.BigInt())) - } - return MakeInt64(x.small + y.small) -} -func (x Int) Sub(y Int) Int { - if x.big != nil || y.big != nil { - return MakeBigInt(new(big.Int).Sub(x.BigInt(), y.BigInt())) - } - return MakeInt64(x.small - y.small) -} -func (x Int) Mul(y Int) Int { - if x.big != nil || y.big != nil { - return MakeBigInt(new(big.Int).Mul(x.BigInt(), y.BigInt())) - } - return MakeInt64(x.small * y.small) -} -func (x Int) Or(y Int) Int { - if x.big != nil || y.big != nil { - return Int{big: new(big.Int).Or(x.BigInt(), y.BigInt())} - } - return Int{small: x.small | y.small} -} -func (x Int) And(y Int) Int { - if x.big != nil || y.big != nil { - return MakeBigInt(new(big.Int).And(x.BigInt(), y.BigInt())) - } - return Int{small: x.small & y.small} -} -func (x Int) Xor(y Int) Int { - if x.big != nil || y.big != nil { - return MakeBigInt(new(big.Int).Xor(x.BigInt(), y.BigInt())) - } - return Int{small: x.small ^ y.small} -} -func (x Int) Not() Int { - if x.big != nil { - return MakeBigInt(new(big.Int).Not(x.big)) - } - return Int{small: ^x.small} -} -func (x Int) Lsh(y uint) Int { return MakeBigInt(new(big.Int).Lsh(x.BigInt(), y)) } -func (x Int) Rsh(y uint) Int { return MakeBigInt(new(big.Int).Rsh(x.BigInt(), y)) } - -// Precondition: y is nonzero. -func (x Int) Div(y Int) Int { - // http://python-history.blogspot.com/2010/08/why-pythons-integer-division-floors.html - if x.big != nil || y.big != nil { - xb, yb := x.BigInt(), y.BigInt() - - var quo, rem big.Int - quo.QuoRem(xb, yb, &rem) - if (xb.Sign() < 0) != (yb.Sign() < 0) && rem.Sign() != 0 { - quo.Sub(&quo, oneBig) - } - return MakeBigInt(&quo) - } - quo := x.small / y.small - rem := x.small % y.small - if (x.small < 0) != (y.small < 0) && rem != 0 { - quo -= 1 - } - return MakeInt64(quo) -} - -// Precondition: y is nonzero. 
-func (x Int) Mod(y Int) Int { - if x.big != nil || y.big != nil { - xb, yb := x.BigInt(), y.BigInt() - - var quo, rem big.Int - quo.QuoRem(xb, yb, &rem) - if (xb.Sign() < 0) != (yb.Sign() < 0) && rem.Sign() != 0 { - rem.Add(&rem, yb) - } - return MakeBigInt(&rem) - } - rem := x.small % y.small - if (x.small < 0) != (y.small < 0) && rem != 0 { - rem += y.small - } - return Int{small: rem} -} - -func (i Int) rational() *big.Rat { - if i.big != nil { - return new(big.Rat).SetInt(i.big) - } - return new(big.Rat).SetInt64(i.small) -} - -// AsInt32 returns the value of x if is representable as an int32. -func AsInt32(x Value) (int, error) { - i, ok := x.(Int) - if !ok { - return 0, fmt.Errorf("got %s, want int", x.Type()) - } - if i.big != nil { - return 0, fmt.Errorf("%s out of range", i) - } - return int(i.small), nil -} - -// NumberToInt converts a number x to an integer value. -// An int is returned unchanged, a float is truncated towards zero. -// NumberToInt reports an error for all other values. -func NumberToInt(x Value) (Int, error) { - switch x := x.(type) { - case Int: - return x, nil - case Float: - f := float64(x) - if math.IsInf(f, 0) { - return zero, fmt.Errorf("cannot convert float infinity to integer") - } else if math.IsNaN(f) { - return zero, fmt.Errorf("cannot convert float NaN to integer") - } - return finiteFloatToInt(x), nil - - } - return zero, fmt.Errorf("cannot convert %s to int", x.Type()) -} - -// finiteFloatToInt converts f to an Int, truncating towards zero. -// f must be finite. -func finiteFloatToInt(f Float) Int { - if math.MinInt64 <= f && f <= math.MaxInt64 { - // small values - return MakeInt64(int64(f)) - } - rat := f.rational() - if rat == nil { - panic(f) // non-finite - } - return MakeBigInt(new(big.Int).Div(rat.Num(), rat.Denom())) -} diff --git a/vendor/github.com/k14s/starlark-go/starlark/interp.go b/vendor/github.com/k14s/starlark-go/starlark/interp.go deleted file mode 100644 index 4aecf04f7..000000000 --- a/vendor/github.com/k14s/starlark-go/starlark/interp.go +++ /dev/null @@ -1,619 +0,0 @@ -package starlark - -// This file defines the bytecode interpreter. - -import ( - "fmt" - "os" - - "github.com/k14s/starlark-go/internal/compile" - "github.com/k14s/starlark-go/internal/spell" - "github.com/k14s/starlark-go/resolve" - "github.com/k14s/starlark-go/syntax" -) - -const vmdebug = false // TODO(adonovan): use a bitfield of specific kinds of error. - -// TODO(adonovan): -// - optimize position table. -// - opt: record MaxIterStack during compilation and preallocate the stack. - -func (fn *Function) CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) { - if !resolve.AllowRecursion { - // detect recursion - for _, fr := range thread.stack[:len(thread.stack)-1] { - // We look for the same function code, - // not function value, otherwise the user could - // defeat the check by writing the Y combinator. - if frfn, ok := fr.Callable().(*Function); ok && frfn.funcode == fn.funcode { - return nil, fmt.Errorf("function %s called recursively", fn.Name()) - } - } - } - - f := fn.funcode - fr := thread.frameAt(0) - - // Allocate space for stack and locals. - // Logically these do not escape from this frame - // (See https://github.com/golang/go/issues/20533.) 
- // - // This heap allocation looks expensive, but I was unable to get - // more than 1% real time improvement in a large alloc-heavy - // benchmark (in which this alloc was 8% of alloc-bytes) - // by allocating space for 8 Values in each frame, or - // by allocating stack by slicing an array held by the Thread - // that is expanded in chunks of min(k, nspace), for k=256 or 1024. - nlocals := len(f.Locals) - nspace := nlocals + f.MaxStack - space := make([]Value, nspace) - locals := space[:nlocals:nlocals] // local variables, starting with parameters - stack := space[nlocals:] // operand stack - - // Digest arguments and set parameters. - err := setArgs(locals, fn, args, kwargs) - if err != nil { - return nil, thread.evalError(err) - } - - fr.locals = locals - - if vmdebug { - fmt.Printf("Entering %s @ %s\n", f.Name, f.Position(0)) - fmt.Printf("%d stack, %d locals\n", len(stack), len(locals)) - defer fmt.Println("Leaving ", f.Name) - } - - // Spill indicated locals to cells. - // Each cell is a separate alloc to avoid spurious liveness. - for _, index := range f.Cells { - locals[index] = &cell{locals[index]} - } - - // TODO(adonovan): add static check that beneath this point - // - there is exactly one return statement - // - there is no redefinition of 'err'. - - var iterstack []Iterator // stack of active iterators - - sp := 0 - var pc uint32 - var result Value - code := f.Code -loop: - for { - fr.pc = pc - - op := compile.Opcode(code[pc]) - pc++ - var arg uint32 - if op >= compile.OpcodeArgMin { - // TODO(adonovan): opt: profile this. - // Perhaps compiling big endian would be less work to decode? - for s := uint(0); ; s += 7 { - b := code[pc] - pc++ - arg |= uint32(b&0x7f) << s - if b < 0x80 { - break - } - } - } - if vmdebug { - fmt.Fprintln(os.Stderr, stack[:sp]) // very verbose! - compile.PrintOp(f, fr.pc, op, arg) - } - - switch op { - case compile.NOP: - // nop - - case compile.DUP: - stack[sp] = stack[sp-1] - sp++ - - case compile.DUP2: - stack[sp] = stack[sp-2] - stack[sp+1] = stack[sp-1] - sp += 2 - - case compile.POP: - sp-- - - case compile.EXCH: - stack[sp-2], stack[sp-1] = stack[sp-1], stack[sp-2] - - case compile.EQL, compile.NEQ, compile.GT, compile.LT, compile.LE, compile.GE: - op := syntax.Token(op-compile.EQL) + syntax.EQL - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - ok, err2 := Compare(op, x, y) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = Bool(ok) - sp++ - - case compile.PLUS, - compile.MINUS, - compile.STAR, - compile.SLASH, - compile.SLASHSLASH, - compile.PERCENT, - compile.AMP, - compile.PIPE, - compile.CIRCUMFLEX, - compile.LTLT, - compile.GTGT, - compile.IN: - binop := syntax.Token(op-compile.PLUS) + syntax.PLUS - if op == compile.IN { - binop = syntax.IN // IN token is out of order - } - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - z, err2 := Binary(binop, x, y) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = z - sp++ - - case compile.UPLUS, compile.UMINUS, compile.TILDE: - var unop syntax.Token - if op == compile.TILDE { - unop = syntax.TILDE - } else { - unop = syntax.Token(op-compile.UPLUS) + syntax.PLUS - } - x := stack[sp-1] - y, err2 := Unary(unop, x) - if err2 != nil { - err = err2 - break loop - } - stack[sp-1] = y - - case compile.INPLACE_ADD: - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - - // It's possible that y is not Iterable but - // nonetheless defines x+y, in which case we - // should fall back to the general case. 
- var z Value - if xlist, ok := x.(*List); ok { - if yiter, ok := y.(Iterable); ok { - if err = xlist.checkMutable("apply += to"); err != nil { - break loop - } - listExtend(xlist, yiter) - z = xlist - } - } - if z == nil { - z, err = Binary(syntax.PLUS, x, y) - if err != nil { - break loop - } - } - - stack[sp] = z - sp++ - - case compile.NONE: - stack[sp] = None - sp++ - - case compile.TRUE: - stack[sp] = True - sp++ - - case compile.FALSE: - stack[sp] = False - sp++ - - case compile.MANDATORY: - stack[sp] = mandatory{} - sp++ - - case compile.JMP: - pc = arg - - case compile.CALL, compile.CALL_VAR, compile.CALL_KW, compile.CALL_VAR_KW: - var kwargs Value - if op == compile.CALL_KW || op == compile.CALL_VAR_KW { - kwargs = stack[sp-1] - sp-- - } - - var args Value - if op == compile.CALL_VAR || op == compile.CALL_VAR_KW { - args = stack[sp-1] - sp-- - } - - // named args (pairs) - var kvpairs []Tuple - if nkvpairs := int(arg & 0xff); nkvpairs > 0 { - kvpairs = make([]Tuple, 0, nkvpairs) - kvpairsAlloc := make(Tuple, 2*nkvpairs) // allocate a single backing array - sp -= 2 * nkvpairs - for i := 0; i < nkvpairs; i++ { - pair := kvpairsAlloc[:2:2] - kvpairsAlloc = kvpairsAlloc[2:] - pair[0] = stack[sp+2*i] // name - pair[1] = stack[sp+2*i+1] // value - kvpairs = append(kvpairs, pair) - } - } - if kwargs != nil { - // Add key/value items from **kwargs dictionary. - dict, ok := kwargs.(IterableMapping) - if !ok { - err = fmt.Errorf("argument after ** must be a mapping, not %s", kwargs.Type()) - break loop - } - items := dict.Items() - for _, item := range items { - if _, ok := item[0].(String); !ok { - err = fmt.Errorf("keywords must be strings, not %s", item[0].Type()) - break loop - } - } - if len(kvpairs) == 0 { - kvpairs = items - } else { - kvpairs = append(kvpairs, items...) - } - } - - // positional args - var positional Tuple - if npos := int(arg >> 8); npos > 0 { - positional = make(Tuple, npos) - sp -= npos - copy(positional, stack[sp:]) - } - if args != nil { - // Add elements from *args sequence. 
- iter := Iterate(args) - if iter == nil { - err = fmt.Errorf("argument after * must be iterable, not %s", args.Type()) - break loop - } - var elem Value - for iter.Next(&elem) { - positional = append(positional, elem) - } - iter.Done() - } - - function := stack[sp-1] - - if vmdebug { - fmt.Printf("VM call %s args=%s kwargs=%s @%s\n", - function, positional, kvpairs, f.Position(fr.pc)) - } - - thread.endProfSpan() - z, err2 := Call(thread, function, positional, kvpairs) - thread.beginProfSpan() - if err2 != nil { - err = err2 - break loop - } - if vmdebug { - fmt.Printf("Resuming %s @ %s\n", f.Name, f.Position(0)) - } - stack[sp-1] = z - - case compile.ITERPUSH: - x := stack[sp-1] - sp-- - iter := Iterate(x) - if iter == nil { - err = fmt.Errorf("%s value is not iterable", x.Type()) - break loop - } - iterstack = append(iterstack, iter) - - case compile.ITERJMP: - iter := iterstack[len(iterstack)-1] - if iter.Next(&stack[sp]) { - sp++ - } else { - pc = arg - } - - case compile.ITERPOP: - n := len(iterstack) - 1 - iterstack[n].Done() - iterstack = iterstack[:n] - - case compile.NOT: - stack[sp-1] = !stack[sp-1].Truth() - - case compile.RETURN: - result = stack[sp-1] - break loop - - case compile.SETINDEX: - z := stack[sp-1] - y := stack[sp-2] - x := stack[sp-3] - sp -= 3 - err = setIndex(x, y, z) - if err != nil { - break loop - } - - case compile.INDEX: - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - z, err2 := getIndex(x, y) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = z - sp++ - - case compile.ATTR: - x := stack[sp-1] - name := f.Prog.Names[arg] - y, err2 := getAttr(x, name) - if err2 != nil { - err = err2 - break loop - } - stack[sp-1] = y - - case compile.SETFIELD: - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - name := f.Prog.Names[arg] - if err2 := setField(x, name, y); err2 != nil { - err = err2 - break loop - } - - case compile.MAKEDICT: - stack[sp] = new(Dict) - sp++ - - case compile.SETDICT, compile.SETDICTUNIQ: - dict := stack[sp-3].(*Dict) - k := stack[sp-2] - v := stack[sp-1] - sp -= 3 - oldlen := dict.Len() - if err2 := dict.SetKey(k, v); err2 != nil { - err = err2 - break loop - } - if op == compile.SETDICTUNIQ && dict.Len() == oldlen { - err = fmt.Errorf("duplicate key: %v", k) - break loop - } - - case compile.APPEND: - elem := stack[sp-1] - list := stack[sp-2].(*List) - sp -= 2 - list.elems = append(list.elems, elem) - - case compile.SLICE: - x := stack[sp-4] - lo := stack[sp-3] - hi := stack[sp-2] - step := stack[sp-1] - sp -= 4 - res, err2 := slice(x, lo, hi, step) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = res - sp++ - - case compile.UNPACK: - n := int(arg) - iterable := stack[sp-1] - sp-- - iter := Iterate(iterable) - if iter == nil { - err = fmt.Errorf("got %s in sequence assignment", iterable.Type()) - break loop - } - i := 0 - sp += n - for i < n && iter.Next(&stack[sp-1-i]) { - i++ - } - var dummy Value - if iter.Next(&dummy) { - // NB: Len may return -1 here in obscure cases. 
- err = fmt.Errorf("too many values to unpack (got %d, want %d)", Len(iterable), n) - break loop - } - iter.Done() - if i < n { - err = fmt.Errorf("too few values to unpack (got %d, want %d)", i, n) - break loop - } - - case compile.CJMP: - if stack[sp-1].Truth() { - pc = arg - } - sp-- - - case compile.CONSTANT: - stack[sp] = fn.module.constants[arg] - sp++ - - case compile.MAKETUPLE: - n := int(arg) - tuple := make(Tuple, n) - sp -= n - copy(tuple, stack[sp:]) - stack[sp] = tuple - sp++ - - case compile.MAKELIST: - n := int(arg) - elems := make([]Value, n) - sp -= n - copy(elems, stack[sp:]) - stack[sp] = NewList(elems) - sp++ - - case compile.MAKEFUNC: - funcode := f.Prog.Functions[arg] - tuple := stack[sp-1].(Tuple) - n := len(tuple) - len(funcode.Freevars) - defaults := tuple[:n:n] - freevars := tuple[n:] - stack[sp-1] = &Function{ - funcode: funcode, - module: fn.module, - defaults: defaults, - freevars: freevars, - } - - case compile.LOAD: - n := int(arg) - module := string(stack[sp-1].(String)) - sp-- - - if thread.Load == nil { - err = fmt.Errorf("load not implemented by this application") - break loop - } - - thread.endProfSpan() - dict, err2 := thread.Load(thread, module) - thread.beginProfSpan() - if err2 != nil { - err = fmt.Errorf("cannot load %s: %v", module, err2) - break loop - } - - for i := 0; i < n; i++ { - from := string(stack[sp-1-i].(String)) - v, ok := dict[from] - if !ok { - err = fmt.Errorf("load: name %s not found in module %s", from, module) - if n := spell.Nearest(from, dict.Keys()); n != "" { - err = fmt.Errorf("%s (did you mean %s?)", err, n) - } - break loop - } - stack[sp-1-i] = v - } - - case compile.SETLOCAL: - locals[arg] = stack[sp-1] - sp-- - - case compile.SETCELL: - x := stack[sp-2] - y := stack[sp-1] - sp -= 2 - y.(*cell).v = x - - case compile.SETGLOBAL: - fn.module.globals[arg] = stack[sp-1] - sp-- - - case compile.LOCAL: - x := locals[arg] - if x == nil { - err = fmt.Errorf("local variable %s referenced before assignment", f.Locals[arg].Name) - break loop - } - stack[sp] = x - sp++ - - case compile.FREE: - stack[sp] = fn.freevars[arg] - sp++ - - case compile.CELL: - x := stack[sp-1] - stack[sp-1] = x.(*cell).v - - case compile.GLOBAL: - x := fn.module.globals[arg] - if x == nil { - err = fmt.Errorf("global variable %s referenced before assignment", f.Prog.Globals[arg].Name) - break loop - } - stack[sp] = x - sp++ - - case compile.PREDECLARED: - name := f.Prog.Names[arg] - x := fn.module.predeclared[name] - if x == nil { - err = fmt.Errorf("internal error: predeclared variable %s is uninitialized", name) - break loop - } - stack[sp] = x - sp++ - - case compile.UNIVERSAL: - stack[sp] = Universe[f.Prog.Names[arg]] - sp++ - - default: - err = fmt.Errorf("unimplemented: %s", op) - break loop - } - } - - // ITERPOP the rest of the iterator stack. - for _, iter := range iterstack { - iter.Done() - } - - fr.locals = nil - - return result, err -} - -// mandatory is a sentinel value used in a function's defaults tuple -// to indicate that a (keyword-only) parameter is mandatory. -type mandatory struct{} - -func (mandatory) String() string { return "mandatory" } -func (mandatory) Type() string { return "mandatory" } -func (mandatory) Freeze() {} // immutable -func (mandatory) Truth() Bool { return False } -func (mandatory) Hash() (uint32, error) { return 0, nil } - -// A cell is a box containing a Value. -// Local variables marked as cells hold their value indirectly -// so that they may be shared by outer and inner nested functions. 
-// Cells are always accessed using indirect CELL/SETCELL instructions. -// The FreeVars tuple contains only cells. -// The FREE instruction always yields a cell. -type cell struct{ v Value } - -func (c *cell) String() string { return "cell" } -func (c *cell) Type() string { return "cell" } -func (c *cell) Freeze() { - if c.v != nil { - c.v.Freeze() - } -} -func (c *cell) Truth() Bool { panic("unreachable") } -func (c *cell) Hash() (uint32, error) { panic("unreachable") } diff --git a/vendor/github.com/k14s/starlark-go/starlark/library.go b/vendor/github.com/k14s/starlark-go/starlark/library.go deleted file mode 100644 index 738641e8d..000000000 --- a/vendor/github.com/k14s/starlark-go/starlark/library.go +++ /dev/null @@ -1,2111 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -// This file defines the library of built-ins. -// -// Built-ins must explicitly check the "frozen" flag before updating -// mutable types such as lists and dicts. - -import ( - "errors" - "fmt" - "math/big" - "os" - "sort" - "strconv" - "strings" - "unicode" - "unicode/utf16" - "unicode/utf8" - - "github.com/k14s/starlark-go/syntax" -) - -// Universe defines the set of universal built-ins, such as None, True, and len. -// -// The Go application may add or remove items from the -// universe dictionary before Starlark evaluation begins. -// All values in the dictionary must be immutable. -// Starlark programs cannot modify the dictionary. -var Universe StringDict - -func init() { - // https://github.com/google/starlark-go/blob/master/doc/spec.md#built-in-constants-and-functions - Universe = StringDict{ - "None": None, - "True": True, - "False": False, - "any": NewBuiltin("any", any), - "all": NewBuiltin("all", all), - "bool": NewBuiltin("bool", bool_), - "chr": NewBuiltin("chr", chr), - "dict": NewBuiltin("dict", dict), - "dir": NewBuiltin("dir", dir), - "enumerate": NewBuiltin("enumerate", enumerate), - "fail": NewBuiltin("fail", fail), - "float": NewBuiltin("float", float), // requires resolve.AllowFloat - "getattr": NewBuiltin("getattr", getattr), - "hasattr": NewBuiltin("hasattr", hasattr), - "hash": NewBuiltin("hash", hash), - "int": NewBuiltin("int", int_), - "len": NewBuiltin("len", len_), - "list": NewBuiltin("list", list), - "max": NewBuiltin("max", minmax), - "min": NewBuiltin("min", minmax), - "ord": NewBuiltin("ord", ord), - "print": NewBuiltin("print", print), - "range": NewBuiltin("range", range_), - "repr": NewBuiltin("repr", repr), - "reversed": NewBuiltin("reversed", reversed), - "set": NewBuiltin("set", set), // requires resolve.AllowSet - "sorted": NewBuiltin("sorted", sorted), - "str": NewBuiltin("str", str), - "tuple": NewBuiltin("tuple", tuple), - "type": NewBuiltin("type", type_), - "zip": NewBuiltin("zip", zip), - } -} - -type builtinMethod func(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) - -// methods of built-in types -// https://github.com/google/starlark-go/blob/master/doc/spec.md#built-in-methods -var ( - dictMethods = map[string]builtinMethod{ - "clear": dict_clear, - "get": dict_get, - "items": dict_items, - "keys": dict_keys, - "pop": dict_pop, - "popitem": dict_popitem, - "setdefault": dict_setdefault, - "update": dict_update, - "values": dict_values, - } - - listMethods = map[string]builtinMethod{ - "append": list_append, - "clear": list_clear, - "extend": list_extend, - "index": list_index, - "insert": list_insert, - "pop": 
list_pop, - "remove": list_remove, - } - - stringMethods = map[string]builtinMethod{ - "capitalize": string_capitalize, - "codepoint_ords": string_iterable, - "codepoints": string_iterable, // sic - "count": string_count, - "elem_ords": string_iterable, - "elems": string_iterable, // sic - "endswith": string_startswith, // sic - "find": string_find, - "format": string_format, - "index": string_index, - "isalnum": string_isalnum, - "isalpha": string_isalpha, - "isdigit": string_isdigit, - "islower": string_islower, - "isspace": string_isspace, - "istitle": string_istitle, - "isupper": string_isupper, - "join": string_join, - "lower": string_lower, - "lstrip": string_strip, // sic - "partition": string_partition, - "replace": string_replace, - "rfind": string_rfind, - "rindex": string_rindex, - "rpartition": string_partition, // sic - "rsplit": string_split, // sic - "rstrip": string_strip, // sic - "split": string_split, - "splitlines": string_splitlines, - "startswith": string_startswith, - "strip": string_strip, - "title": string_title, - "upper": string_upper, - } - - setMethods = map[string]builtinMethod{ - "union": set_union, - } -) - -func builtinAttr(recv Value, name string, methods map[string]builtinMethod) (Value, error) { - method := methods[name] - if method == nil { - return nil, nil // no such method - } - - // Allocate a closure over 'method'. - impl := func(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return method(b, args, kwargs) - } - return NewBuiltin(name, impl).BindReceiver(recv), nil -} - -func builtinAttrNames(methods map[string]builtinMethod) []string { - names := make([]string, 0, len(methods)) - for name := range methods { - names = append(names, name) - } - sort.Strings(names) - return names -} - -// ---- built-in functions ---- - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#all -func all(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("all", args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - var x Value - for iter.Next(&x) { - if !x.Truth() { - return False, nil - } - } - return True, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#any -func any(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("any", args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - var x Value - for iter.Next(&x) { - if x.Truth() { - return True, nil - } - } - return False, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#bool -func bool_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value = False - if err := UnpackPositionalArgs("bool", args, kwargs, 0, &x); err != nil { - return nil, err - } - return x.Truth(), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#chr -func chr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("chr does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("chr: got %d arguments, want 1", len(args)) - } - i, err := AsInt32(args[0]) - if err != nil { - return nil, fmt.Errorf("chr: got %s, want int", args[0].Type()) - } - if i < 0 { - return nil, fmt.Errorf("chr: Unicode code point %d out of range (<0)", i) - } - if i > 
unicode.MaxRune { - return nil, fmt.Errorf("chr: Unicode code point U+%X out of range (>0x10FFFF)", i) - } - return String(string(i)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict -func dict(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(args) > 1 { - return nil, fmt.Errorf("dict: got %d arguments, want at most 1", len(args)) - } - dict := new(Dict) - if err := updateDict(dict, args, kwargs); err != nil { - return nil, fmt.Errorf("dict: %v", err) - } - return dict, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dir -func dir(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("dir does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("dir: got %d arguments, want 1", len(args)) - } - - var names []string - if x, ok := args[0].(HasAttrs); ok { - names = x.AttrNames() - } - sort.Strings(names) - elems := make([]Value, len(names)) - for i, name := range names { - elems[i] = String(name) - } - return NewList(elems), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#enumerate -func enumerate(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - var start int - if err := UnpackPositionalArgs("enumerate", args, kwargs, 1, &iterable, &start); err != nil { - return nil, err - } - - iter := iterable.Iterate() - if iter == nil { - return nil, fmt.Errorf("enumerate: got %s, want iterable", iterable.Type()) - } - defer iter.Done() - - var pairs []Value - var x Value - - if n := Len(iterable); n >= 0 { - // common case: known length - pairs = make([]Value, 0, n) - array := make(Tuple, 2*n) // allocate a single backing array - for i := 0; iter.Next(&x); i++ { - pair := array[:2:2] - array = array[2:] - pair[0] = MakeInt(start + i) - pair[1] = x - pairs = append(pairs, pair) - } - } else { - // non-sequence (unknown length) - for i := 0; iter.Next(&x); i++ { - pair := Tuple{MakeInt(start + i), x} - pairs = append(pairs, pair) - } - } - - return NewList(pairs), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#fail -func fail(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - sep := " " - if err := UnpackArgs("fail", nil, kwargs, "sep?", &sep); err != nil { - return nil, err - } - buf := new(strings.Builder) - buf.WriteString("fail: ") - for i, v := range args { - if i > 0 { - buf.WriteString(sep) - } - if s, ok := AsString(v); ok { - buf.WriteString(s) - } else { - writeValue(buf, v, nil) - } - } - - return nil, errors.New(buf.String()) -} - -func float(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("float does not accept keyword arguments") - } - if len(args) == 0 { - return Float(0.0), nil - } - if len(args) != 1 { - return nil, fmt.Errorf("float got %d arguments, wants 1", len(args)) - } - switch x := args[0].(type) { - case Bool: - if x { - return Float(1.0), nil - } else { - return Float(0.0), nil - } - case Int: - return x.Float(), nil - case Float: - return x, nil - case String: - f, err := strconv.ParseFloat(string(x), 64) - if err != nil { - return nil, nameErr(b, err) - } - return Float(f), nil - default: - return nil, fmt.Errorf("float got %s, want number or string", x.Type()) - } -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#getattr -func getattr(thread *Thread, b *Builtin, args 
Tuple, kwargs []Tuple) (Value, error) { - var object, dflt Value - var name string - if err := UnpackPositionalArgs("getattr", args, kwargs, 2, &object, &name, &dflt); err != nil { - return nil, err - } - if object, ok := object.(HasAttrs); ok { - v, err := object.Attr(name) - if err != nil { - // An error could mean the field doesn't exist, - // or it exists but could not be computed. - if dflt != nil { - return dflt, nil - } - return nil, nameErr(b, err) - } - if v != nil { - return v, nil - } - // (nil, nil) => no such field - } - if dflt != nil { - return dflt, nil - } - return nil, fmt.Errorf("getattr: %s has no .%s field or method", object.Type(), name) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#hasattr -func hasattr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var object Value - var name string - if err := UnpackPositionalArgs("hasattr", args, kwargs, 2, &object, &name); err != nil { - return nil, err - } - if object, ok := object.(HasAttrs); ok { - v, err := object.Attr(name) - if err == nil { - return Bool(v != nil), nil - } - - // An error does not conclusively indicate presence or - // absence of a field: it could occur while computing - // the value of a present attribute, or it could be a - // "no such attribute" error with details. - for _, x := range object.AttrNames() { - if x == name { - return True, nil - } - } - } - return False, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#hash -func hash(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var s string - if err := UnpackPositionalArgs("hash", args, kwargs, 1, &s); err != nil { - return nil, err - } - - // The Starlark spec requires that the hash function be - // deterministic across all runs, motivated by the need - // for reproducibility of builds. Thus we cannot call - // String.Hash, which uses the fastest implementation - // available, because as varies across process restarts, - // and may evolve with the implementation. - - return MakeInt(int(javaStringHash(s))), nil -} - -// javaStringHash returns the same hash as would be produced by -// java.lang.String.hashCode. This requires transcoding the string to -// UTF-16; transcoding may introduce Unicode replacement characters -// U+FFFD if s does not contain valid UTF-8. -func javaStringHash(s string) (h int32) { - for _, r := range s { - if utf16.IsSurrogate(r) { - c1, c2 := utf16.EncodeRune(r) - h = 31*h + c1 - h = 31*h + c2 - } else { - h = 31*h + r // r may be U+FFFD - } - } - return h -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#int -func int_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value = zero - var base Value - if err := UnpackArgs("int", args, kwargs, "x", &x, "base?", &base); err != nil { - return nil, err - } - - // "If x is not a number or base is given, x must be a string." 
- if s, ok := AsString(x); ok { - b := 10 - if base != nil { - var err error - b, err = AsInt32(base) - if err != nil || b != 0 && (b < 2 || b > 36) { - return nil, fmt.Errorf("int: base must be an integer >= 2 && <= 36") - } - } - - orig := s // save original for error message - - // remove sign - var neg bool - if s != "" { - if s[0] == '+' { - s = s[1:] - } else if s[0] == '-' { - neg = true - s = s[1:] - } - } - - // remove base prefix - baseprefix := 0 - if len(s) > 1 && s[0] == '0' { - if len(s) > 2 { - switch s[1] { - case 'o', 'O': - s = s[2:] - baseprefix = 8 - case 'x', 'X': - s = s[2:] - baseprefix = 16 - case 'b', 'B': - s = s[2:] - baseprefix = 2 - } - } - - // For automatic base detection, - // a string starting with zero - // must be all zeros. - // Thus we reject int("0755", 0). - if baseprefix == 0 && b == 0 { - for i := 1; i < len(s); i++ { - if s[i] != '0' { - goto invalid - } - } - return zero, nil - } - - if b != 0 && baseprefix != 0 && baseprefix != b { - // Explicit base doesn't match prefix, - // e.g. int("0o755", 16). - goto invalid - } - } - - // select base - if b == 0 { - if baseprefix != 0 { - b = baseprefix - } else { - b = 10 - } - } - - // we explicitly handled sign above. - // if a sign remains, it is invalid. - if s != "" && (s[0] == '-' || s[0] == '+') { - goto invalid - } - - // s has no sign or base prefix. - // - // int(x) permits arbitrary precision, unlike the scanner. - if i, ok := new(big.Int).SetString(s, b); ok { - res := MakeBigInt(i) - if neg { - res = zero.Sub(res) - } - return res, nil - } - - invalid: - return nil, fmt.Errorf("int: invalid literal with base %d: %s", b, orig) - } - - if base != nil { - return nil, fmt.Errorf("int: can't convert non-string with explicit base") - } - - if b, ok := x.(Bool); ok { - if b { - return one, nil - } else { - return zero, nil - } - } - - i, err := NumberToInt(x) - if err != nil { - return nil, fmt.Errorf("int: %s", err) - } - return i, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#len -func len_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value - if err := UnpackPositionalArgs("len", args, kwargs, 1, &x); err != nil { - return nil, err - } - len := Len(x) - if len < 0 { - return nil, fmt.Errorf("len: value of type %s has no len", x.Type()) - } - return MakeInt(len), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list -func list(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("list", args, kwargs, 0, &iterable); err != nil { - return nil, err - } - var elems []Value - if iterable != nil { - iter := iterable.Iterate() - defer iter.Done() - if n := Len(iterable); n > 0 { - elems = make([]Value, 0, n) // preallocate if length known - } - var x Value - for iter.Next(&x) { - elems = append(elems, x) - } - } - return NewList(elems), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#min -func minmax(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(args) == 0 { - return nil, fmt.Errorf("%s requires at least one positional argument", b.Name()) - } - var keyFunc Callable - if err := UnpackArgs(b.Name(), nil, kwargs, "key?", &keyFunc); err != nil { - return nil, err - } - var op syntax.Token - if b.Name() == "max" { - op = syntax.GT - } else { - op = syntax.LT - } - var iterable Value - if len(args) == 1 { - iterable = args[0] - } else { - iterable = args - } - iter := 
Iterate(iterable) - if iter == nil { - return nil, fmt.Errorf("%s: %s value is not iterable", b.Name(), iterable.Type()) - } - defer iter.Done() - var extremum Value - if !iter.Next(&extremum) { - return nil, nameErr(b, "argument is an empty sequence") - } - - var extremeKey Value - var keyargs Tuple - if keyFunc == nil { - extremeKey = extremum - } else { - keyargs = Tuple{extremum} - res, err := Call(thread, keyFunc, keyargs, nil) - if err != nil { - return nil, err // to preserve backtrace, don't modify error - } - extremeKey = res - } - - var x Value - for iter.Next(&x) { - var key Value - if keyFunc == nil { - key = x - } else { - keyargs[0] = x - res, err := Call(thread, keyFunc, keyargs, nil) - if err != nil { - return nil, err // to preserve backtrace, don't modify error - } - key = res - } - - if ok, err := Compare(op, key, extremeKey); err != nil { - return nil, nameErr(b, err) - } else if ok { - extremum = x - extremeKey = key - } - } - return extremum, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#ord -func ord(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("ord does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("ord: got %d arguments, want 1", len(args)) - } - s, ok := AsString(args[0]) - if !ok { - return nil, fmt.Errorf("ord: got %s, want string", args[0].Type()) - } - r, sz := utf8.DecodeRuneInString(s) - if sz == 0 || sz != len(s) { - n := utf8.RuneCountInString(s) - return nil, fmt.Errorf("ord: string encodes %d Unicode code points, want 1", n) - } - return MakeInt(int(r)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#print -func print(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - sep := " " - if err := UnpackArgs("print", nil, kwargs, "sep?", &sep); err != nil { - return nil, err - } - buf := new(strings.Builder) - for i, v := range args { - if i > 0 { - buf.WriteString(sep) - } - if s, ok := AsString(v); ok { - buf.WriteString(s) - } else { - writeValue(buf, v, nil) - } - } - - s := buf.String() - if thread.Print != nil { - thread.Print(thread, s) - } else { - fmt.Fprintln(os.Stderr, s) - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#range -func range_(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var start, stop, step int - step = 1 - if err := UnpackPositionalArgs("range", args, kwargs, 1, &start, &stop, &step); err != nil { - return nil, err - } - - // TODO(adonovan): analyze overflow/underflows cases for 32-bit implementations. - if len(args) == 1 { - // range(stop) - start, stop = 0, start - } - if step == 0 { - // we were given range(start, stop, 0) - return nil, nameErr(b, "step argument must not be zero") - } - - return rangeValue{start: start, stop: stop, step: step, len: rangeLen(start, stop, step)}, nil -} - -// A rangeValue is a comparable, immutable, indexable sequence of integers -// defined by the three parameters to a range(...) call. -// Invariant: step != 0. 
-type rangeValue struct{ start, stop, step, len int } - -var ( - _ Indexable = rangeValue{} - _ Sequence = rangeValue{} - _ Comparable = rangeValue{} - _ Sliceable = rangeValue{} -) - -func (r rangeValue) Len() int { return r.len } -func (r rangeValue) Index(i int) Value { return MakeInt(r.start + i*r.step) } -func (r rangeValue) Iterate() Iterator { return &rangeIterator{r, 0} } - -// rangeLen calculates the length of a range with the provided start, stop, and step. -// caller must ensure that step is non-zero. -func rangeLen(start, stop, step int) int { - switch { - case step > 0: - if stop > start { - return (stop-1-start)/step + 1 - } - case step < 0: - if start > stop { - return (start-1-stop)/-step + 1 - } - default: - panic("rangeLen: zero step") - } - return 0 -} - -func (r rangeValue) Slice(start, end, step int) Value { - newStart := r.start + r.step*start - newStop := r.start + r.step*end - newStep := r.step * step - return rangeValue{ - start: newStart, - stop: newStop, - step: newStep, - len: rangeLen(newStart, newStop, newStep), - } -} - -func (r rangeValue) Freeze() {} // immutable -func (r rangeValue) String() string { - if r.step != 1 { - return fmt.Sprintf("range(%d, %d, %d)", r.start, r.stop, r.step) - } else if r.start != 0 { - return fmt.Sprintf("range(%d, %d)", r.start, r.stop) - } else { - return fmt.Sprintf("range(%d)", r.stop) - } -} -func (r rangeValue) Type() string { return "range" } -func (r rangeValue) Truth() Bool { return r.len > 0 } -func (r rangeValue) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: range") } - -func (x rangeValue) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(rangeValue) - switch op { - case syntax.EQL: - return rangeEqual(x, y), nil - case syntax.NEQ: - return !rangeEqual(x, y), nil - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func rangeEqual(x, y rangeValue) bool { - // Two ranges compare equal if they denote the same sequence. 
- if x.len != y.len { - return false // sequences differ in length - } - if x.len == 0 { - return true // both sequences are empty - } - if x.start != y.start { - return false // first element differs - } - return x.len == 1 || x.step == y.step -} - -func (r rangeValue) contains(x Int) bool { - x32, err := AsInt32(x) - if err != nil { - return false // out of range - } - delta := x32 - r.start - quo, rem := delta/r.step, delta%r.step - return rem == 0 && 0 <= quo && quo < r.len -} - -type rangeIterator struct { - r rangeValue - i int -} - -func (it *rangeIterator) Next(p *Value) bool { - if it.i < it.r.len { - *p = it.r.Index(it.i) - it.i++ - return true - } - return false -} -func (*rangeIterator) Done() {} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#repr -func repr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value - if err := UnpackPositionalArgs("repr", args, kwargs, 1, &x); err != nil { - return nil, err - } - return String(x.String()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#reversed -func reversed(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("reversed", args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - var elems []Value - if n := Len(args[0]); n >= 0 { - elems = make([]Value, 0, n) // preallocate if length known - } - var x Value - for iter.Next(&x) { - elems = append(elems, x) - } - n := len(elems) - for i := 0; i < n>>1; i++ { - elems[i], elems[n-1-i] = elems[n-1-i], elems[i] - } - return NewList(elems), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#set -func set(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("set", args, kwargs, 0, &iterable); err != nil { - return nil, err - } - set := new(Set) - if iterable != nil { - iter := iterable.Iterate() - defer iter.Done() - var x Value - for iter.Next(&x) { - if err := set.Insert(x); err != nil { - return nil, nameErr(b, err) - } - } - } - return set, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#sorted -func sorted(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - // Oddly, Python's sorted permits all arguments to be positional, thus so do we. - var iterable Iterable - var key Callable - var reverse bool - if err := UnpackArgs("sorted", args, kwargs, - "iterable", &iterable, - "key?", &key, - "reverse?", &reverse, - ); err != nil { - return nil, err - } - - iter := iterable.Iterate() - defer iter.Done() - var values []Value - if n := Len(iterable); n > 0 { - values = make(Tuple, 0, n) // preallocate if length is known - } - var x Value - for iter.Next(&x) { - values = append(values, x) - } - - // Derive keys from values by applying key function. 
- var keys []Value - if key != nil { - keys = make([]Value, len(values)) - for i, v := range values { - k, err := Call(thread, key, Tuple{v}, nil) - if err != nil { - return nil, err // to preserve backtrace, don't modify error - } - keys[i] = k - } - } - - slice := &sortSlice{keys: keys, values: values} - if reverse { - sort.Stable(sort.Reverse(slice)) - } else { - sort.Stable(slice) - } - return NewList(slice.values), slice.err -} - -type sortSlice struct { - keys []Value // nil => values[i] is key - values []Value - err error -} - -func (s *sortSlice) Len() int { return len(s.values) } -func (s *sortSlice) Less(i, j int) bool { - keys := s.keys - if s.keys == nil { - keys = s.values - } - ok, err := Compare(syntax.LT, keys[i], keys[j]) - if err != nil { - s.err = err - } - return ok -} -func (s *sortSlice) Swap(i, j int) { - if s.keys != nil { - s.keys[i], s.keys[j] = s.keys[j], s.keys[i] - } - s.values[i], s.values[j] = s.values[j], s.values[i] -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#str -func str(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("str does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("str: got %d arguments, want exactly 1", len(args)) - } - x := args[0] - if _, ok := AsString(x); !ok { - x = String(x.String()) - } - return x, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#tuple -func tuple(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("tuple", args, kwargs, 0, &iterable); err != nil { - return nil, err - } - if len(args) == 0 { - return Tuple(nil), nil - } - iter := iterable.Iterate() - defer iter.Done() - var elems Tuple - if n := Len(iterable); n > 0 { - elems = make(Tuple, 0, n) // preallocate if length is known - } - var x Value - for iter.Next(&x) { - elems = append(elems, x) - } - return elems, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#type -func type_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("type does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("type: got %d arguments, want exactly 1", len(args)) - } - return String(args[0].Type()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#zip -func zip(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("zip does not accept keyword arguments") - } - rows, cols := 0, len(args) - iters := make([]Iterator, cols) - defer func() { - for _, iter := range iters { - if iter != nil { - iter.Done() - } - } - }() - for i, seq := range args { - it := Iterate(seq) - if it == nil { - return nil, fmt.Errorf("zip: argument #%d is not iterable: %s", i+1, seq.Type()) - } - iters[i] = it - n := Len(seq) - if i == 0 || n < rows { - rows = n // possibly -1 - } - } - var result []Value - if rows >= 0 { - // length known - result = make([]Value, rows) - array := make(Tuple, cols*rows) // allocate a single backing array - for i := 0; i < rows; i++ { - tuple := array[:cols:cols] - array = array[cols:] - for j, iter := range iters { - iter.Next(&tuple[j]) - } - result[i] = tuple - } - } else { - // length not known - outer: - for { - tuple := make(Tuple, cols) - for i, iter := range iters { - if !iter.Next(&tuple[i]) { - break outer - } - } - result = 
append(result, tuple) - } - } - return NewList(result), nil -} - -// ---- methods of built-in types --- - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·get -func dict_get(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var key, dflt Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { - return nil, err - } - if v, ok, err := b.Receiver().(*Dict).Get(key); err != nil { - return nil, nameErr(b, err) - } else if ok { - return v, nil - } else if dflt != nil { - return dflt, nil - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·clear -func dict_clear(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return None, b.Receiver().(*Dict).Clear() -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·items -func dict_items(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - items := b.Receiver().(*Dict).Items() - res := make([]Value, len(items)) - for i, item := range items { - res[i] = item // convert [2]Value to Value - } - return NewList(res), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·keys -func dict_keys(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return NewList(b.Receiver().(*Dict).Keys()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·pop -func dict_pop(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var k, d Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &k, &d); err != nil { - return nil, err - } - if v, found, err := b.Receiver().(*Dict).Delete(k); err != nil { - return nil, nameErr(b, err) // dict is frozen or key is unhashable - } else if found { - return v, nil - } else if d != nil { - return d, nil - } - return nil, nameErr(b, "missing key") -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·popitem -func dict_popitem(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := b.Receiver().(*Dict) - k, ok := recv.ht.first() - if !ok { - return nil, nameErr(b, "empty dict") - } - v, _, err := recv.Delete(k) - if err != nil { - return nil, nameErr(b, err) // dict is frozen - } - return Tuple{k, v}, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·setdefault -func dict_setdefault(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var key, dflt Value = nil, None - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { - return nil, err - } - dict := b.Receiver().(*Dict) - if v, ok, err := dict.Get(key); err != nil { - return nil, nameErr(b, err) - } else if ok { - return v, nil - } else if err := dict.SetKey(key, dflt); err != nil { - return nil, nameErr(b, err) - } else { - return dflt, nil - } -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_update(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(args) > 1 { - return nil, fmt.Errorf("update: got %d arguments, want at most 1", len(args)) - } - if err := updateDict(b.Receiver().(*Dict), args, kwargs); err != nil { - return nil, 
fmt.Errorf("update: %v", err) - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_values(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - items := b.Receiver().(*Dict).Items() - res := make([]Value, len(items)) - for i, item := range items { - res[i] = item[1] - } - return NewList(res), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·append -func list_append(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var object Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &object); err != nil { - return nil, err - } - recv := b.Receiver().(*List) - if err := recv.checkMutable("append to"); err != nil { - return nil, nameErr(b, err) - } - recv.elems = append(recv.elems, object) - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·clear -func list_clear(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - if err := b.Receiver().(*List).Clear(); err != nil { - return nil, nameErr(b, err) - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·extend -func list_extend(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver().(*List) - var iterable Iterable - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &iterable); err != nil { - return nil, err - } - if err := recv.checkMutable("extend"); err != nil { - return nil, nameErr(b, err) - } - listExtend(recv, iterable) - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·index -func list_index(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var value, start_, end_ Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &value, &start_, &end_); err != nil { - return nil, err - } - - recv := b.Receiver().(*List) - start, end, err := indices(start_, end_, recv.Len()) - if err != nil { - return nil, nameErr(b, err) - } - - for i := start; i < end; i++ { - if eq, err := Equal(recv.elems[i], value); err != nil { - return nil, nameErr(b, err) - } else if eq { - return MakeInt(i), nil - } - } - return nil, nameErr(b, "value not in list") -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·insert -func list_insert(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver().(*List) - var index int - var object Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 2, &index, &object); err != nil { - return nil, err - } - if err := recv.checkMutable("insert into"); err != nil { - return nil, nameErr(b, err) - } - - if index < 0 { - index += recv.Len() - } - - if index >= recv.Len() { - // end - recv.elems = append(recv.elems, object) - } else { - if index < 0 { - index = 0 // start - } - recv.elems = append(recv.elems, nil) - copy(recv.elems[index+1:], recv.elems[index:]) // slide up one - recv.elems[index] = object - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·remove -func list_remove(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver().(*List) - var value Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &value); err != nil { - return nil, err - } - if err := recv.checkMutable("remove from"); err != nil { - return 
nil, nameErr(b, err) - } - for i, elem := range recv.elems { - if eq, err := Equal(elem, value); err != nil { - return nil, fmt.Errorf("remove: %v", err) - } else if eq { - recv.elems = append(recv.elems[:i], recv.elems[i+1:]...) - return None, nil - } - } - return nil, fmt.Errorf("remove: element not found") -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·pop -func list_pop(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver() - list := recv.(*List) - n := list.Len() - i := n - 1 - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &i); err != nil { - return nil, err - } - origI := i - if i < 0 { - i += n - } - if i < 0 || i >= n { - return nil, nameErr(b, outOfRange(origI, n, list)) - } - if err := list.checkMutable("pop from"); err != nil { - return nil, nameErr(b, err) - } - res := list.elems[i] - list.elems = append(list.elems[:i], list.elems[i+1:]...) - return res, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·capitalize -func string_capitalize(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - s := string(b.Receiver().(String)) - res := new(strings.Builder) - res.Grow(len(s)) - for i, r := range s { - if i == 0 { - r = unicode.ToTitle(r) - } else { - r = unicode.ToLower(r) - } - res.WriteRune(r) - } - return String(res.String()), nil -} - -// string_iterable returns an unspecified iterable value whose iterator yields: -// - elems: successive 1-byte substrings -// - codepoints: successive substrings that encode a single Unicode code point. -// - elem_ords: numeric values of successive bytes -// - codepoint_ords: numeric values of successive Unicode code points -func string_iterable(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return stringIterable{ - s: b.Receiver().(String), - ords: b.Name()[len(b.Name())-2] == 'd', - codepoints: b.Name()[0] == 'c', - }, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·count -func string_count(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var sub string - var start_, end_ Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sub, &start_, &end_); err != nil { - return nil, err - } - - recv := string(b.Receiver().(String)) - start, end, err := indices(start_, end_, len(recv)) - if err != nil { - return nil, nameErr(b, err) - } - - var slice string - if start < end { - slice = recv[start:end] - } - return MakeInt(strings.Count(slice, sub)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isalnum -func string_isalnum(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsLetter(r) && !unicode.IsDigit(r) { - return False, nil - } - } - return Bool(recv != ""), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isalpha -func string_isalpha(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsLetter(r) { - return False, nil - } - } - return Bool(recv != ""), nil -} - -// 
https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isdigit -func string_isdigit(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsDigit(r) { - return False, nil - } - } - return Bool(recv != ""), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·islower -func string_islower(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - return Bool(isCasedString(recv) && recv == strings.ToLower(recv)), nil -} - -// isCasedString reports whether its argument contains any cased code points. -func isCasedString(s string) bool { - for _, r := range s { - if isCasedRune(r) { - return true - } - } - return false -} - -func isCasedRune(r rune) bool { - // It's unclear what the correct behavior is for a rune such as 'ffi', - // a lowercase letter with no upper or title case and no SimpleFold. - return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || unicode.SimpleFold(r) != r -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isspace -func string_isspace(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsSpace(r) { - return False, nil - } - } - return Bool(recv != ""), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·istitle -func string_istitle(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - - // Python semantics differ from x==strings.{To,}Title(x) in Go: - // "uppercase characters may only follow uncased characters and - // lowercase characters only cased ones." - var cased, prevCased bool - for _, r := range recv { - if 'A' <= r && r <= 'Z' || unicode.IsTitle(r) { // e.g. 
"Dž" - if prevCased { - return False, nil - } - prevCased = true - cased = true - } else if unicode.IsLower(r) { - if !prevCased { - return False, nil - } - prevCased = true - cased = true - } else if unicode.IsUpper(r) { - return False, nil - } else { - prevCased = false - } - } - return Bool(cased), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isupper -func string_isupper(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - return Bool(isCasedString(recv) && recv == strings.ToUpper(recv)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·find -func string_find(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, true, false) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·format -func string_format(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - format := string(b.Receiver().(String)) - var auto, manual bool // kinds of positional indexing used - buf := new(strings.Builder) - index := 0 - for { - literal := format - i := strings.IndexByte(format, '{') - if i >= 0 { - literal = format[:i] - } - - // Replace "}}" with "}" in non-field portion, rejecting a lone '}'. - for { - j := strings.IndexByte(literal, '}') - if j < 0 { - buf.WriteString(literal) - break - } - if len(literal) == j+1 || literal[j+1] != '}' { - return nil, fmt.Errorf("format: single '}' in format") - } - buf.WriteString(literal[:j+1]) - literal = literal[j+2:] - } - - if i < 0 { - break // end of format string - } - - if i+1 < len(format) && format[i+1] == '{' { - // "{{" means a literal '{' - buf.WriteByte('{') - format = format[i+2:] - continue - } - - format = format[i+1:] - i = strings.IndexByte(format, '}') - if i < 0 { - return nil, fmt.Errorf("format: unmatched '{' in format") - } - - var arg Value - conv := "s" - var spec string - - field := format[:i] - format = format[i+1:] - - var name string - if i := strings.IndexByte(field, '!'); i < 0 { - // "name" or "name:spec" - if i := strings.IndexByte(field, ':'); i < 0 { - name = field - } else { - name = field[:i] - spec = field[i+1:] - } - } else { - // "name!conv" or "name!conv:spec" - name = field[:i] - field = field[i+1:] - // "conv" or "conv:spec" - if i := strings.IndexByte(field, ':'); i < 0 { - conv = field - } else { - conv = field[:i] - spec = field[i+1:] - } - } - - if name == "" { - // "{}": automatic indexing - if manual { - return nil, fmt.Errorf("format: cannot switch from manual field specification to automatic field numbering") - } - auto = true - if index >= len(args) { - return nil, fmt.Errorf("format: tuple index out of range") - } - arg = args[index] - index++ - } else if num, ok := decimal(name); ok { - // positional argument - if auto { - return nil, fmt.Errorf("format: cannot switch from automatic field numbering to manual field specification") - } - manual = true - if num >= len(args) { - return nil, fmt.Errorf("format: tuple index out of range") - } else { - arg = args[num] - } - } else { - // keyword argument - for _, kv := range kwargs { - if string(kv[0].(String)) == name { - arg = kv[1] - break - } - } - if arg == nil { - // Starlark does not support Python's x.y or a[i] syntaxes, - // or nested use of {...}. 
- if strings.Contains(name, ".") { - return nil, fmt.Errorf("format: attribute syntax x.y is not supported in replacement fields: %s", name) - } - if strings.Contains(name, "[") { - return nil, fmt.Errorf("format: element syntax a[i] is not supported in replacement fields: %s", name) - } - if strings.Contains(name, "{") { - return nil, fmt.Errorf("format: nested replacement fields not supported") - } - return nil, fmt.Errorf("format: keyword %s not found", name) - } - } - - if spec != "" { - // Starlark does not support Python's format_spec features. - return nil, fmt.Errorf("format spec features not supported in replacement fields: %s", spec) - } - - switch conv { - case "s": - if str, ok := AsString(arg); ok { - buf.WriteString(str) - } else { - writeValue(buf, arg, nil) - } - case "r": - writeValue(buf, arg, nil) - default: - return nil, fmt.Errorf("format: unknown conversion %q", conv) - } - } - return String(buf.String()), nil -} - -// decimal interprets s as a sequence of decimal digits. -func decimal(s string) (x int, ok bool) { - n := len(s) - for i := 0; i < n; i++ { - digit := s[i] - '0' - if digit > 9 { - return 0, false - } - x = x*10 + int(digit) - if x < 0 { - return 0, false // underflow - } - } - return x, true -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·index -func string_index(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, false, false) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·join -func string_join(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var iterable Iterable - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - buf := new(strings.Builder) - var x Value - for i := 0; iter.Next(&x); i++ { - if i > 0 { - buf.WriteString(recv) - } - s, ok := AsString(x) - if !ok { - return nil, fmt.Errorf("join: in list, want string, got %s", x.Type()) - } - buf.WriteString(s) - } - return String(buf.String()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·lower -func string_lower(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return String(strings.ToLower(string(b.Receiver().(String)))), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·partition -func string_partition(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var sep string - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sep); err != nil { - return nil, err - } - if sep == "" { - return nil, nameErr(b, "empty separator") - } - var i int - if b.Name()[0] == 'p' { - i = strings.Index(recv, sep) // partition - } else { - i = strings.LastIndex(recv, sep) // rpartition - } - tuple := make(Tuple, 0, 3) - if i < 0 { - if b.Name()[0] == 'p' { - tuple = append(tuple, String(recv), String(""), String("")) - } else { - tuple = append(tuple, String(""), String(""), String(recv)) - } - } else { - tuple = append(tuple, String(recv[:i]), String(sep), String(recv[i+len(sep):])) - } - return tuple, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·replace -func string_replace(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var old, new string - count 
:= -1 - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 2, &old, &new, &count); err != nil { - return nil, err - } - return String(strings.Replace(recv, old, new, count)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rfind -func string_rfind(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, true, true) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rindex -func string_rindex(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, false, true) -} - -// https://github.com/google/starlark-go/starlark/blob/master/doc/spec.md#string·startswith -// https://github.com/google/starlark-go/starlark/blob/master/doc/spec.md#string·endswith -func string_startswith(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value - var start, end Value = None, None - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &x, &start, &end); err != nil { - return nil, err - } - - // compute effective substring. - s := string(b.Receiver().(String)) - if start, end, err := indices(start, end, len(s)); err != nil { - return nil, nameErr(b, err) - } else { - if end < start { - end = start // => empty result - } - s = s[start:end] - } - - f := strings.HasPrefix - if b.Name()[0] == 'e' { // endswith - f = strings.HasSuffix - } - - switch x := x.(type) { - case Tuple: - for i, x := range x { - prefix, ok := AsString(x) - if !ok { - return nil, fmt.Errorf("%s: want string, got %s, for element %d", - b.Name(), x.Type(), i) - } - if f(s, prefix) { - return True, nil - } - } - return False, nil - case String: - return Bool(f(s, string(x))), nil - } - return nil, fmt.Errorf("%s: got %s, want string or tuple of string", b.Name(), x.Type()) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·strip -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·lstrip -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rstrip -func string_strip(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var chars string - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &chars); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - var s string - switch b.Name()[0] { - case 's': // strip - if chars != "" { - s = strings.Trim(recv, chars) - } else { - s = strings.TrimSpace(recv) - } - case 'l': // lstrip - if chars != "" { - s = strings.TrimLeft(recv, chars) - } else { - s = strings.TrimLeftFunc(recv, unicode.IsSpace) - } - case 'r': // rstrip - if chars != "" { - s = strings.TrimRight(recv, chars) - } else { - s = strings.TrimRightFunc(recv, unicode.IsSpace) - } - } - return String(s), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·title -func string_title(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - - s := string(b.Receiver().(String)) - - // Python semantics differ from x==strings.{To,}Title(x) in Go: - // "uppercase characters may only follow uncased characters and - // lowercase characters only cased ones." 
- buf := new(strings.Builder) - buf.Grow(len(s)) - var prevCased bool - for _, r := range s { - if prevCased { - r = unicode.ToLower(r) - } else { - r = unicode.ToTitle(r) - } - prevCased = isCasedRune(r) - buf.WriteRune(r) - } - return String(buf.String()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·upper -func string_upper(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return String(strings.ToUpper(string(b.Receiver().(String)))), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·split -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rsplit -func string_split(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var sep_ Value - maxsplit := -1 - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &sep_, &maxsplit); err != nil { - return nil, err - } - - var res []string - - if sep_ == nil || sep_ == None { - // special case: split on whitespace - if maxsplit < 0 { - res = strings.Fields(recv) - } else if b.Name() == "split" { - res = splitspace(recv, maxsplit) - } else { // rsplit - res = rsplitspace(recv, maxsplit) - } - - } else if sep, ok := AsString(sep_); ok { - if sep == "" { - return nil, fmt.Errorf("split: empty separator") - } - // usual case: split on non-empty separator - if maxsplit < 0 { - res = strings.Split(recv, sep) - } else if b.Name() == "split" { - res = strings.SplitN(recv, sep, maxsplit+1) - } else { // rsplit - res = strings.Split(recv, sep) - if excess := len(res) - maxsplit; excess > 0 { - res[0] = strings.Join(res[:excess], sep) - res = append(res[:1], res[excess:]...) - } - } - - } else { - return nil, fmt.Errorf("split: got %s for separator, want string", sep_.Type()) - } - - list := make([]Value, len(res)) - for i, x := range res { - list[i] = String(x) - } - return NewList(list), nil -} - -// Precondition: max >= 0. -func rsplitspace(s string, max int) []string { - res := make([]string, 0, max+1) - end := -1 // index of field end, or -1 in a region of spaces. - for i := len(s); i > 0; { - r, sz := utf8.DecodeLastRuneInString(s[:i]) - if unicode.IsSpace(r) { - if end >= 0 { - if len(res) == max { - break // let this field run to the start - } - res = append(res, s[i:end]) - end = -1 - } - } else if end < 0 { - end = i - } - i -= sz - } - if end >= 0 { - res = append(res, s[:end]) - } - - resLen := len(res) - for i := 0; i < resLen/2; i++ { - res[i], res[resLen-1-i] = res[resLen-1-i], res[i] - } - - return res -} - -// Precondition: max >= 0. -func splitspace(s string, max int) []string { - var res []string - start := -1 // index of field start, or -1 in a region of spaces - for i, r := range s { - if unicode.IsSpace(r) { - if start >= 0 { - if len(res) == max { - break // let this field run to the end - } - res = append(res, s[start:i]) - start = -1 - } - } else if start == -1 { - start = i - } - } - if start >= 0 { - res = append(res, s[start:]) - } - return res -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·splitlines -func string_splitlines(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var keepends bool - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &keepends); err != nil { - return nil, err - } - var lines []string - if s := string(b.Receiver().(String)); s != "" { - // TODO(adonovan): handle CRLF correctly. 
- if keepends { - lines = strings.SplitAfter(s, "\n") - } else { - lines = strings.Split(s, "\n") - } - if strings.HasSuffix(s, "\n") { - lines = lines[:len(lines)-1] - } - } - list := make([]Value, len(lines)) - for i, x := range lines { - list[i] = String(x) - } - return NewList(list), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#set·union. -func set_union(b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - union, err := b.Receiver().(*Set).Union(iter) - if err != nil { - return nil, nameErr(b, err) - } - return union, nil -} - -// Common implementation of string_{r}{find,index}. -func string_find_impl(b *Builtin, args Tuple, kwargs []Tuple, allowError, last bool) (Value, error) { - var sub string - var start_, end_ Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sub, &start_, &end_); err != nil { - return nil, err - } - - s := string(b.Receiver().(String)) - start, end, err := indices(start_, end_, len(s)) - if err != nil { - return nil, nameErr(b, err) - } - var slice string - if start < end { - slice = s[start:end] - } - - var i int - if last { - i = strings.LastIndex(slice, sub) - } else { - i = strings.Index(slice, sub) - } - if i < 0 { - if !allowError { - return nil, nameErr(b, "substring not found") - } - return MakeInt(-1), nil - } - return MakeInt(i + start), nil -} - -// Common implementation of builtin dict function and dict.update method. -// Precondition: len(updates) == 0 or 1. -func updateDict(dict *Dict, updates Tuple, kwargs []Tuple) error { - if len(updates) == 1 { - switch updates := updates[0].(type) { - case IterableMapping: - // Iterate over dict's key/value pairs, not just keys. - for _, item := range updates.Items() { - if err := dict.SetKey(item[0], item[1]); err != nil { - return err // dict is frozen - } - } - default: - // all other sequences - iter := Iterate(updates) - if iter == nil { - return fmt.Errorf("got %s, want iterable", updates.Type()) - } - defer iter.Done() - var pair Value - for i := 0; iter.Next(&pair); i++ { - iter2 := Iterate(pair) - if iter2 == nil { - return fmt.Errorf("dictionary update sequence element #%d is not iterable (%s)", i, pair.Type()) - - } - defer iter2.Done() - len := Len(pair) - if len < 0 { - return fmt.Errorf("dictionary update sequence element #%d has unknown length (%s)", i, pair.Type()) - } else if len != 2 { - return fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, len) - } - var k, v Value - iter2.Next(&k) - iter2.Next(&v) - if err := dict.SetKey(k, v); err != nil { - return err - } - } - } - } - - // Then add the kwargs. - before := dict.Len() - for _, pair := range kwargs { - if err := dict.SetKey(pair[0], pair[1]); err != nil { - return err // dict is frozen - } - } - // In the common case, each kwarg will add another dict entry. - // If that's not so, check whether it is because there was a duplicate kwarg. - if dict.Len() < before+len(kwargs) { - keys := make(map[String]bool, len(kwargs)) - for _, kv := range kwargs { - k := kv[0].(String) - if keys[k] { - return fmt.Errorf("duplicate keyword arg: %v", k) - } - keys[k] = true - } - } - - return nil -} - -// nameErr returns an error message of the form "name: msg" -// where name is b.Name() and msg is a string or error. 
-func nameErr(b *Builtin, msg interface{}) error { - return fmt.Errorf("%s: %v", b.Name(), msg) -} diff --git a/vendor/github.com/k14s/starlark-go/starlark/profile.go b/vendor/github.com/k14s/starlark-go/starlark/profile.go deleted file mode 100644 index f6d39a828..000000000 --- a/vendor/github.com/k14s/starlark-go/starlark/profile.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2019 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -// This file defines a simple execution-time profiler for Starlark. -// It measures the wall time spent executing Starlark code, and emits a -// gzipped protocol message in pprof format (github.com/google/pprof). -// -// When profiling is enabled, the interpreter calls the profiler to -// indicate the start and end of each "span" or time interval. A leaf -// function (whether Go or Starlark) has a single span. A function that -// calls another function has spans for each interval in which it is the -// top of the stack. (A LOAD instruction also ends a span.) -// -// At the start of a span, the interpreter records the current time in -// the thread's topmost frame. At the end of the span, it obtains the -// time again and subtracts the span start time. The difference is added -// to an accumulator variable in the thread. If the accumulator exceeds -// some fixed quantum (10ms, say), the profiler records the current call -// stack and sends it to the profiler goroutine, along with the number -// of quanta, which are subtracted. For example, if the accumulator -// holds 3ms and then a completed span adds 25ms to it, its value is 28ms, -// which exceeeds 10ms. The profiler records a stack with the value 20ms -// (2 quanta), and the accumulator is left with 8ms. -// -// The profiler goroutine converts the stacks into the pprof format and -// emits a gzip-compressed protocol message to the designated output -// file. We use a hand-written streaming proto encoder to avoid -// dependencies on pprof and proto, and to avoid the need to -// materialize the profile data structure in memory. -// -// A limitation of this profiler is that it measures wall time, which -// does not necessarily correspond to CPU time. A CPU profiler requires -// that only running (not runnable) threads are sampled; this is -// commonly achieved by having the kernel deliver a (PROF) signal to an -// arbitrary running thread, through setitimer(2). The CPU profiler in the -// Go runtime uses this mechanism, but it is not possible for a Go -// application to register a SIGPROF handler, nor is it possible for a -// Go handler for some other signal to read the stack pointer of -// the interrupted thread. -// -// Two caveats: -// (1) it is tempting to send the leaf Frame directly to the profiler -// goroutine instead of making a copy of the stack, since a Frame is a -// spaghetti stack--a linked list. However, as soon as execution -// resumes, the stack's Frame.pc values may be mutated, so Frames are -// not safe to share with the asynchronous profiler goroutine. -// (2) it is tempting to use Callables as keys in a map when tabulating -// the pprof protocols's Function entities. However, we cannot assume -// that Callables are valid map keys, and furthermore we must not -// pin function values in memory indefinitely as this may cause lambda -// values to keep their free variables live much longer than necessary. - -// TODO(adonovan): -// - make Start/Stop fully thread-safe. 
-// - fix the pc hack. -// - experiment with other values of quantum. - -import ( - "bufio" - "bytes" - "compress/gzip" - "encoding/binary" - "fmt" - "io" - "log" - "reflect" - "sync/atomic" - "time" - "unsafe" - - "github.com/k14s/starlark-go/syntax" -) - -// StartProfile enables time profiling of all Starlark threads, -// and writes a profile in pprof format to w. -// It must be followed by a call to StopProfiler to stop -// the profiler and finalize the profile. -// -// StartProfile returns an error if profiling was already enabled. -// -// StartProfile must not be called concurrently with Starlark execution. -func StartProfile(w io.Writer) error { - if !atomic.CompareAndSwapUint32(&profiler.on, 0, 1) { - return fmt.Errorf("profiler already running") - } - - // TODO(adonovan): make the API fully concurrency-safe. - // The main challenge is racy reads/writes of profiler.events, - // and of send/close races on the channel it refers to. - // It's easy to solve them with a mutex but harder to do - // it efficiently. - - profiler.events = make(chan *profEvent, 1) - profiler.done = make(chan error) - - go profile(w) - - return nil -} - -// StopProfiler stops the profiler started by a prior call to -// StartProfile and finalizes the profile. It returns an error if the -// profile could not be completed. -// -// StopProfiler must not be called concurrently with Starlark execution. -func StopProfile() error { - // Terminate the profiler goroutine and get its result. - close(profiler.events) - err := <-profiler.done - - profiler.done = nil - profiler.events = nil - atomic.StoreUint32(&profiler.on, 0) - - return err -} - -// globals -var profiler struct { - on uint32 // nonzero => profiler running - events chan *profEvent // profile events from interpreter threads - done chan error // indicates profiler goroutine is ready -} - -func (thread *Thread) beginProfSpan() { - if profiler.events == nil { - return // profiling not enabled - } - - thread.frameAt(0).spanStart = nanotime() -} - -// TODO(adonovan): experiment with smaller values, -// which trade space and time for greater precision. -const quantum = 10 * time.Millisecond - -func (thread *Thread) endProfSpan() { - if profiler.events == nil { - return // profiling not enabled - } - - // Add the span to the thread's accumulator. - thread.proftime += time.Duration(nanotime() - thread.frameAt(0).spanStart) - if thread.proftime < quantum { - return - } - - // Only record complete quanta. - n := thread.proftime / quantum - thread.proftime -= n * quantum - - // Copy the stack. - // (We can't save thread.frame because its pc will change.) - ev := &profEvent{ - thread: thread, - time: n * quantum, - } - ev.stack = ev.stackSpace[:0] - for i := range thread.stack { - fr := thread.frameAt(i) - ev.stack = append(ev.stack, profFrame{ - pos: fr.Position(), - fn: fr.Callable(), - pc: fr.pc, - }) - } - - profiler.events <- ev -} - -type profEvent struct { - thread *Thread // currently unused - time time.Duration - stack []profFrame - stackSpace [8]profFrame // initial space for stack -} - -type profFrame struct { - fn Callable // don't hold this live for too long (prevents GC of lambdas) - pc uint32 // program counter (Starlark frames only) - pos syntax.Position // position of pc within this frame -} - -// profile is the profiler goroutine. -// It runs until StopProfiler is called. -func profile(w io.Writer) { - // Field numbers from pprof protocol. 
- // See https://github.com/google/pprof/blob/master/proto/profile.proto - const ( - Profile_sample_type = 1 // repeated ValueType - Profile_sample = 2 // repeated Sample - Profile_mapping = 3 // repeated Mapping - Profile_location = 4 // repeated Location - Profile_function = 5 // repeated Function - Profile_string_table = 6 // repeated string - Profile_time_nanos = 9 // int64 - Profile_duration_nanos = 10 // int64 - Profile_period_type = 11 // ValueType - Profile_period = 12 // int64 - - ValueType_type = 1 // int64 - ValueType_unit = 2 // int64 - - Sample_location_id = 1 // repeated uint64 - Sample_value = 2 // repeated int64 - Sample_label = 3 // repeated Label - - Label_key = 1 // int64 - Label_str = 2 // int64 - Label_num = 3 // int64 - Label_num_unit = 4 // int64 - - Location_id = 1 // uint64 - Location_mapping_id = 2 // uint64 - Location_address = 3 // uint64 - Location_line = 4 // repeated Line - - Line_function_id = 1 // uint64 - Line_line = 2 // int64 - - Function_id = 1 // uint64 - Function_name = 2 // int64 - Function_system_name = 3 // int64 - Function_filename = 4 // int64 - Function_start_line = 5 // int64 - ) - - bufw := bufio.NewWriter(w) // write file in 4KB (not 240B flate-sized) chunks - gz := gzip.NewWriter(bufw) - enc := protoEncoder{w: gz} - - // strings - stringIndex := make(map[string]int64) - str := func(s string) int64 { - i, ok := stringIndex[s] - if !ok { - i = int64(len(stringIndex)) - enc.string(Profile_string_table, s) - stringIndex[s] = i - } - return i - } - str("") // entry 0 - - // functions - // - // function returns the ID of a Callable for use in Line.FunctionId. - // The ID is the same as the function's logical address, - // which is supplied by the caller to avoid the need to recompute it. - functionId := make(map[uintptr]uint64) - function := func(fn Callable, addr uintptr) uint64 { - id, ok := functionId[addr] - if !ok { - id = uint64(addr) - - var pos syntax.Position - if fn, ok := fn.(callableWithPosition); ok { - pos = fn.Position() - } - - name := fn.Name() - if name == "" { - name = pos.Filename() - } - - nameIndex := str(name) - - fun := new(bytes.Buffer) - funenc := protoEncoder{w: fun} - funenc.uint(Function_id, id) - funenc.int(Function_name, nameIndex) - funenc.int(Function_system_name, nameIndex) - funenc.int(Function_filename, str(pos.Filename())) - funenc.int(Function_start_line, int64(pos.Line)) - enc.bytes(Profile_function, fun.Bytes()) - - functionId[addr] = id - } - return id - } - - // locations - // - // location returns the ID of the location denoted by fr. - // For Starlark frames, this is the Frame pc. - locationId := make(map[uintptr]uint64) - location := func(fr profFrame) uint64 { - fnAddr := profFuncAddr(fr.fn) - - // For Starlark functions, the frame position - // represents the current PC value. - // Mix it into the low bits of the address. - // This is super hacky and may result in collisions - // in large functions or if functions are numerous. - // TODO(adonovan): fix: try making this cleaner by treating - // each bytecode segment as a Profile.Mapping. 
- pcAddr := fnAddr - if _, ok := fr.fn.(*Function); ok { - pcAddr = (pcAddr << 16) ^ uintptr(fr.pc) - } - - id, ok := locationId[pcAddr] - if !ok { - id = uint64(pcAddr) - - line := new(bytes.Buffer) - lineenc := protoEncoder{w: line} - lineenc.uint(Line_function_id, function(fr.fn, fnAddr)) - lineenc.int(Line_line, int64(fr.pos.Line)) - loc := new(bytes.Buffer) - locenc := protoEncoder{w: loc} - locenc.uint(Location_id, id) - locenc.uint(Location_address, uint64(pcAddr)) - locenc.bytes(Location_line, line.Bytes()) - enc.bytes(Profile_location, loc.Bytes()) - - locationId[pcAddr] = id - } - return id - } - - wallNanos := new(bytes.Buffer) - wnenc := protoEncoder{w: wallNanos} - wnenc.int(ValueType_type, str("wall")) - wnenc.int(ValueType_unit, str("nanoseconds")) - - // informational fields of Profile - enc.bytes(Profile_sample_type, wallNanos.Bytes()) - enc.int(Profile_period, quantum.Nanoseconds()) // magnitude of sampling period - enc.bytes(Profile_period_type, wallNanos.Bytes()) // dimension and unit of period - enc.int(Profile_time_nanos, time.Now().UnixNano()) // start (real) time of profile - - startNano := nanotime() - - // Read profile events from the channel - // until it is closed by StopProfiler. - for e := range profiler.events { - sample := new(bytes.Buffer) - sampleenc := protoEncoder{w: sample} - sampleenc.int(Sample_value, e.time.Nanoseconds()) // wall nanoseconds - for _, fr := range e.stack { - sampleenc.uint(Sample_location_id, location(fr)) - } - enc.bytes(Profile_sample, sample.Bytes()) - } - - endNano := nanotime() - enc.int(Profile_duration_nanos, endNano-startNano) - - err := gz.Close() // Close reports any prior write error - if flushErr := bufw.Flush(); err == nil { - err = flushErr - } - profiler.done <- err -} - -// nanotime returns the time in nanoseconds since epoch. -// It is implemented by runtime.nanotime using the linkname hack; -// runtime.nanotime is defined for all OSs/ARCHS and uses the -// monotonic system clock, which there is no portable way to access. -// Should that function ever go away, these alternatives exist: -// -// // POSIX only. REALTIME not MONOTONIC. 17ns. -// var tv syscall.Timeval -// syscall.Gettimeofday(&tv) // can't fail -// return tv.Nano() -// -// // Portable. REALTIME not MONOTONIC. 46ns. -// return time.Now().Nanoseconds() -// -// // POSIX only. Adds a dependency. -// import "golang.org/x/sys/unix" -// var ts unix.Timespec -// unix.ClockGettime(CLOCK_MONOTONIC, &ts) // can't fail -// return unix.TimespecToNsec(ts) -// -//go:linkname nanotime runtime.nanotime -func nanotime() int64 - -// profFuncAddr returns the canonical "address" -// of a Callable for use by the profiler. -func profFuncAddr(fn Callable) uintptr { - switch fn := fn.(type) { - case *Builtin: - return reflect.ValueOf(fn.fn).Pointer() - case *Function: - return uintptr(unsafe.Pointer(fn.funcode)) - } - - // User-defined callable types are typically of - // of kind pointer-to-struct. Handle them specially. - if v := reflect.ValueOf(fn); v.Type().Kind() == reflect.Ptr { - return v.Pointer() - } - - // Address zero is reserved by the protocol. - // Use 1 for callables we don't recognize. - log.Printf("Starlark profiler: no address for Callable %T", fn) - return 1 -} - -// We encode the protocol message by hand to avoid making -// the interpreter depend on both github.com/google/pprof -// and github.com/golang/protobuf. -// -// This also avoids the need to materialize a protocol message object -// tree of unbounded size and serialize it all at the end. 
-// The pprof format appears to have been designed to -// permit streaming implementations such as this one. -// -// See https://developers.google.com/protocol-buffers/docs/encoding. -type protoEncoder struct { - w io.Writer // *bytes.Buffer or *gzip.Writer - tmp [binary.MaxVarintLen64]byte -} - -func (e *protoEncoder) uvarint(x uint64) { - n := binary.PutUvarint(e.tmp[:], x) - e.w.Write(e.tmp[:n]) -} - -func (e *protoEncoder) tag(field, wire uint) { - e.uvarint(uint64(field<<3 | wire)) -} - -func (e *protoEncoder) string(field uint, s string) { - e.tag(field, 2) // length-delimited - e.uvarint(uint64(len(s))) - io.WriteString(e.w, s) -} - -func (e *protoEncoder) bytes(field uint, b []byte) { - e.tag(field, 2) // length-delimited - e.uvarint(uint64(len(b))) - e.w.Write(b) -} - -func (e *protoEncoder) uint(field uint, x uint64) { - e.tag(field, 0) // varint - e.uvarint(x) -} - -func (e *protoEncoder) int(field uint, x int64) { - e.tag(field, 0) // varint - e.uvarint(uint64(x)) -} diff --git a/vendor/github.com/k14s/starlark-go/starlark/unpack.go b/vendor/github.com/k14s/starlark-go/starlark/unpack.go deleted file mode 100644 index 6c870f951..000000000 --- a/vendor/github.com/k14s/starlark-go/starlark/unpack.go +++ /dev/null @@ -1,258 +0,0 @@ -package starlark - -// This file defines the Unpack helper functions used by -// built-in functions to interpret their call arguments. - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// UnpackArgs unpacks the positional and keyword arguments into the -// supplied parameter variables. pairs is an alternating list of names -// and pointers to variables. -// -// If the variable is a bool, int, string, *List, *Dict, Callable, -// Iterable, or user-defined implementation of Value, -// UnpackArgs performs the appropriate type check. -// An int uses the AsInt32 check. -// If the parameter name ends with "?", -// it and all following parameters are optional. -// -// If the variable implements Value, UnpackArgs may call -// its Type() method while constructing the error message. -// -// Beware: an optional *List, *Dict, Callable, Iterable, or Value variable that is -// not assigned is not a valid Starlark Value, so the caller must -// explicitly handle such cases by interpreting nil as None or some -// computed default. -func UnpackArgs(fnname string, args Tuple, kwargs []Tuple, pairs ...interface{}) error { - nparams := len(pairs) / 2 - var defined intset - defined.init(nparams) - - paramName := func(x interface{}) string { // (no free variables) - name := x.(string) - if name[len(name)-1] == '?' 
{ - name = name[:len(name)-1] - } - return name - } - - // positional arguments - if len(args) > nparams { - return fmt.Errorf("%s: got %d arguments, want at most %d", - fnname, len(args), nparams) - } - for i, arg := range args { - defined.set(i) - if err := unpackOneArg(arg, pairs[2*i+1]); err != nil { - name := paramName(pairs[2*i]) - return fmt.Errorf("%s: for parameter %s: %s", fnname, name, err) - } - } - - // keyword arguments -kwloop: - for _, item := range kwargs { - name, arg := item[0].(String), item[1] - for i := 0; i < nparams; i++ { - if paramName(pairs[2*i]) == string(name) { - // found it - if defined.set(i) { - return fmt.Errorf("%s: got multiple values for keyword argument %s", - fnname, name) - } - ptr := pairs[2*i+1] - if err := unpackOneArg(arg, ptr); err != nil { - return fmt.Errorf("%s: for parameter %s: %s", fnname, name, err) - } - continue kwloop - } - } - return fmt.Errorf("%s: unexpected keyword argument %s", fnname, name) - } - - // Check that all non-optional parameters are defined. - // (We needn't check the first len(args).) - for i := len(args); i < nparams; i++ { - name := pairs[2*i].(string) - if strings.HasSuffix(name, "?") { - break // optional - } - if !defined.get(i) { - return fmt.Errorf("%s: missing argument for %s", fnname, name) - } - } - - return nil -} - -// UnpackPositionalArgs unpacks the positional arguments into -// corresponding variables. Each element of vars is a pointer; see -// UnpackArgs for allowed types and conversions. -// -// UnpackPositionalArgs reports an error if the number of arguments is -// less than min or greater than len(vars), if kwargs is nonempty, or if -// any conversion fails. -func UnpackPositionalArgs(fnname string, args Tuple, kwargs []Tuple, min int, vars ...interface{}) error { - if len(kwargs) > 0 { - return fmt.Errorf("%s: unexpected keyword arguments", fnname) - } - max := len(vars) - if len(args) < min { - var atleast string - if min < max { - atleast = "at least " - } - return fmt.Errorf("%s: got %d arguments, want %s%d", fnname, len(args), atleast, min) - } - if len(args) > max { - var atmost string - if max > min { - atmost = "at most " - } - return fmt.Errorf("%s: got %d arguments, want %s%d", fnname, len(args), atmost, max) - } - for i, arg := range args { - if err := unpackOneArg(arg, vars[i]); err != nil { - return fmt.Errorf("%s: for parameter %d: %s", fnname, i+1, err) - } - } - return nil -} - -func unpackOneArg(v Value, ptr interface{}) error { - // On failure, don't clobber *ptr. - switch ptr := ptr.(type) { - case *Value: - *ptr = v - case *string: - s, ok := AsString(v) - if !ok { - return fmt.Errorf("got %s, want string", v.Type()) - } - *ptr = s - case *bool: - b, ok := v.(Bool) - if !ok { - return fmt.Errorf("got %s, want bool", v.Type()) - } - *ptr = bool(b) - case *int: - i, err := AsInt32(v) - if err != nil { - return err - } - *ptr = i - case **List: - list, ok := v.(*List) - if !ok { - return fmt.Errorf("got %s, want list", v.Type()) - } - *ptr = list - case **Dict: - dict, ok := v.(*Dict) - if !ok { - return fmt.Errorf("got %s, want dict", v.Type()) - } - *ptr = dict - case *Callable: - f, ok := v.(Callable) - if !ok { - return fmt.Errorf("got %s, want callable", v.Type()) - } - *ptr = f - case *Iterable: - it, ok := v.(Iterable) - if !ok { - return fmt.Errorf("got %s, want iterable", v.Type()) - } - *ptr = it - default: - // v must have type *V, where V is some subtype of starlark.Value. 
- ptrv := reflect.ValueOf(ptr) - if ptrv.Kind() != reflect.Ptr { - log.Panicf("internal error: not a pointer: %T", ptr) - } - paramVar := ptrv.Elem() - if !reflect.TypeOf(v).AssignableTo(paramVar.Type()) { - // The value is not assignable to the variable. - - // Detect a possible bug in the Go program that called Unpack: - // If the variable *ptr is not a subtype of Value, - // no value of v can possibly work. - if !paramVar.Type().AssignableTo(reflect.TypeOf(new(Value)).Elem()) { - log.Panicf("pointer element type does not implement Value: %T", ptr) - } - - // Report Starlark dynamic type error. - // - // We prefer the Starlark Value.Type name over - // its Go reflect.Type name, but calling the - // Value.Type method on the variable is not safe - // in general. If the variable is an interface, - // the call will fail. Even if the variable has - // a concrete type, it might not be safe to call - // Type() on a zero instance. Thus we must use - // recover. - - // Default to Go reflect.Type name - paramType := paramVar.Type().String() - - // Attempt to call Value.Type method. - func() { - defer func() { recover() }() - paramType = paramVar.MethodByName("Type").Call(nil)[0].String() - }() - return fmt.Errorf("got %s, want %s", v.Type(), paramType) - } - paramVar.Set(reflect.ValueOf(v)) - } - return nil -} - -type intset struct { - small uint64 // bitset, used if n < 64 - large map[int]bool // set, used if n >= 64 -} - -func (is *intset) init(n int) { - if n >= 64 { - is.large = make(map[int]bool) - } -} - -func (is *intset) set(i int) (prev bool) { - if is.large == nil { - prev = is.small&(1< Hash(x) == Hash(y). - // Hash may fail if the value's type is not hashable, or if the value - // contains a non-hashable value. The hash is used only by dictionaries and - // is not exposed to the Starlark program. - Hash() (uint32, error) -} - -// A Comparable is a value that defines its own equivalence relation and -// perhaps ordered comparisons. -type Comparable interface { - Value - // CompareSameType compares one value to another of the same Type(). - // The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE. - // CompareSameType returns an error if an ordered comparison was - // requested for a type that does not support it. - // - // Implementations that recursively compare subcomponents of - // the value should use the CompareDepth function, not Compare, to - // avoid infinite recursion on cyclic structures. - // - // The depth parameter is used to bound comparisons of cyclic - // data structures. Implementations should decrement depth - // before calling CompareDepth and should return an error if depth - // < 1. - // - // Client code should not call this method. Instead, use the - // standalone Compare or Equals functions, which are defined for - // all pairs of operands. - CompareSameType(op syntax.Token, y Value, depth int) (bool, error) -} - -var ( - _ Comparable = None - _ Comparable = Int{} - _ Comparable = False - _ Comparable = Float(0) - _ Comparable = String("") - _ Comparable = (*Dict)(nil) - _ Comparable = (*List)(nil) - _ Comparable = Tuple(nil) - _ Comparable = (*Set)(nil) -) - -// A Callable value f may be the operand of a function call, f(x). -// -// Clients should use the Call function, never the CallInternal method. 
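A minimal usage sketch (illustrative, not part of the vendored source; the function name, parameters, and greeting text are invented) of a Go-implemented callable: the builtin unpacks its arguments with the UnpackArgs helper shown above, and the embedding program invokes it through Call rather than CallInternal:

package main

import (
	"fmt"

	"github.com/k14s/starlark-go/starlark"
)

// greet implements a builtin with one required parameter ("name") and one
// optional parameter ("excited?", note the trailing '?').
func greet(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
	var name string
	excited := false
	if err := starlark.UnpackArgs(b.Name(), args, kwargs, "name", &name, "excited?", &excited); err != nil {
		return nil, err
	}
	msg := "hello, " + name
	if excited {
		msg += "!"
	}
	return starlark.String(msg), nil
}

func main() {
	thread := &starlark.Thread{Name: "example"}
	fn := starlark.NewBuiltin("greet", greet)
	// Clients go through starlark.Call, never fn.CallInternal directly.
	v, err := starlark.Call(thread, fn, starlark.Tuple{starlark.String("world")}, nil)
	fmt.Println(v, err) // "hello, world" <nil>
}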
-type Callable interface { - Value - Name() string - CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) -} - -type callableWithPosition interface { - Callable - Position() syntax.Position -} - -var ( - _ Callable = (*Builtin)(nil) - _ Callable = (*Function)(nil) - _ callableWithPosition = (*Function)(nil) -) - -// An Iterable abstracts a sequence of values. -// An iterable value may be iterated over by a 'for' loop or used where -// any other Starlark iterable is allowed. Unlike a Sequence, the length -// of an Iterable is not necessarily known in advance of iteration. -type Iterable interface { - Value - Iterate() Iterator // must be followed by call to Iterator.Done -} - -// A Sequence is a sequence of values of known length. -type Sequence interface { - Iterable - Len() int -} - -var ( - _ Sequence = (*Dict)(nil) - _ Sequence = (*Set)(nil) -) - -// An Indexable is a sequence of known length that supports efficient random access. -// It is not necessarily iterable. -type Indexable interface { - Value - Index(i int) Value // requires 0 <= i < Len() - Len() int -} - -// A Sliceable is a sequence that can be cut into pieces with the slice operator (x[i:j:step]). -// -// All native indexable objects are sliceable. -// This is a separate interface for backwards-compatibility. -type Sliceable interface { - Indexable - // For positive strides (step > 0), 0 <= start <= end <= n. - // For negative strides (step < 0), -1 <= end <= start < n. - // The caller must ensure that the start and end indices are valid - // and that step is non-zero. - Slice(start, end, step int) Value -} - -// A HasSetIndex is an Indexable value whose elements may be assigned (x[i] = y). -// -// The implementation should not add Len to a negative index as the -// evaluator does this before the call. -type HasSetIndex interface { - Indexable - SetIndex(index int, v Value) error -} - -var ( - _ HasSetIndex = (*List)(nil) - _ Indexable = Tuple(nil) - _ Indexable = String("") - _ Sliceable = Tuple(nil) - _ Sliceable = String("") - _ Sliceable = (*List)(nil) -) - -// An Iterator provides a sequence of values to the caller. -// -// The caller must call Done when the iterator is no longer needed. -// Operations that modify a sequence will fail if it has active iterators. -// -// Example usage: -// -// iter := iterable.Iterator() -// defer iter.Done() -// var x Value -// for iter.Next(&x) { -// ... -// } -// -type Iterator interface { - // If the iterator is exhausted, Next returns false. - // Otherwise it sets *p to the current element of the sequence, - // advances the iterator, and returns true. - Next(p *Value) bool - Done() -} - -// A Mapping is a mapping from keys to values, such as a dictionary. -// -// If a type satisfies both Mapping and Iterable, the iterator yields -// the keys of the mapping. -type Mapping interface { - Value - // Get returns the value corresponding to the specified key, - // or !found if the mapping does not contain the key. - // - // Get also defines the behavior of "v in mapping". - // The 'in' operator reports the 'found' component, ignoring errors. - Get(Value) (v Value, found bool, err error) -} - -// An IterableMapping is a mapping that supports key enumeration. -type IterableMapping interface { - Mapping - Iterate() Iterator // see Iterable interface - Items() []Tuple // a new slice containing all key/value pairs -} - -var _ IterableMapping = (*Dict)(nil) - -// A HasSetKey supports map update using x[k]=v syntax, like a dictionary. 
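A minimal sketch (illustrative, from an embedding Go program; the key and value are invented) of the Mapping and HasSetKey contracts described above, using the built-in *Dict:

package main

import (
	"fmt"

	"github.com/k14s/starlark-go/starlark"
)

func main() {
	d := starlark.NewDict(1)

	// x[k] = v from Go: *Dict implements HasSetKey.
	if err := d.SetKey(starlark.String("answer"), starlark.MakeInt(42)); err != nil {
		panic(err)
	}

	// "k in x" semantics: the 'in' operator reports only the found component.
	if v, found, err := d.Get(starlark.String("answer")); err == nil && found {
		fmt.Println(v) // 42
	}
}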
-type HasSetKey interface { - Mapping - SetKey(k, v Value) error -} - -var _ HasSetKey = (*Dict)(nil) - -// A HasBinary value may be used as either operand of these binary operators: -// + - * / // % in not in | & ^ << >> -// -// The Side argument indicates whether the receiver is the left or right operand. -// -// An implementation may decline to handle an operation by returning (nil, nil). -// For this reason, clients should always call the standalone Binary(op, x, y) -// function rather than calling the method directly. -type HasBinary interface { - Value - Binary(op syntax.Token, y Value, side Side) (Value, error) -} - -type Side bool - -const ( - Left Side = false - Right Side = true -) - -// A HasUnary value may be used as the operand of these unary operators: -// + - ~ -// -// An implementation may decline to handle an operation by returning (nil, nil). -// For this reason, clients should always call the standalone Unary(op, x) -// function rather than calling the method directly. -type HasUnary interface { - Value - Unary(op syntax.Token) (Value, error) -} - -// A HasAttrs value has fields or methods that may be read by a dot expression (y = x.f). -// Attribute names may be listed using the built-in 'dir' function. -// -// For implementation convenience, a result of (nil, nil) from Attr is -// interpreted as a "no such field or method" error. Implementations are -// free to return a more precise error. -type HasAttrs interface { - Value - Attr(name string) (Value, error) // returns (nil, nil) if attribute not present - AttrNames() []string // callers must not modify the result. -} - -var ( - _ HasAttrs = String("") - _ HasAttrs = new(List) - _ HasAttrs = new(Dict) - _ HasAttrs = new(Set) -) - -// A HasSetField value has fields that may be written by a dot expression (x.f = y). -// -// An implementation of SetField may return a NoSuchAttrError, -// in which case the runtime may augment the error message to -// warn of possible misspelling. -type HasSetField interface { - HasAttrs - SetField(name string, val Value) error -} - -// A NoSuchAttrError may be returned by an implementation of -// HasAttrs.Attr or HasSetField.SetField to indicate that no such field -// exists. In that case the runtime may augment the error message to -// warn of possible misspelling. -type NoSuchAttrError string - -func (e NoSuchAttrError) Error() string { return string(e) } - -// NoneType is the type of None. Its only legal value is None. -// (We represent it as a number, not struct{}, so that None may be constant.) -type NoneType byte - -const None = NoneType(0) - -func (NoneType) String() string { return "None" } -func (NoneType) Type() string { return "NoneType" } -func (NoneType) Freeze() {} // immutable -func (NoneType) Truth() Bool { return False } -func (NoneType) Hash() (uint32, error) { return 0, nil } -func (NoneType) CompareSameType(op syntax.Token, y Value, depth int) (bool, error) { - return threeway(op, 0), nil -} - -// Bool is the type of a Starlark bool. 
-type Bool bool - -const ( - False Bool = false - True Bool = true -) - -func (b Bool) String() string { - if b { - return "True" - } else { - return "False" - } -} -func (b Bool) Type() string { return "bool" } -func (b Bool) Freeze() {} // immutable -func (b Bool) Truth() Bool { return b } -func (b Bool) Hash() (uint32, error) { return uint32(b2i(bool(b))), nil } -func (x Bool) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(Bool) - return threeway(op, b2i(bool(x))-b2i(bool(y))), nil -} - -// Float is the type of a Starlark float. -type Float float64 - -func (f Float) String() string { return strconv.FormatFloat(float64(f), 'g', 6, 64) } -func (f Float) Type() string { return "float" } -func (f Float) Freeze() {} // immutable -func (f Float) Truth() Bool { return f != 0.0 } -func (f Float) Hash() (uint32, error) { - // Equal float and int values must yield the same hash. - // TODO(adonovan): opt: if f is non-integral, and thus not equal - // to any Int, we can avoid the Int conversion and use a cheaper hash. - if isFinite(float64(f)) { - return finiteFloatToInt(f).Hash() - } - return 1618033, nil // NaN, +/-Inf -} - -func floor(f Float) Float { return Float(math.Floor(float64(f))) } - -// isFinite reports whether f represents a finite rational value. -// It is equivalent to !math.IsNan(f) && !math.IsInf(f, 0). -func isFinite(f float64) bool { - return math.Abs(f) <= math.MaxFloat64 -} - -func (x Float) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(Float) - switch op { - case syntax.EQL: - return x == y, nil - case syntax.NEQ: - return x != y, nil - case syntax.LE: - return x <= y, nil - case syntax.LT: - return x < y, nil - case syntax.GE: - return x >= y, nil - case syntax.GT: - return x > y, nil - } - panic(op) -} - -func (f Float) rational() *big.Rat { return new(big.Rat).SetFloat64(float64(f)) } - -// AsFloat returns the float64 value closest to x. -// The f result is undefined if x is not a float or int. -func AsFloat(x Value) (f float64, ok bool) { - switch x := x.(type) { - case Float: - return float64(x), true - case Int: - return float64(x.Float()), true - } - return 0, false -} - -func (x Float) Mod(y Float) Float { return Float(math.Mod(float64(x), float64(y))) } - -// Unary implements the operations +float and -float. -func (f Float) Unary(op syntax.Token) (Value, error) { - switch op { - case syntax.MINUS: - return -f, nil - case syntax.PLUS: - return +f, nil - } - return nil, nil -} - -// String is the type of a Starlark string. -// -// A String encapsulates an an immutable sequence of bytes, -// but strings are not directly iterable. Instead, iterate -// over the result of calling one of these four methods: -// codepoints, codepoint_ords, elems, elem_ords. -// -// Warning: the contract of the Value interface's String method is that -// it returns the value printed in Starlark notation, -// so s.String() or fmt.Sprintf("%s", s) returns a quoted string. -// Use string(s) or s.GoString() or fmt.Sprintf("%#v", s) to obtain the raw contents -// of a Starlark string as a Go string. 
-type String string - -func (s String) String() string { return strconv.Quote(string(s)) } -func (s String) GoString() string { return string(s) } -func (s String) Type() string { return "string" } -func (s String) Freeze() {} // immutable -func (s String) Truth() Bool { return len(s) > 0 } -func (s String) Hash() (uint32, error) { return hashString(string(s)), nil } -func (s String) Len() int { return len(s) } // bytes -func (s String) Index(i int) Value { return s[i : i+1] } - -func (s String) Slice(start, end, step int) Value { - if step == 1 { - return s[start:end] - } - - sign := signum(step) - var str []byte - for i := start; signum(end-i) == sign; i += step { - str = append(str, s[i]) - } - return String(str) -} - -func (s String) Attr(name string) (Value, error) { return builtinAttr(s, name, stringMethods) } -func (s String) AttrNames() []string { return builtinAttrNames(stringMethods) } - -func (x String) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(String) - return threeway(op, strings.Compare(string(x), string(y))), nil -} - -func AsString(x Value) (string, bool) { v, ok := x.(String); return string(v), ok } - -// A stringIterable is an iterable whose iterator yields a sequence of -// either Unicode code points or elements (bytes), -// either numerically or as successive substrings. -type stringIterable struct { - s String - ords bool - codepoints bool -} - -var _ Iterable = (*stringIterable)(nil) - -func (si stringIterable) String() string { - var etype string - if si.codepoints { - etype = "codepoint" - } else { - etype = "elem" - } - if si.ords { - return si.s.String() + "." + etype + "_ords()" - } else { - return si.s.String() + "." + etype + "s()" - } -} -func (si stringIterable) Type() string { - if si.codepoints { - return "codepoints" - } else { - return "elems" - } -} -func (si stringIterable) Freeze() {} // immutable -func (si stringIterable) Truth() Bool { return True } -func (si stringIterable) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", si.Type()) } -func (si stringIterable) Iterate() Iterator { return &stringIterator{si, 0} } - -type stringIterator struct { - si stringIterable - i int -} - -func (it *stringIterator) Next(p *Value) bool { - s := it.si.s[it.i:] - if s == "" { - return false - } - if it.si.codepoints { - r, sz := utf8.DecodeRuneInString(string(s)) - if !it.si.ords { - *p = s[:sz] - } else { - *p = MakeInt(int(r)) - } - it.i += sz - } else { - b := int(s[0]) - if !it.si.ords { - *p = s[:1] - } else { - *p = MakeInt(b) - } - it.i += 1 - } - return true -} - -func (*stringIterator) Done() {} - -// A Function is a function defined by a Starlark def statement or lambda expression. -// The initialization behavior of a Starlark module is also represented by a Function. -type Function struct { - funcode *compile.Funcode - module *module - defaults Tuple - freevars Tuple -} - -// A module is the dynamic counterpart to a Program. -// All functions in the same program share a module. -type module struct { - program *compile.Program - predeclared StringDict - globals []Value - constants []Value -} - -// makeGlobalDict returns a new, unfrozen StringDict containing all global -// variables so far defined in the module. 
-func (m *module) makeGlobalDict() StringDict { - r := make(StringDict, len(m.program.Globals)) - for i, id := range m.program.Globals { - if v := m.globals[i]; v != nil { - r[id.Name] = v - } - } - return r -} - -func (fn *Function) Name() string { return fn.funcode.Name } // "lambda" for anonymous functions -func (fn *Function) Doc() string { return fn.funcode.Doc } -func (fn *Function) Hash() (uint32, error) { return hashString(fn.funcode.Name), nil } -func (fn *Function) Freeze() { fn.defaults.Freeze(); fn.freevars.Freeze() } -func (fn *Function) String() string { return toString(fn) } -func (fn *Function) Type() string { return "function" } -func (fn *Function) Truth() Bool { return true } - -// Globals returns a new, unfrozen StringDict containing all global -// variables so far defined in the function's module. -func (fn *Function) Globals() StringDict { return fn.module.makeGlobalDict() } - -func (fn *Function) Position() syntax.Position { return fn.funcode.Pos } -func (fn *Function) NumParams() int { return fn.funcode.NumParams } -func (fn *Function) NumKwonlyParams() int { return fn.funcode.NumKwonlyParams } - -// Param returns the name and position of the ith parameter, -// where 0 <= i < NumParams(). -// The *args and **kwargs parameters are at the end -// even if there were optional parameters after *args. -func (fn *Function) Param(i int) (string, syntax.Position) { - if i >= fn.NumParams() { - panic(i) - } - id := fn.funcode.Locals[i] - return id.Name, id.Pos -} -func (fn *Function) HasVarargs() bool { return fn.funcode.HasVarargs } -func (fn *Function) HasKwargs() bool { return fn.funcode.HasKwargs } - -// A Builtin is a function implemented in Go. -type Builtin struct { - name string - fn func(thread *Thread, fn *Builtin, args Tuple, kwargs []Tuple) (Value, error) - recv Value // for bound methods (e.g. "".startswith) -} - -func (b *Builtin) Name() string { return b.name } -func (b *Builtin) Freeze() { - if b.recv != nil { - b.recv.Freeze() - } -} -func (b *Builtin) Hash() (uint32, error) { - h := hashString(b.name) - if b.recv != nil { - h ^= 5521 - } - return h, nil -} -func (b *Builtin) Receiver() Value { return b.recv } -func (b *Builtin) String() string { return toString(b) } -func (b *Builtin) Type() string { return "builtin_function_or_method" } -func (b *Builtin) CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) { - return b.fn(thread, b, args, kwargs) -} -func (b *Builtin) Truth() Bool { return true } - -// NewBuiltin returns a new 'builtin_function_or_method' value with the specified name -// and implementation. It compares unequal with all other values. -func NewBuiltin(name string, fn func(thread *Thread, fn *Builtin, args Tuple, kwargs []Tuple) (Value, error)) *Builtin { - return &Builtin{name: name, fn: fn} -} - -// BindReceiver returns a new Builtin value representing a method -// closure, that is, a built-in function bound to a receiver value. -// -// In the example below, the value of f is the string.index -// built-in method bound to the receiver value "abc": -// -// f = "abc".index; f("a"); f("b") -// -// In the common case, the receiver is bound only during the call, -// but this still results in the creation of a temporary method closure: -// -// "abc".index("a") -// -func (b *Builtin) BindReceiver(recv Value) *Builtin { - return &Builtin{name: b.name, fn: b.fn, recv: recv} -} - -// A *Dict represents a Starlark dictionary. -// The zero value of Dict is a valid empty dictionary. 
-// If you know the exact final number of entries, -// it is more efficient to call NewDict. -type Dict struct { - ht hashtable -} - -// NewDict returns a set with initial space for -// at least size insertions before rehashing. -func NewDict(size int) *Dict { - dict := new(Dict) - dict.ht.init(size) - return dict -} - -func (d *Dict) Clear() error { return d.ht.clear() } -func (d *Dict) Delete(k Value) (v Value, found bool, err error) { return d.ht.delete(k) } -func (d *Dict) Get(k Value) (v Value, found bool, err error) { return d.ht.lookup(k) } -func (d *Dict) Items() []Tuple { return d.ht.items() } -func (d *Dict) Keys() []Value { return d.ht.keys() } -func (d *Dict) Len() int { return int(d.ht.len) } -func (d *Dict) Iterate() Iterator { return d.ht.iterate() } -func (d *Dict) SetKey(k, v Value) error { return d.ht.insert(k, v) } -func (d *Dict) String() string { return toString(d) } -func (d *Dict) Type() string { return "dict" } -func (d *Dict) Freeze() { d.ht.freeze() } -func (d *Dict) Truth() Bool { return d.Len() > 0 } -func (d *Dict) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: dict") } - -func (d *Dict) Attr(name string) (Value, error) { return builtinAttr(d, name, dictMethods) } -func (d *Dict) AttrNames() []string { return builtinAttrNames(dictMethods) } - -func (x *Dict) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(*Dict) - switch op { - case syntax.EQL: - ok, err := dictsEqual(x, y, depth) - return ok, err - case syntax.NEQ: - ok, err := dictsEqual(x, y, depth) - return !ok, err - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func dictsEqual(x, y *Dict, depth int) (bool, error) { - if x.Len() != y.Len() { - return false, nil - } - for _, xitem := range x.Items() { - key, xval := xitem[0], xitem[1] - - if yval, found, _ := y.Get(key); !found { - return false, nil - } else if eq, err := EqualDepth(xval, yval, depth-1); err != nil { - return false, err - } else if !eq { - return false, nil - } - } - return true, nil -} - -// A *List represents a Starlark list value. -type List struct { - elems []Value - frozen bool - itercount uint32 // number of active iterators (ignored if frozen) -} - -// NewList returns a list containing the specified elements. -// Callers should not subsequently modify elems. -func NewList(elems []Value) *List { return &List{elems: elems} } - -func (l *List) Freeze() { - if !l.frozen { - l.frozen = true - for _, elem := range l.elems { - elem.Freeze() - } - } -} - -// checkMutable reports an error if the list should not be mutated. -// verb+" list" should describe the operation. -func (l *List) checkMutable(verb string) error { - if l.frozen { - return fmt.Errorf("cannot %s frozen list", verb) - } - if l.itercount > 0 { - return fmt.Errorf("cannot %s list during iteration", verb) - } - return nil -} - -func (l *List) String() string { return toString(l) } -func (l *List) Type() string { return "list" } -func (l *List) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: list") } -func (l *List) Truth() Bool { return l.Len() > 0 } -func (l *List) Len() int { return len(l.elems) } -func (l *List) Index(i int) Value { return l.elems[i] } - -func (l *List) Slice(start, end, step int) Value { - if step == 1 { - elems := append([]Value{}, l.elems[start:end]...) 
- return NewList(elems) - } - - sign := signum(step) - var list []Value - for i := start; signum(end-i) == sign; i += step { - list = append(list, l.elems[i]) - } - return NewList(list) -} - -func (l *List) Attr(name string) (Value, error) { return builtinAttr(l, name, listMethods) } -func (l *List) AttrNames() []string { return builtinAttrNames(listMethods) } - -func (l *List) Iterate() Iterator { - if !l.frozen { - l.itercount++ - } - return &listIterator{l: l} -} - -func (x *List) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(*List) - // It's tempting to check x == y as an optimization here, - // but wrong because a list containing NaN is not equal to itself. - return sliceCompare(op, x.elems, y.elems, depth) -} - -func sliceCompare(op syntax.Token, x, y []Value, depth int) (bool, error) { - // Fast path: check length. - if len(x) != len(y) && (op == syntax.EQL || op == syntax.NEQ) { - return op == syntax.NEQ, nil - } - - // Find first element that is not equal in both lists. - for i := 0; i < len(x) && i < len(y); i++ { - if eq, err := EqualDepth(x[i], y[i], depth-1); err != nil { - return false, err - } else if !eq { - switch op { - case syntax.EQL: - return false, nil - case syntax.NEQ: - return true, nil - default: - return CompareDepth(op, x[i], y[i], depth-1) - } - } - } - - return threeway(op, len(x)-len(y)), nil -} - -type listIterator struct { - l *List - i int -} - -func (it *listIterator) Next(p *Value) bool { - if it.i < it.l.Len() { - *p = it.l.elems[it.i] - it.i++ - return true - } - return false -} - -func (it *listIterator) Done() { - if !it.l.frozen { - it.l.itercount-- - } -} - -func (l *List) SetIndex(i int, v Value) error { - if err := l.checkMutable("assign to element of"); err != nil { - return err - } - l.elems[i] = v - return nil -} - -func (l *List) Append(v Value) error { - if err := l.checkMutable("append to"); err != nil { - return err - } - l.elems = append(l.elems, v) - return nil -} - -func (l *List) Clear() error { - if err := l.checkMutable("clear"); err != nil { - return err - } - for i := range l.elems { - l.elems[i] = nil // aid GC - } - l.elems = l.elems[:0] - return nil -} - -// A Tuple represents a Starlark tuple value. -type Tuple []Value - -func (t Tuple) Len() int { return len(t) } -func (t Tuple) Index(i int) Value { return t[i] } - -func (t Tuple) Slice(start, end, step int) Value { - if step == 1 { - return t[start:end] - } - - sign := signum(step) - var tuple Tuple - for i := start; signum(end-i) == sign; i += step { - tuple = append(tuple, t[i]) - } - return tuple -} - -func (t Tuple) Iterate() Iterator { return &tupleIterator{elems: t} } -func (t Tuple) Freeze() { - for _, elem := range t { - elem.Freeze() - } -} -func (t Tuple) String() string { return toString(t) } -func (t Tuple) Type() string { return "tuple" } -func (t Tuple) Truth() Bool { return len(t) > 0 } - -func (x Tuple) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(Tuple) - return sliceCompare(op, x, y, depth) -} - -func (t Tuple) Hash() (uint32, error) { - // Use same algorithm as Python. 
- var x, mult uint32 = 0x345678, 1000003 - for _, elem := range t { - y, err := elem.Hash() - if err != nil { - return 0, err - } - x = x ^ y*mult - mult += 82520 + uint32(len(t)+len(t)) - } - return x, nil -} - -type tupleIterator struct{ elems Tuple } - -func (it *tupleIterator) Next(p *Value) bool { - if len(it.elems) > 0 { - *p = it.elems[0] - it.elems = it.elems[1:] - return true - } - return false -} - -func (it *tupleIterator) Done() {} - -// A Set represents a Starlark set value. -// The zero value of Set is a valid empty set. -// If you know the exact final number of elements, -// it is more efficient to call NewSet. -type Set struct { - ht hashtable // values are all None -} - -// NewSet returns a dictionary with initial space for -// at least size insertions before rehashing. -func NewSet(size int) *Set { - set := new(Set) - set.ht.init(size) - return set -} - -func (s *Set) Delete(k Value) (found bool, err error) { _, found, err = s.ht.delete(k); return } -func (s *Set) Clear() error { return s.ht.clear() } -func (s *Set) Has(k Value) (found bool, err error) { _, found, err = s.ht.lookup(k); return } -func (s *Set) Insert(k Value) error { return s.ht.insert(k, None) } -func (s *Set) Len() int { return int(s.ht.len) } -func (s *Set) Iterate() Iterator { return s.ht.iterate() } -func (s *Set) String() string { return toString(s) } -func (s *Set) Type() string { return "set" } -func (s *Set) elems() []Value { return s.ht.keys() } -func (s *Set) Freeze() { s.ht.freeze() } -func (s *Set) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: set") } -func (s *Set) Truth() Bool { return s.Len() > 0 } - -func (s *Set) Attr(name string) (Value, error) { return builtinAttr(s, name, setMethods) } -func (s *Set) AttrNames() []string { return builtinAttrNames(setMethods) } - -func (x *Set) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(*Set) - switch op { - case syntax.EQL: - ok, err := setsEqual(x, y, depth) - return ok, err - case syntax.NEQ: - ok, err := setsEqual(x, y, depth) - return !ok, err - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func setsEqual(x, y *Set, depth int) (bool, error) { - if x.Len() != y.Len() { - return false, nil - } - for _, elem := range x.elems() { - if found, _ := y.Has(elem); !found { - return false, nil - } - } - return true, nil -} - -func (s *Set) Union(iter Iterator) (Value, error) { - set := new(Set) - for _, elem := range s.elems() { - set.Insert(elem) // can't fail - } - var x Value - for iter.Next(&x) { - if err := set.Insert(x); err != nil { - return nil, err - } - } - return set, nil -} - -// toString returns the string form of value v. -// It may be more efficient than v.String() for larger values. -func toString(v Value) string { - buf := new(strings.Builder) - writeValue(buf, v, nil) - return buf.String() -} - -// writeValue writes x to out. -// -// path is used to detect cycles. -// It contains the list of *List and *Dict values we're currently printing. -// (These are the only potentially cyclic structures.) -// Callers should generally pass nil for path. -// It is safe to re-use the same path slice for multiple calls. 
-func writeValue(out *strings.Builder, x Value, path []Value) {
- switch x := x.(type) {
- case nil:
- out.WriteString("<nil>") // indicates a bug
-
- case NoneType:
- out.WriteString("None")
-
- case Int:
- out.WriteString(x.String())
-
- case Bool:
- if x {
- out.WriteString("True")
- } else {
- out.WriteString("False")
- }
-
- case String:
- fmt.Fprintf(out, "%q", string(x))
-
- case *List:
- out.WriteByte('[')
- if pathContains(path, x) {
- out.WriteString("...") // list contains itself
- } else {
- for i, elem := range x.elems {
- if i > 0 {
- out.WriteString(", ")
- }
- writeValue(out, elem, append(path, x))
- }
- }
- out.WriteByte(']')
-
- case Tuple:
- out.WriteByte('(')
- for i, elem := range x {
- if i > 0 {
- out.WriteString(", ")
- }
- writeValue(out, elem, path)
- }
- if len(x) == 1 {
- out.WriteByte(',')
- }
- out.WriteByte(')')
-
- case *Function:
- fmt.Fprintf(out, "<function %s>", x.Name())
-
- case *Builtin:
- if x.recv != nil {
- fmt.Fprintf(out, "<built-in method %s of %s value>", x.Name(), x.recv.Type())
- } else {
- fmt.Fprintf(out, "<built-in function %s>", x.Name())
- }
-
- case *Dict:
- out.WriteByte('{')
- if pathContains(path, x) {
- out.WriteString("...") // dict contains itself
- } else {
- sep := ""
- for _, item := range x.Items() {
- k, v := item[0], item[1]
- out.WriteString(sep)
- writeValue(out, k, path)
- out.WriteString(": ")
- writeValue(out, v, append(path, x)) // cycle check
- sep = ", "
- }
- }
- out.WriteByte('}')
-
- case *Set:
- out.WriteString("set([")
- for i, elem := range x.elems() {
- if i > 0 {
- out.WriteString(", ")
- }
- writeValue(out, elem, path)
- }
- out.WriteString("])")
-
- default:
- out.WriteString(x.String())
- }
-}
-
-func pathContains(path []Value, x Value) bool {
- for _, y := range path {
- if x == y {
- return true
- }
- }
- return false
-}
-
-const maxdepth = 10
-
-// Equal reports whether two Starlark values are equal.
-func Equal(x, y Value) (bool, error) {
- if x, ok := x.(String); ok {
- return x == y, nil // fast path for an important special case
- }
- return EqualDepth(x, y, maxdepth)
-}
-
-// EqualDepth reports whether two Starlark values are equal.
-//
-// Recursive comparisons by implementations of Value.CompareSameType
-// should use EqualDepth to prevent infinite recursion.
-func EqualDepth(x, y Value, depth int) (bool, error) {
- return CompareDepth(syntax.EQL, x, y, depth)
-}
-
-// Compare compares two Starlark values.
-// The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE.
-// Compare returns an error if an ordered comparison was
-// requested for a type that does not support it.
-//
-// Recursive comparisons by implementations of Value.CompareSameType
-// should use CompareDepth to prevent infinite recursion.
-func Compare(op syntax.Token, x, y Value) (bool, error) {
- return CompareDepth(op, x, y, maxdepth)
-}
-
-// CompareDepth compares two Starlark values.
-// The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE.
-// CompareDepth returns an error if an ordered comparison was
-// requested for a pair of values that do not support it.
-//
-// The depth parameter limits the maximum depth of recursion
-// in cyclic data structures.
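A minimal sketch (illustrative, from an embedding Go program) of how the standalone Equal and Compare helpers above are intended to be called, rather than invoking CompareSameType directly:

package example

import (
	"github.com/k14s/starlark-go/starlark"
	"github.com/k14s/starlark-go/syntax"
)

// lessThan requests an ordered comparison; mixed Int/Float operands are
// supported, while unordered types (e.g. dicts) return an error.
func lessThan(x, y starlark.Value) (bool, error) {
	return starlark.Compare(syntax.LT, x, y)
}

// sameValue compares for equality with the recursion depth limit applied internally.
func sameValue(x, y starlark.Value) (bool, error) {
	return starlark.Equal(x, y)
}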
-func CompareDepth(op syntax.Token, x, y Value, depth int) (bool, error) { - if depth < 1 { - return false, fmt.Errorf("comparison exceeded maximum recursion depth") - } - if sameType(x, y) { - if xcomp, ok := x.(Comparable); ok { - return xcomp.CompareSameType(op, y, depth) - } - - // use identity comparison - switch op { - case syntax.EQL: - return x == y, nil - case syntax.NEQ: - return x != y, nil - } - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } - - // different types - - // int/float ordered comparisons - switch x := x.(type) { - case Int: - if y, ok := y.(Float); ok { - if y != y { - return false, nil // y is NaN - } - var cmp int - if !math.IsInf(float64(y), 0) { - cmp = x.rational().Cmp(y.rational()) // y is finite - } else if y > 0 { - cmp = -1 // y is +Inf - } else { - cmp = +1 // y is -Inf - } - return threeway(op, cmp), nil - } - case Float: - if y, ok := y.(Int); ok { - if x != x { - return false, nil // x is NaN - } - var cmp int - if !math.IsInf(float64(x), 0) { - cmp = x.rational().Cmp(y.rational()) // x is finite - } else if x > 0 { - cmp = -1 // x is +Inf - } else { - cmp = +1 // x is -Inf - } - return threeway(op, cmp), nil - } - } - - // All other values of different types compare unequal. - switch op { - case syntax.EQL: - return false, nil - case syntax.NEQ: - return true, nil - } - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) -} - -func sameType(x, y Value) bool { - return reflect.TypeOf(x) == reflect.TypeOf(y) || x.Type() == y.Type() -} - -// threeway interprets a three-way comparison value cmp (-1, 0, +1) -// as a boolean comparison (e.g. x < y). -func threeway(op syntax.Token, cmp int) bool { - switch op { - case syntax.EQL: - return cmp == 0 - case syntax.NEQ: - return cmp != 0 - case syntax.LE: - return cmp <= 0 - case syntax.LT: - return cmp < 0 - case syntax.GE: - return cmp >= 0 - case syntax.GT: - return cmp > 0 - } - panic(op) -} - -func b2i(b bool) int { - if b { - return 1 - } else { - return 0 - } -} - -// Len returns the length of a string or sequence value, -// and -1 for all others. -// -// Warning: Len(x) >= 0 does not imply Iterate(x) != nil. -// A string has a known length but is not directly iterable. -func Len(x Value) int { - switch x := x.(type) { - case String: - return x.Len() - case Sequence: - return x.Len() - } - return -1 -} - -// Iterate return a new iterator for the value if iterable, nil otherwise. -// If the result is non-nil, the caller must call Done when finished with it. -// -// Warning: Iterate(x) != nil does not imply Len(x) >= 0. -// Some iterables may have unknown length. -func Iterate(x Value) Iterator { - if x, ok := x.(Iterable); ok { - return x.Iterate() - } - return nil -} diff --git a/vendor/github.com/k14s/starlark-go/starlarkstruct/module.go b/vendor/github.com/k14s/starlark-go/starlarkstruct/module.go deleted file mode 100644 index 5097f5619..000000000 --- a/vendor/github.com/k14s/starlark-go/starlarkstruct/module.go +++ /dev/null @@ -1,43 +0,0 @@ -package starlarkstruct - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" -) - -// A Module is a named collection of values, -// typically a suite of functions imported by a load statement. -// -// It differs from Struct primarily in that its string representation -// does not enumerate its fields. 
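A minimal sketch (illustrative; the module name and members are invented) of constructing such a module from Go and exposing it to scripts as a predeclared name:

package example

import (
	"github.com/k14s/starlark-go/starlark"
	"github.com/k14s/starlark-go/starlarkstruct"
)

// mathModule is reached from Starlark with dot notation, e.g. math.pi.
var mathModule = &starlarkstruct.Module{
	Name: "math",
	Members: starlark.StringDict{
		"pi": starlark.Float(3.141592653589793),
	},
}

// predeclared would be passed to the evaluator as the predeclared environment.
var predeclared = starlark.StringDict{"math": mathModule}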
-type Module struct {
- Name string
- Members starlark.StringDict
-}
-
-var _ starlark.HasAttrs = (*Module)(nil)
-
-func (m *Module) Attr(name string) (starlark.Value, error) { return m.Members[name], nil }
-func (m *Module) AttrNames() []string { return m.Members.Keys() }
-func (m *Module) Freeze() { m.Members.Freeze() }
-func (m *Module) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", m.Type()) }
-func (m *Module) String() string { return fmt.Sprintf("<module %q>", m.Name) }
-func (m *Module) Truth() starlark.Bool { return true }
-func (m *Module) Type() string { return "module" }
-
-// MakeModule may be used as the implementation of a Starlark built-in
-// function, module(name, **kwargs). It returns a new module with the
-// specified name and members.
-func MakeModule(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
- var name string
- if err := starlark.UnpackPositionalArgs(b.Name(), args, nil, 1, &name); err != nil {
- return nil, err
- }
- members := make(starlark.StringDict, len(kwargs))
- for _, kwarg := range kwargs {
- k := string(kwarg[0].(starlark.String))
- members[k] = kwarg[1]
- }
- return &Module{name, members}, nil
-}
diff --git a/vendor/github.com/k14s/starlark-go/starlarkstruct/struct.go b/vendor/github.com/k14s/starlark-go/starlarkstruct/struct.go
deleted file mode 100644
index 378f93ca9..000000000
--- a/vendor/github.com/k14s/starlark-go/starlarkstruct/struct.go
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright 2017 The Bazel Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package starlarkstruct defines the Starlark types 'struct' and
-// 'module', both optional language extensions.
-//
-package starlarkstruct // import "github.com/k14s/starlark-go/starlarkstruct"
-
-// It is tempting to introduce a variant of Struct that is a wrapper
-// around a Go struct value, for stronger typing guarantees and more
-// efficient and convenient field lookup. However:
-// 1) all fields of Starlark structs are optional, so we cannot represent
-// them using more specific types such as String, Int, *Depset, and
-// *File, as such types give no way to represent missing fields.
-// 2) the efficiency gain of direct struct field access is rather
-// marginal: finding the index of a field by binary searching on the
-// sorted list of field names is quite fast compared to the other
-// overheads.
-// 3) the gains in compactness and spatial locality are also rather
-// marginal: the array behind the []entry slice is (due to field name
-// strings) only a factor of 2 larger than the corresponding Go struct
-// would be, and, like the Go struct, requires only a single allocation.
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/k14s/starlark-go/starlark"
- "github.com/k14s/starlark-go/syntax"
-)
-
-// Make is the implementation of a built-in function that instantiates
-// an immutable struct from the specified keyword arguments.
-// -// An application can add 'struct' to the Starlark environment like so: -// -// globals := starlark.StringDict{ -// "struct": starlark.NewBuiltin("struct", starlarkstruct.Make), -// } -// -func Make(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if len(args) > 0 { - return nil, fmt.Errorf("struct: unexpected positional arguments") - } - return FromKeywords(Default, kwargs), nil -} - -// FromKeywords returns a new struct instance whose fields are specified by the -// key/value pairs in kwargs. (Each kwargs[i][0] must be a starlark.String.) -func FromKeywords(constructor starlark.Value, kwargs []starlark.Tuple) *Struct { - if constructor == nil { - panic("nil constructor") - } - s := &Struct{ - constructor: constructor, - entries: make(entries, 0, len(kwargs)), - } - for _, kwarg := range kwargs { - k := string(kwarg[0].(starlark.String)) - v := kwarg[1] - s.entries = append(s.entries, entry{k, v}) - } - sort.Sort(s.entries) - return s -} - -// FromStringDict returns a whose elements are those of d. -// The constructor parameter specifies the constructor; use Default for an ordinary struct. -func FromStringDict(constructor starlark.Value, d starlark.StringDict) *Struct { - if constructor == nil { - panic("nil constructor") - } - s := &Struct{ - constructor: constructor, - entries: make(entries, 0, len(d)), - } - for k, v := range d { - s.entries = append(s.entries, entry{k, v}) - } - sort.Sort(s.entries) - return s -} - -// Struct is an immutable Starlark type that maps field names to values. -// It is not iterable and does not support len. -// -// A struct has a constructor, a distinct value that identifies a class -// of structs, and which appears in the struct's string representation. -// -// Operations such as x+y fail if the constructors of the two operands -// are not equal. -// -// The default constructor, Default, is the string "struct", but -// clients may wish to 'brand' structs for their own purposes. -// The constructor value appears in the printed form of the value, -// and is accessible using the Constructor method. -// -// Use Attr to access its fields and AttrNames to enumerate them. -type Struct struct { - constructor starlark.Value - entries entries // sorted by name -} - -// Default is the default constructor for structs. -// It is merely the string "struct". -const Default = starlark.String("struct") - -type entries []entry - -func (a entries) Len() int { return len(a) } -func (a entries) Less(i, j int) bool { return a[i].name < a[j].name } -func (a entries) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type entry struct { - name string - value starlark.Value -} - -var ( - _ starlark.HasAttrs = (*Struct)(nil) - _ starlark.HasBinary = (*Struct)(nil) -) - -// ToStringDict adds a name/value entry to d for each field of the struct. -func (s *Struct) ToStringDict(d starlark.StringDict) { - for _, e := range s.entries { - d[e.name] = e.value - } -} - -func (s *Struct) String() string { - buf := new(strings.Builder) - if s.constructor == Default { - // NB: The Java implementation always prints struct - // even for Bazel provider instances. 
- buf.WriteString("struct") // avoid String()'s quotation - } else { - buf.WriteString(s.constructor.String()) - } - buf.WriteByte('(') - for i, e := range s.entries { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(e.name) - buf.WriteString(" = ") - buf.WriteString(e.value.String()) - } - buf.WriteByte(')') - return buf.String() -} - -// Constructor returns the constructor used to create this struct. -func (s *Struct) Constructor() starlark.Value { return s.constructor } - -func (s *Struct) Type() string { return "struct" } -func (s *Struct) Truth() starlark.Bool { return true } // even when empty -func (s *Struct) Hash() (uint32, error) { - // Same algorithm as Tuple.hash, but with different primes. - var x, m uint32 = 8731, 9839 - for _, e := range s.entries { - namehash, _ := starlark.String(e.name).Hash() - x = x ^ 3*namehash - y, err := e.value.Hash() - if err != nil { - return 0, err - } - x = x ^ y*m - m += 7349 - } - return x, nil -} -func (s *Struct) Freeze() { - for _, e := range s.entries { - e.value.Freeze() - } -} - -func (x *Struct) Binary(op syntax.Token, y starlark.Value, side starlark.Side) (starlark.Value, error) { - if y, ok := y.(*Struct); ok && op == syntax.PLUS { - if side == starlark.Right { - x, y = y, x - } - - if eq, err := starlark.Equal(x.constructor, y.constructor); err != nil { - return nil, fmt.Errorf("in %s + %s: error comparing constructors: %v", - x.constructor, y.constructor, err) - } else if !eq { - return nil, fmt.Errorf("cannot add structs of different constructors: %s + %s", - x.constructor, y.constructor) - } - - z := make(starlark.StringDict, x.len()+y.len()) - for _, e := range x.entries { - z[e.name] = e.value - } - for _, e := range y.entries { - z[e.name] = e.value - } - - return FromStringDict(x.constructor, z), nil - } - return nil, nil // unhandled -} - -// Attr returns the value of the specified field. -func (s *Struct) Attr(name string) (starlark.Value, error) { - // Binary search the entries. - // This implementation is a specialization of - // sort.Search that avoids dynamic dispatch. - n := len(s.entries) - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) - if s.entries[h].name < name { - i = h + 1 - } else { - j = h - } - } - if i < n && s.entries[i].name == name { - return s.entries[i].value, nil - } - - var ctor string - if s.constructor != Default { - ctor = s.constructor.String() + " " - } - return nil, starlark.NoSuchAttrError( - fmt.Sprintf("%sstruct has no .%s attribute", ctor, name)) -} - -func (s *Struct) len() int { return len(s.entries) } - -// AttrNames returns a new sorted list of the struct fields. 
-func (s *Struct) AttrNames() []string { - names := make([]string, len(s.entries)) - for i, e := range s.entries { - names[i] = e.name - } - return names -} - -func (x *Struct) CompareSameType(op syntax.Token, y_ starlark.Value, depth int) (bool, error) { - y := y_.(*Struct) - switch op { - case syntax.EQL: - return structsEqual(x, y, depth) - case syntax.NEQ: - eq, err := structsEqual(x, y, depth) - return !eq, err - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func structsEqual(x, y *Struct, depth int) (bool, error) { - if x.len() != y.len() { - return false, nil - } - - if eq, err := starlark.Equal(x.constructor, y.constructor); err != nil { - return false, fmt.Errorf("error comparing struct constructors %v and %v: %v", - x.constructor, y.constructor, err) - } else if !eq { - return false, nil - } - - for i, n := 0, x.len(); i < n; i++ { - if x.entries[i].name != y.entries[i].name { - return false, nil - } else if eq, err := starlark.EqualDepth(x.entries[i].value, y.entries[i].value, depth-1); err != nil { - return false, err - } else if !eq { - return false, nil - } - } - return true, nil -} diff --git a/vendor/github.com/k14s/starlark-go/syntax/block_scanner.go b/vendor/github.com/k14s/starlark-go/syntax/block_scanner.go deleted file mode 100644 index a33618cdb..000000000 --- a/vendor/github.com/k14s/starlark-go/syntax/block_scanner.go +++ /dev/null @@ -1,214 +0,0 @@ -package syntax - -import ( - "fmt" -) - -var _ = fmt.Sprintf - -type scannerInterface interface { - nextToken(*tokenValue) Token - error(pos Position, s string) - errorf(pos Position, format string, args ...interface{}) - recover(*error) - - getLineComments() []Comment - getSuffixComments() []Comment - getPos() Position -} - -// blockScanner changes INDENT/OUTDENT to be -// based on nesting depth (start->end) instead of whitespace -type blockScanner struct { - scanner *scanner - nextTokens []blockScannerToken - prevTokens []blockScannerToken - indentStack []blockScannerToken - debug bool -} - -var _ scannerInterface = &blockScanner{} - -type blockScannerToken struct { - val tokenValue - tok Token - alreadyOutdented bool -} - -func newBlockScanner(s *scanner) *blockScanner { - return &blockScanner{s, nil, nil, nil, false} -} - -func (s *blockScanner) nextToken(outVal *tokenValue) Token { - pair := s.nextTokenInner() - s.prevTokens = append(s.prevTokens, pair) - - if s.debug { - fmt.Printf("emit: %s => %#v\n", pair.tok.String(), pair.val) - } - - s.copyTokenValue(pair.val, outVal) - return pair.tok -} - -func (s *blockScanner) nextTokenInner() blockScannerToken { - if s.matchesNewBlock() { - return s.buildIndent() - } - - var currToken blockScannerToken - var tokSource string - - if len(s.nextTokens) > 0 { - tokSource = "read-buffer" - currToken = s.nextTokens[0] - s.nextTokens = s.nextTokens[1:] - } else { - tokSource = "read" - currToken = s.popNextToken() - } - - if s.debug { - fmt.Printf("\n%s: %s => %#v\n", tokSource, currToken.tok.String(), currToken.val) - } - - switch currToken.tok { - // 'else' is a special cases when we need to - // implicitly outdent since end is not specified in code - case ELSE: - if !currToken.alreadyOutdented { - // if 'else' is not followed by the colon assume - // this is an inline if-else hence no need to outdent - maybeColonToken := s.popNextToken() - - if maybeColonToken.tok == COLON { - currToken.alreadyOutdented = true - s.putBackToken(currToken) - s.putBackToken(maybeColonToken) - return s.buildOutdent() - } - - 
s.putBackToken(maybeColonToken) - } - - // 'elif' is special cases when we need to - // implicitly outdent since end is not specified in code - case ELIF: - if !currToken.alreadyOutdented { - currToken.alreadyOutdented = true - s.putBackToken(currToken) - return s.buildOutdent() - } - - // Skip parsed indent/outdent as we insert - // our own "indention" at appropriate times - case INDENT, OUTDENT: - return s.nextTokenInner() - - case PASS: - s.errorf(s.getPos(), "use of reserved keyword 'pass' is not allowed") - - // 'end' is identifier - case IDENT: - if currToken.val.raw == "end" { - s.swallowNextToken(NEWLINE) - return s.buildOutdent() - } - - case EOF: - if len(s.indentStack) != 0 { - pos := s.indentStack[len(s.indentStack)-1].val.pos - s.errorf(pos, "mismatched set of block openings (if/else/elif/for/def) and closing (end)") - } - - default: - // continue with curr token - } - - return currToken -} - -func (s *blockScanner) popNextToken() blockScannerToken { - val := tokenValue{} - tok := s.scanner.nextToken(&val) - return blockScannerToken{tok: tok, val: val} -} - -func (s *blockScanner) putBackToken(pair blockScannerToken) blockScannerToken { - s.nextTokens = append(s.nextTokens, pair) - return pair -} - -func (s *blockScanner) swallowNextToken(tok Token) { - token := s.popNextToken() - if token.tok != tok { - s.putBackToken(token) - } -} - -func (s *blockScanner) buildIndent() blockScannerToken { - s.indentStack = append(s.indentStack, s.prevTokens[len(s.prevTokens)-1]) - return blockScannerToken{ - tok: Token(INDENT), - val: tokenValue{pos: s.prevTokens[len(s.prevTokens)-1].val.pos}, - } -} - -func (s *blockScanner) buildOutdent() blockScannerToken { - if len(s.indentStack) == 0 { - s.error(s.getPos(), "unexpected end") - } - s.indentStack = s.indentStack[:len(s.indentStack)-1] - return blockScannerToken{ - tok: Token(OUTDENT), - val: tokenValue{pos: s.prevTokens[len(s.prevTokens)-1].val.pos}, - } -} - -func (s *blockScanner) matchesNewBlock() bool { - if len(s.prevTokens) < 2 { - return false - } - lastLastColon := s.prevTokens[len(s.prevTokens)-2].tok == COLON - lastNewline := s.prevTokens[len(s.prevTokens)-1].tok == NEWLINE - return lastLastColon && lastNewline -} - -func (s *blockScanner) copyTokenValue(left tokenValue, right *tokenValue) { - right.raw = left.raw - right.int = left.int - right.bigInt = left.bigInt - right.float = left.float - right.string = left.string - right.pos = left.pos -} - -// implement boring scanner methods -func (s *blockScanner) error(pos Position, str string) { - s.scanner.error(pos, str) -} - -func (s *blockScanner) errorf(pos Position, format string, args ...interface{}) { - s.scanner.errorf(pos, format, args...) 
-} - -func (s *blockScanner) recover(err *error) { - s.scanner.recover(err) -} - -func (s *blockScanner) getLineComments() []Comment { - return s.scanner.getLineComments() -} - -func (s *blockScanner) getSuffixComments() []Comment { - return s.scanner.getSuffixComments() -} - -func (s *blockScanner) getPos() Position { return s.scanner.getPos() } - -// augment regular scanner -var _ scannerInterface = &scanner{} - -func (s *scanner) getLineComments() []Comment { return s.lineComments } -func (s *scanner) getSuffixComments() []Comment { return s.suffixComments } -func (s *scanner) getPos() Position { return s.pos } diff --git a/vendor/github.com/k14s/starlark-go/syntax/grammar.txt b/vendor/github.com/k14s/starlark-go/syntax/grammar.txt deleted file mode 100644 index 0a1988b62..000000000 --- a/vendor/github.com/k14s/starlark-go/syntax/grammar.txt +++ /dev/null @@ -1,132 +0,0 @@ - -Grammar of Starlark -================== - -File = {Statement | newline} eof . - -Statement = DefStmt | IfStmt | ForStmt | WhileStmt | SimpleStmt . - -DefStmt = 'def' identifier '(' [Parameters [',']] ')' ':' Suite . - -Parameters = Parameter {',' Parameter}. - -Parameter = identifier | identifier '=' Test | '*' | '*' identifier | '**' identifier . - -IfStmt = 'if' Test ':' Suite {'elif' Test ':' Suite} ['else' ':' Suite] . - -ForStmt = 'for' LoopVariables 'in' Expression ':' Suite . - -WhileStmt = 'while' Test ':' Suite . - -Suite = [newline indent {Statement} outdent] | SimpleStmt . - -SimpleStmt = SmallStmt {';' SmallStmt} [';'] '\n' . -# NOTE: '\n' optional at EOF - -SmallStmt = ReturnStmt - | BreakStmt | ContinueStmt | PassStmt - | AssignStmt - | ExprStmt - | LoadStmt - . - -ReturnStmt = 'return' [Expression] . -BreakStmt = 'break' . -ContinueStmt = 'continue' . -PassStmt = 'pass' . -AssignStmt = Expression ('=' | '+=' | '-=' | '*=' | '/=' | '//=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=') Expression . -ExprStmt = Expression . - -LoadStmt = 'load' '(' string {',' [identifier '='] string} [','] ')' . - -Test = LambdaExpr - | IfExpr - | PrimaryExpr - | UnaryExpr - | BinaryExpr - . - -LambdaExpr = 'lambda' [Parameters] ':' Test . - -IfExpr = Test 'if' Test 'else' Test . - -PrimaryExpr = Operand - | PrimaryExpr DotSuffix - | PrimaryExpr CallSuffix - | PrimaryExpr SliceSuffix - . - -Operand = identifier - | int | float | string - | ListExpr | ListComp - | DictExpr | DictComp - | '(' [Expression [',']] ')' - | ('-' | '+') PrimaryExpr - . - -DotSuffix = '.' identifier . -CallSuffix = '(' [Arguments [',']] ')' . -SliceSuffix = '[' [Expression] [':' Test [':' Test]] ']' . - -# A CallSuffix does not allow a trailing comma -# if the last argument is '*' Test or '**' Test. - -Arguments = Argument {',' Argument} . -Argument = Test | identifier '=' Test | '*' Test | '**' Test . - -ListExpr = '[' [Expression [',']] ']' . -ListComp = '[' Test {CompClause} ']'. - -DictExpr = '{' [Entries [',']] '}' . -DictComp = '{' Entry {CompClause} '}' . -Entries = Entry {',' Entry} . -Entry = Test ':' Test . - -CompClause = 'for' LoopVariables 'in' Test | 'if' Test . - -UnaryExpr = 'not' Test . - -BinaryExpr = Test {Binop Test} . - -Binop = 'or' - | 'and' - | '==' | '!=' | '<' | '>' | '<=' | '>=' | 'in' | 'not' 'in' - | '|' - | '^' - | '&' - | '-' | '+' - | '*' | '%' | '/' | '//' - . - -Expression = Test {',' Test} . -# NOTE: trailing comma permitted only when within [...] or (...). - -LoopVariables = PrimaryExpr {',' PrimaryExpr} . - - -# Notation (similar to Go spec): -- lowercase and 'quoted' items are lexical tokens. 
-- Capitalized names denote grammar productions. -- (...) implies grouping -- x | y means either x or y. -- [x] means x is optional -- {x} means x is repeated zero or more times -- The end of each declaration is marked with a period. - -# Tokens -- spaces: newline, eof, indent, outdent. -- identifier. -- literals: string, int, float. -- plus all quoted tokens such as '+=', 'return'. - -# Notes: -- Ambiguity is resolved using operator precedence. -- The grammar does not enforce the legal order of params and args, - nor that the first compclause must be a 'for'. - -TODO: -- explain how the lexer generates indent, outdent, and newline tokens. -- why is unary NOT separated from unary - and +? -- the grammar is (mostly) in LL(1) style so, for example, - dot expressions are formed suffixes, not complete expressions, - which makes the spec harder to read. Reorganize into non-LL(1) form? diff --git a/vendor/github.com/k14s/starlark-go/syntax/parse.go b/vendor/github.com/k14s/starlark-go/syntax/parse.go deleted file mode 100644 index c224ad04e..000000000 --- a/vendor/github.com/k14s/starlark-go/syntax/parse.go +++ /dev/null @@ -1,1051 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// This file defines a recursive-descent parser for Starlark. -// The LL(1) grammar of Starlark and the names of many productions follow Python 2.7. -// -// TODO(adonovan): use syntax.Error more systematically throughout the -// package. Verify that error positions are correct using the -// chunkedfile mechanism. - -import "log" - -// Enable this flag to print the token stream and log.Fatal on the first error. -const debug = false - -// A Mode value is a set of flags (or 0) that controls optional parser functionality. -type Mode uint - -const ( - RetainComments Mode = 1 << iota // retain comments in AST; see Node.Comments - BlockScanner Mode = 1 << iota // use if/end syntax instead of indent -) - -// Parse parses the input data and returns the corresponding parse tree. -// -// If src != nil, ParseFile parses the source from src and the filename -// is only used when recording position information. -// The type of the argument for the src parameter must be string, -// []byte, or io.Reader. -// If src == nil, ParseFile parses the file specified by filename. -func Parse(filename string, src interface{}, mode Mode) (f *File, err error) { - in, err := newScanner(filename, src, mode&RetainComments != 0) - if err != nil { - return nil, err - } - var inScanner scannerInterface = in - if (mode & BlockScanner) == BlockScanner { - inScanner = newBlockScanner(in) - } - p := parser{in: inScanner} - defer p.in.recover(&err) - - p.nextToken() // read first lookahead token - f = p.parseFile() - if f != nil { - f.Path = filename - } - p.assignComments(f) - return f, nil -} - -// ParseCompoundStmt parses a single compound statement: -// a blank line, a def, for, while, or if statement, or a -// semicolon-separated list of simple statements followed -// by a newline. These are the units on which the REPL operates. -// ParseCompoundStmt does not consume any following input. -// The parser calls the readline function each -// time it needs a new line of input. 
-func ParseCompoundStmt(filename string, readline func() ([]byte, error)) (f *File, err error) { - in, err := newScanner(filename, readline, false) - if err != nil { - return nil, err - } - - p := parser{in: newBlockScanner(in)} - defer p.in.recover(&err) - - p.nextToken() // read first lookahead token - - var stmts []Stmt - switch p.tok { - case DEF, IF, FOR, WHILE: - stmts = p.parseStmt(stmts) - case NEWLINE: - // blank line - default: - stmts = p.parseSimpleStmt(stmts, false) - // Require but don't consume newline, to avoid blocking again. - if p.tok != NEWLINE { - p.in.errorf(p.in.getPos(), "invalid syntax") - } - } - - return &File{Path: filename, Stmts: stmts}, nil -} - -// ParseExpr parses a Starlark expression. -// A comma-separated list of expressions is parsed as a tuple. -// See Parse for explanation of parameters. -func ParseExpr(filename string, src interface{}, mode Mode) (expr Expr, err error) { - in, err := newScanner(filename, src, mode&RetainComments != 0) - if err != nil { - return nil, err - } - p := parser{in: newBlockScanner(in)} - defer p.in.recover(&err) - - p.nextToken() // read first lookahead token - - // Use parseExpr, not parseTest, to permit an unparenthesized tuple. - expr = p.parseExpr(false) - - // A following newline (e.g. "f()\n") appears outside any brackets, - // on a non-blank line, and thus results in a NEWLINE token. - if p.tok == NEWLINE { - p.nextToken() - } - - if p.tok != EOF { - p.in.errorf(p.in.getPos(), "got %#v after expression, want EOF", p.tok) - } - p.assignComments(expr) - return expr, nil -} - -type parser struct { - in scannerInterface - tok Token - tokval tokenValue -} - -// nextToken advances the scanner and returns the position of the -// previous token. -func (p *parser) nextToken() Position { - oldpos := p.tokval.pos - p.tok = p.in.nextToken(&p.tokval) - // enable to see the token stream - if debug { - log.Printf("nextToken: %-20s%+v\n", p.tok, p.tokval.pos) - } - return oldpos -} - -// file_input = (NEWLINE | stmt)* EOF -func (p *parser) parseFile() *File { - var stmts []Stmt - for p.tok != EOF { - if p.tok == NEWLINE { - p.nextToken() - continue - } - stmts = p.parseStmt(stmts) - } - return &File{Stmts: stmts} -} - -func (p *parser) parseStmt(stmts []Stmt) []Stmt { - if p.tok == DEF { - return append(stmts, p.parseDefStmt()) - } else if p.tok == IF { - return append(stmts, p.parseIfStmt()) - } else if p.tok == FOR { - return append(stmts, p.parseForStmt()) - } else if p.tok == WHILE { - return append(stmts, p.parseWhileStmt()) - } - return p.parseSimpleStmt(stmts, true) -} - -func (p *parser) parseDefStmt() Stmt { - defpos := p.nextToken() // consume DEF - id := p.parseIdent() - p.consume(LPAREN) - params := p.parseParams() - p.consume(RPAREN) - p.consume(COLON) - body := p.parseSuite() - return &DefStmt{ - Def: defpos, - Name: id, - Params: params, - Body: body, - } -} - -func (p *parser) parseIfStmt() Stmt { - ifpos := p.nextToken() // consume IF - cond := p.parseTest() - p.consume(COLON) - body := p.parseSuite() - ifStmt := &IfStmt{ - If: ifpos, - Cond: cond, - True: body, - } - tail := ifStmt - for p.tok == ELIF { - elifpos := p.nextToken() // consume ELIF - cond := p.parseTest() - p.consume(COLON) - body := p.parseSuite() - elif := &IfStmt{ - If: elifpos, - Cond: cond, - True: body, - } - tail.ElsePos = elifpos - tail.False = []Stmt{elif} - tail = elif - } - if p.tok == ELSE { - tail.ElsePos = p.nextToken() // consume ELSE - p.consume(COLON) - tail.False = p.parseSuite() - } - return ifStmt -} - -func (p *parser) 
parseForStmt() Stmt { - forpos := p.nextToken() // consume FOR - vars := p.parseForLoopVariables() - p.consume(IN) - x := p.parseExpr(false) - p.consume(COLON) - body := p.parseSuite() - return &ForStmt{ - For: forpos, - Vars: vars, - X: x, - Body: body, - } -} - -func (p *parser) parseWhileStmt() Stmt { - whilepos := p.nextToken() // consume WHILE - cond := p.parseTest() - p.consume(COLON) - body := p.parseSuite() - return &WhileStmt{ - While: whilepos, - Cond: cond, - Body: body, - } -} - -// Equivalent to 'exprlist' production in Python grammar. -// -// loop_variables = primary_with_suffix (COMMA primary_with_suffix)* COMMA? -func (p *parser) parseForLoopVariables() Expr { - // Avoid parseExpr because it would consume the IN token - // following x in "for x in y: ...". - v := p.parsePrimaryWithSuffix() - if p.tok != COMMA { - return v - } - - list := []Expr{v} - for p.tok == COMMA { - p.nextToken() - if terminatesExprList(p.tok) { - break - } - list = append(list, p.parsePrimaryWithSuffix()) - } - return &TupleExpr{List: list} -} - -// simple_stmt = small_stmt (SEMI small_stmt)* SEMI? NEWLINE -// In REPL mode, it does not consume the NEWLINE. -func (p *parser) parseSimpleStmt(stmts []Stmt, consumeNL bool) []Stmt { - for { - stmts = append(stmts, p.parseSmallStmt()) - if p.tok != SEMI { - break - } - p.nextToken() // consume SEMI - if p.tok == NEWLINE || p.tok == EOF { - break - } - } - // EOF without NEWLINE occurs in `if x: pass`, for example. - if p.tok != EOF && consumeNL { - p.consume(NEWLINE) - } - - return stmts -} - -// small_stmt = RETURN expr? -// | PASS | BREAK | CONTINUE -// | LOAD ... -// | expr ('=' | '+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=') expr // assign -// | expr -func (p *parser) parseSmallStmt() Stmt { - switch p.tok { - case RETURN: - pos := p.nextToken() // consume RETURN - var result Expr - if p.tok != EOF && p.tok != NEWLINE && p.tok != SEMI { - result = p.parseExpr(false) - } - return &ReturnStmt{Return: pos, Result: result} - - case BREAK, CONTINUE, PASS: - tok := p.tok - pos := p.nextToken() // consume it - return &BranchStmt{Token: tok, TokenPos: pos} - - case LOAD: - return p.parseLoadStmt() - } - - // Assignment - x := p.parseExpr(false) - switch p.tok { - case EQ, PLUS_EQ, MINUS_EQ, STAR_EQ, SLASH_EQ, SLASHSLASH_EQ, PERCENT_EQ, AMP_EQ, PIPE_EQ, CIRCUMFLEX_EQ, LTLT_EQ, GTGT_EQ: - op := p.tok - pos := p.nextToken() // consume op - rhs := p.parseExpr(false) - return &AssignStmt{OpPos: pos, Op: op, LHS: x, RHS: rhs} - } - - // Expression statement (e.g. function call, doc string). - return &ExprStmt{X: x} -} - -// stmt = LOAD '(' STRING {',' (IDENT '=')? STRING} [','] ')' -func (p *parser) parseLoadStmt() *LoadStmt { - loadPos := p.nextToken() // consume LOAD - lparen := p.consume(LPAREN) - - if p.tok != STRING { - p.in.errorf(p.in.getPos(), "first operand of load statement must be a string literal") - } - module := p.parsePrimary().(*Literal) - - var from, to []*Ident - for p.tok != RPAREN && p.tok != EOF { - p.consume(COMMA) - if p.tok == RPAREN { - break // allow trailing comma - } - switch p.tok { - case STRING: - // load("module", "id") - // To name is same as original. 
- lit := p.parsePrimary().(*Literal) - id := &Ident{ - NamePos: lit.TokenPos.add(`"`), - Name: lit.Value.(string), - } - to = append(to, id) - from = append(from, id) - - case IDENT: - // load("module", to="from") - id := p.parseIdent() - to = append(to, id) - if p.tok != EQ { - p.in.errorf(p.in.getPos(), `load operand must be "%[1]s" or %[1]s="originalname" (want '=' after %[1]s)`, id.Name) - } - p.consume(EQ) - if p.tok != STRING { - p.in.errorf(p.in.getPos(), `original name of loaded symbol must be quoted: %s="originalname"`, id.Name) - } - lit := p.parsePrimary().(*Literal) - from = append(from, &Ident{ - NamePos: lit.TokenPos.add(`"`), - Name: lit.Value.(string), - }) - - case RPAREN: - p.in.errorf(p.in.getPos(), "trailing comma in load statement") - - default: - p.in.errorf(p.in.getPos(), `load operand must be "name" or localname="name" (got %#v)`, p.tok) - } - } - rparen := p.consume(RPAREN) - - if len(to) == 0 { - p.in.errorf(lparen, "load statement must import at least 1 symbol") - } - return &LoadStmt{ - Load: loadPos, - Module: module, - To: to, - From: from, - Rparen: rparen, - } -} - -// suite is typically what follows a COLON (e.g. after DEF or FOR). -// suite = simple_stmt | NEWLINE INDENT stmt+ OUTDENT -func (p *parser) parseSuite() []Stmt { - if p.tok == NEWLINE { - p.nextToken() // consume NEWLINE - p.consume(INDENT) - var stmts []Stmt - for p.tok != OUTDENT && p.tok != EOF { - stmts = p.parseStmt(stmts) - } - p.consume(OUTDENT) - return stmts - } - - return p.parseSimpleStmt(nil, true) -} - -func (p *parser) parseIdent() *Ident { - if p.tok != IDENT { - for _, v := range keywordToken { - if p.tok == v { - p.in.errorf(p.in.getPos(), "use of reserved keyword '%s' is not allowed (expected identifier)", p.tokval.raw) - } - } - p.in.errorf(p.in.getPos(), "not an identifier") - } - id := &Ident{ - NamePos: p.tokval.pos, - Name: p.tokval.raw, - } - p.nextToken() - return id -} - -func (p *parser) consume(t Token) Position { - if p.tok != t { - p.in.errorf(p.in.getPos(), "got %#v, want %#v", p.tok, t) - } - return p.nextToken() -} - -// params = (param COMMA)* param -// | -// -// param = IDENT -// | IDENT EQ test -// | STAR -// | STAR IDENT -// | STARSTAR IDENT -// -// parseParams parses a parameter list. The resulting expressions are of the form: -// -// *Ident x -// *Binary{Op: EQ, X: *Ident, Y: Expr} x=y -// *Unary{Op: STAR} * -// *Unary{Op: STAR, X: *Ident} *args -// *Unary{Op: STARSTAR, X: *Ident} **kwargs -func (p *parser) parseParams() []Expr { - var params []Expr - stars := false - for p.tok != RPAREN && p.tok != COLON && p.tok != EOF { - if len(params) > 0 { - p.consume(COMMA) - } - if p.tok == RPAREN { - // list can end with a COMMA if there is neither * nor ** - if stars { - p.in.errorf(p.in.getPos(), "got %#v, want parameter", p.tok) - } - break - } - - // * or *args or **kwargs - if p.tok == STAR || p.tok == STARSTAR { - stars = true - op := p.tok - pos := p.nextToken() - var x Expr - if op == STARSTAR || p.tok == IDENT { - x = p.parseIdent() - } - params = append(params, &UnaryExpr{ - OpPos: pos, - Op: op, - X: x, - }) - continue - } - - // IDENT - // IDENT = test - id := p.parseIdent() - if p.tok == EQ { // default value - eq := p.nextToken() - dflt := p.parseTest() - params = append(params, &BinaryExpr{ - X: id, - OpPos: eq, - Op: EQ, - Y: dflt, - }) - continue - } - - params = append(params, id) - } - return params -} - -// parseExpr parses an expression, possible consisting of a -// comma-separated list of 'test' expressions. 
-// -// In many cases we must use parseTest to avoid ambiguity such as -// f(x, y) vs. f((x, y)). -func (p *parser) parseExpr(inParens bool) Expr { - x := p.parseTest() - if p.tok != COMMA { - return x - } - - // tuple - exprs := p.parseExprs([]Expr{x}, inParens) - return &TupleExpr{List: exprs} -} - -// parseExprs parses a comma-separated list of expressions, starting with the comma. -// It is used to parse tuples and list elements. -// expr_list = (',' expr)* ','? -func (p *parser) parseExprs(exprs []Expr, allowTrailingComma bool) []Expr { - for p.tok == COMMA { - pos := p.nextToken() - if terminatesExprList(p.tok) { - if !allowTrailingComma { - p.in.error(pos, "unparenthesized tuple with trailing comma") - } - break - } - exprs = append(exprs, p.parseTest()) - } - return exprs -} - -// parseTest parses a 'test', a single-component expression. -func (p *parser) parseTest() Expr { - if p.tok == LAMBDA { - return p.parseLambda(true) - } - - x := p.parseTestPrec(0) - - // conditional expression (t IF cond ELSE f) - if p.tok == IF { - ifpos := p.nextToken() - cond := p.parseTestPrec(0) - if p.tok != ELSE { - p.in.error(ifpos, "conditional expression without else clause") - } - elsepos := p.nextToken() - else_ := p.parseTest() - return &CondExpr{If: ifpos, Cond: cond, True: x, ElsePos: elsepos, False: else_} - } - - return x -} - -// parseTestNoCond parses a a single-component expression without -// consuming a trailing 'if expr else expr'. -func (p *parser) parseTestNoCond() Expr { - if p.tok == LAMBDA { - return p.parseLambda(false) - } - return p.parseTestPrec(0) -} - -// parseLambda parses a lambda expression. -// The allowCond flag allows the body to be an 'a if b else c' conditional. -func (p *parser) parseLambda(allowCond bool) Expr { - lambda := p.nextToken() - var params []Expr - if p.tok != COLON { - params = p.parseParams() - } - p.consume(COLON) - - var body Expr - if allowCond { - body = p.parseTest() - } else { - body = p.parseTestNoCond() - } - - return &LambdaExpr{ - Lambda: lambda, - Params: params, - Body: body, - } -} - -func (p *parser) parseTestPrec(prec int) Expr { - if prec >= len(preclevels) { - return p.parsePrimaryWithSuffix() - } - - // expr = NOT expr - if p.tok == NOT && prec == int(precedence[NOT]) { - pos := p.nextToken() - x := p.parseTestPrec(prec) - return &UnaryExpr{ - OpPos: pos, - Op: NOT, - X: x, - } - } - - return p.parseBinopExpr(prec) -} - -// expr = test (OP test)* -// Uses precedence climbing; see http://www.engr.mun.ca/~theo/Misc/exp_parsing.htm#climbing. -func (p *parser) parseBinopExpr(prec int) Expr { - x := p.parseTestPrec(prec + 1) - for first := true; ; first = false { - if p.tok == NOT { - p.nextToken() // consume NOT - // In this context, NOT must be followed by IN. - // Replace NOT IN by a single NOT_IN token. - if p.tok != IN { - p.in.errorf(p.in.getPos(), "got %#v, want in", p.tok) - } - p.tok = NOT_IN - } - - // Binary operator of specified precedence? - opprec := int(precedence[p.tok]) - if opprec < prec { - return x - } - - // Comparisons are non-associative. - if !first && opprec == int(precedence[EQL]) { - p.in.errorf(p.in.getPos(), "%s does not associate with %s (use parens)", - x.(*BinaryExpr).Op, p.tok) - } - - op := p.tok - pos := p.nextToken() - y := p.parseTestPrec(opprec + 1) - x = &BinaryExpr{OpPos: pos, Op: op, X: x, Y: y} - } -} - -// precedence maps each operator to its precedence (0-7), or -1 for other tokens. -var precedence [maxToken]int8 - -// preclevels groups operators of equal precedence. 
-// Comparisons are nonassociative; other binary operators associate to the left. -// Unary MINUS, unary PLUS, and TILDE have higher precedence so are handled in parsePrimary. -// See https://github.com/google/starlark-go/blob/master/doc/spec.md#binary-operators -var preclevels = [...][]Token{ - {OR}, // or - {AND}, // and - {NOT}, // not (unary) - {EQL, NEQ, LT, GT, LE, GE, IN, NOT_IN}, // == != < > <= >= in not in - {PIPE}, // | - {CIRCUMFLEX}, // ^ - {AMP}, // & - {LTLT, GTGT}, // << >> - {MINUS, PLUS}, // - - {STAR, PERCENT, SLASH, SLASHSLASH}, // * % / // -} - -func init() { - // populate precedence table - for i := range precedence { - precedence[i] = -1 - } - for level, tokens := range preclevels { - for _, tok := range tokens { - precedence[tok] = int8(level) - } - } -} - -// primary_with_suffix = primary -// | primary '.' IDENT -// | primary slice_suffix -// | primary call_suffix -func (p *parser) parsePrimaryWithSuffix() Expr { - x := p.parsePrimary() - for { - switch p.tok { - case DOT: - dot := p.nextToken() - id := p.parseIdent() - x = &DotExpr{Dot: dot, X: x, Name: id} - case LBRACK: - x = p.parseSliceSuffix(x) - case LPAREN: - x = p.parseCallSuffix(x) - default: - return x - } - } -} - -// slice_suffix = '[' expr? ':' expr? ':' expr? ']' -func (p *parser) parseSliceSuffix(x Expr) Expr { - lbrack := p.nextToken() - var lo, hi, step Expr - if p.tok != COLON { - y := p.parseExpr(false) - - // index x[y] - if p.tok == RBRACK { - rbrack := p.nextToken() - return &IndexExpr{X: x, Lbrack: lbrack, Y: y, Rbrack: rbrack} - } - - lo = y - } - - // slice or substring x[lo:hi:step] - if p.tok == COLON { - p.nextToken() - if p.tok != COLON && p.tok != RBRACK { - hi = p.parseTest() - } - } - if p.tok == COLON { - p.nextToken() - if p.tok != RBRACK { - step = p.parseTest() - } - } - rbrack := p.consume(RBRACK) - return &SliceExpr{X: x, Lbrack: lbrack, Lo: lo, Hi: hi, Step: step, Rbrack: rbrack} -} - -// call_suffix = '(' arg_list? ')' -func (p *parser) parseCallSuffix(fn Expr) Expr { - lparen := p.consume(LPAREN) - var rparen Position - var args []Expr - if p.tok == RPAREN { - rparen = p.nextToken() - } else { - args = p.parseArgs() - rparen = p.consume(RPAREN) - } - return &CallExpr{Fn: fn, Lparen: lparen, Args: args, Rparen: rparen} -} - -// parseArgs parses a list of actual parameter values (arguments). -// It mirrors the structure of parseParams. -// arg_list = ((arg COMMA)* arg COMMA?)? -func (p *parser) parseArgs() []Expr { - var args []Expr - stars := false - for p.tok != RPAREN && p.tok != EOF { - if len(args) > 0 { - p.consume(COMMA) - } - if p.tok == RPAREN { - // list can end with a COMMA if there is neither * nor ** - if stars { - p.in.errorf(p.in.getPos(), `got %#v, want argument`, p.tok) - } - break - } - - // *args or **kwargs - if p.tok == STAR || p.tok == STARSTAR { - stars = true - op := p.tok - pos := p.nextToken() - x := p.parseTest() - args = append(args, &UnaryExpr{ - OpPos: pos, - Op: op, - X: x, - }) - continue - } - - // We use a different strategy from Bazel here to stay within LL(1). - // Instead of looking ahead two tokens (IDENT, EQ) we parse - // 'test = test' then check that the first was an IDENT. 
- x := p.parseTest() - - if p.tok == EQ { - // name = value - if _, ok := x.(*Ident); !ok { - p.in.errorf(p.in.getPos(), "keyword argument must have form name=expr") - } - eq := p.nextToken() - y := p.parseTest() - x = &BinaryExpr{ - X: x, - OpPos: eq, - Op: EQ, - Y: y, - } - } - - args = append(args, x) - } - return args -} - -// primary = IDENT -// | INT | FLOAT -// | STRING -// | '[' ... // list literal or comprehension -// | '{' ... // dict literal or comprehension -// | '(' ... // tuple or parenthesized expression -// | ('-'|'+'|'~') primary_with_suffix -func (p *parser) parsePrimary() Expr { - switch p.tok { - case IDENT: - return p.parseIdent() - - case INT, FLOAT, STRING: - var val interface{} - tok := p.tok - switch tok { - case INT: - if p.tokval.bigInt != nil { - val = p.tokval.bigInt - } else { - val = p.tokval.int - } - case FLOAT: - val = p.tokval.float - case STRING: - val = p.tokval.string - } - raw := p.tokval.raw - pos := p.nextToken() - return &Literal{Token: tok, TokenPos: pos, Raw: raw, Value: val} - - case LBRACK: - return p.parseList() - - case LBRACE: - return p.parseDict() - - case LPAREN: - lparen := p.nextToken() - if p.tok == RPAREN { - // empty tuple - rparen := p.nextToken() - return &TupleExpr{Lparen: lparen, Rparen: rparen} - } - e := p.parseExpr(true) // allow trailing comma - rparen := p.consume(RPAREN) - return &ParenExpr{ - Lparen: lparen, - X: e, - Rparen: rparen, - } - - case MINUS, PLUS, TILDE: // unary - tok := p.tok - pos := p.nextToken() - x := p.parsePrimaryWithSuffix() - return &UnaryExpr{ - OpPos: pos, - Op: tok, - X: x, - } - } - p.in.errorf(p.in.getPos(), "got %#v, want primary expression", p.tok) - panic("unreachable") -} - -// list = '[' ']' -// | '[' expr ']' -// | '[' expr expr_list ']' -// | '[' expr (FOR loop_variables IN expr)+ ']' -func (p *parser) parseList() Expr { - lbrack := p.nextToken() - if p.tok == RBRACK { - // empty List - rbrack := p.nextToken() - return &ListExpr{Lbrack: lbrack, Rbrack: rbrack} - } - - x := p.parseTest() - - if p.tok == FOR { - // list comprehension - return p.parseComprehensionSuffix(lbrack, x, RBRACK) - } - - exprs := []Expr{x} - if p.tok == COMMA { - // multi-item list literal - exprs = p.parseExprs(exprs, true) // allow trailing comma - } - - rbrack := p.consume(RBRACK) - return &ListExpr{Lbrack: lbrack, List: exprs, Rbrack: rbrack} -} - -// dict = '{' '}' -// | '{' dict_entry_list '}' -// | '{' dict_entry FOR loop_variables IN expr '}' -func (p *parser) parseDict() Expr { - lbrace := p.nextToken() - if p.tok == RBRACE { - // empty dict - rbrace := p.nextToken() - return &DictExpr{Lbrace: lbrace, Rbrace: rbrace} - } - - x := p.parseDictEntry() - - if p.tok == FOR { - // dict comprehension - return p.parseComprehensionSuffix(lbrace, x, RBRACE) - } - - entries := []Expr{x} - for p.tok == COMMA { - p.nextToken() - if p.tok == RBRACE { - break - } - entries = append(entries, p.parseDictEntry()) - } - - rbrace := p.consume(RBRACE) - return &DictExpr{Lbrace: lbrace, List: entries, Rbrace: rbrace} -} - -// dict_entry = test ':' test -func (p *parser) parseDictEntry() *DictEntry { - k := p.parseTest() - colon := p.consume(COLON) - v := p.parseTest() - return &DictEntry{Key: k, Colon: colon, Value: v} -} - -// comp_suffix = FOR loopvars IN expr comp_suffix -// | IF expr comp_suffix -// | ']' or ')' (end) -// -// There can be multiple FOR/IF clauses; the first is always a FOR. 
-func (p *parser) parseComprehensionSuffix(lbrace Position, body Expr, endBrace Token) Expr { - var clauses []Node - for p.tok != endBrace { - if p.tok == FOR { - pos := p.nextToken() - vars := p.parseForLoopVariables() - in := p.consume(IN) - // Following Python 3, the operand of IN cannot be: - // - a conditional expression ('x if y else z'), - // due to conflicts in Python grammar - // ('if' is used by the comprehension); - // - a lambda expression - // - an unparenthesized tuple. - x := p.parseTestPrec(0) - clauses = append(clauses, &ForClause{For: pos, Vars: vars, In: in, X: x}) - } else if p.tok == IF { - pos := p.nextToken() - cond := p.parseTestNoCond() - clauses = append(clauses, &IfClause{If: pos, Cond: cond}) - } else { - p.in.errorf(p.in.getPos(), "got %#v, want '%s', for, or if", p.tok, endBrace) - } - } - rbrace := p.nextToken() - - return &Comprehension{ - Curly: endBrace == RBRACE, - Lbrack: lbrace, - Body: body, - Clauses: clauses, - Rbrack: rbrace, - } -} - -func terminatesExprList(tok Token) bool { - switch tok { - case EOF, NEWLINE, EQ, RBRACE, RBRACK, RPAREN, SEMI: - return true - } - return false -} - -// Comment assignment. -// We build two lists of all subnodes, preorder and postorder. -// The preorder list is ordered by start location, with outer nodes first. -// The postorder list is ordered by end location, with outer nodes last. -// We use the preorder list to assign each whole-line comment to the syntax -// immediately following it, and we use the postorder list to assign each -// end-of-line comment to the syntax immediately preceding it. - -// flattenAST returns the list of AST nodes, both in prefix order and in postfix -// order. -func flattenAST(root Node) (pre, post []Node) { - stack := []Node{} - Walk(root, func(n Node) bool { - if n != nil { - pre = append(pre, n) - stack = append(stack, n) - } else { - post = append(post, stack[len(stack)-1]) - stack = stack[:len(stack)-1] - } - return true - }) - return pre, post -} - -// assignComments attaches comments to nearby syntax. -func (p *parser) assignComments(n Node) { - // Leave early if there are no comments - if len(p.in.getLineComments())+len(p.in.getSuffixComments()) == 0 { - return - } - - pre, post := flattenAST(n) - - // Assign line comments to syntax immediately following. - line := p.in.getLineComments() - for _, x := range pre { - start, _ := x.Span() - - switch x.(type) { - case *File: - continue - } - - for len(line) > 0 && !start.isBefore(line[0].Start) { - x.AllocComments() - x.Comments().Before = append(x.Comments().Before, line[0]) - line = line[1:] - } - } - - // Remaining line comments go at end of file. - if len(line) > 0 { - n.AllocComments() - n.Comments().After = append(n.Comments().After, line...) - } - - // Assign suffix comments to syntax immediately before. - suffix := p.in.getSuffixComments() - for i := len(post) - 1; i >= 0; i-- { - x := post[i] - - // Do not assign suffix comments to file - switch x.(type) { - case *File: - continue - } - - _, end := x.Span() - if len(suffix) > 0 && end.isBefore(suffix[len(suffix)-1].Start) { - x.AllocComments() - x.Comments().Suffix = append(x.Comments().Suffix, suffix[len(suffix)-1]) - suffix = suffix[:len(suffix)-1] - } - } -} diff --git a/vendor/github.com/k14s/starlark-go/syntax/quote.go b/vendor/github.com/k14s/starlark-go/syntax/quote.go deleted file mode 100644 index cc9a8d0ae..000000000 --- a/vendor/github.com/k14s/starlark-go/syntax/quote.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// Starlark quoted string utilities. - -import ( - "fmt" - "strconv" - "strings" -) - -// unesc maps single-letter chars following \ to their actual values. -var unesc = [256]byte{ - 'a': '\a', - 'b': '\b', - 'f': '\f', - 'n': '\n', - 'r': '\r', - 't': '\t', - 'v': '\v', - '\\': '\\', - '\'': '\'', - '"': '"', -} - -// esc maps escape-worthy bytes to the char that should follow \. -var esc = [256]byte{ - '\a': 'a', - '\b': 'b', - '\f': 'f', - '\n': 'n', - '\r': 'r', - '\t': 't', - '\v': 'v', - '\\': '\\', - '\'': '\'', - '"': '"', -} - -// notEsc is a list of characters that can follow a \ in a string value -// without having to escape the \. That is, since ( is in this list, we -// quote the Go string "foo\\(bar" as the Python literal "foo\(bar". -// This really does happen in BUILD files, especially in strings -// being used as shell arguments containing regular expressions. -const notEsc = " !#$%&()*+,-./:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ{|}~" - -// unquote unquotes the quoted string, returning the actual -// string value, whether the original was triple-quoted, and -// an error describing invalid input. -func unquote(quoted string) (s string, triple bool, err error) { - // Check for raw prefix: means don't interpret the inner \. - raw := false - if strings.HasPrefix(quoted, "r") { - raw = true - quoted = quoted[1:] - } - - if len(quoted) < 2 { - err = fmt.Errorf("string literal too short") - return - } - - if quoted[0] != '"' && quoted[0] != '\'' || quoted[0] != quoted[len(quoted)-1] { - err = fmt.Errorf("string literal has invalid quotes") - return - } - - // Check for triple quoted string. - quote := quoted[0] - if len(quoted) >= 6 && quoted[1] == quote && quoted[2] == quote && quoted[:3] == quoted[len(quoted)-3:] { - triple = true - quoted = quoted[3 : len(quoted)-3] - } else { - quoted = quoted[1 : len(quoted)-1] - } - - // Now quoted is the quoted data, but no quotes. - // If we're in raw mode or there are no escapes or - // carriage returns, we're done. - var unquoteChars string - if raw { - unquoteChars = "\r" - } else { - unquoteChars = "\\\r" - } - if !strings.ContainsAny(quoted, unquoteChars) { - s = quoted - return - } - - // Otherwise process quoted string. - // Each iteration processes one escape sequence along with the - // plain text leading up to it. - buf := new(strings.Builder) - for { - // Remove prefix before escape sequence. - i := strings.IndexAny(quoted, unquoteChars) - if i < 0 { - i = len(quoted) - } - buf.WriteString(quoted[:i]) - quoted = quoted[i:] - - if len(quoted) == 0 { - break - } - - // Process carriage return. - if quoted[0] == '\r' { - buf.WriteByte('\n') - if len(quoted) > 1 && quoted[1] == '\n' { - quoted = quoted[2:] - } else { - quoted = quoted[1:] - } - continue - } - - // Process escape sequence. - if len(quoted) == 1 { - err = fmt.Errorf(`truncated escape sequence \`) - return - } - - switch quoted[1] { - default: - // In Python, if \z (for some byte z) is not a known escape sequence - // then it appears as literal text in the string. - buf.WriteString(quoted[:2]) - quoted = quoted[2:] - - case '\n': - // Ignore the escape and the line break. - quoted = quoted[2:] - - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '\'', '"': - // One-char escape - buf.WriteByte(unesc[quoted[1]]) - quoted = quoted[2:] - - case '0', '1', '2', '3', '4', '5', '6', '7': - // Octal escape, up to 3 digits. 
- n := int(quoted[1] - '0') - quoted = quoted[2:] - for i := 1; i < 3; i++ { - if len(quoted) == 0 || quoted[0] < '0' || '7' < quoted[0] { - break - } - n = n*8 + int(quoted[0]-'0') - quoted = quoted[1:] - } - if n >= 256 { - // NOTE: Python silently discards the high bit, - // so that '\541' == '\141' == 'a'. - // Let's see if we can avoid doing that in BUILD files. - err = fmt.Errorf(`invalid escape sequence \%03o`, n) - return - } - buf.WriteByte(byte(n)) - - case 'x': - // Hexadecimal escape, exactly 2 digits. - if len(quoted) < 4 { - err = fmt.Errorf(`truncated escape sequence %s`, quoted) - return - } - n, err1 := strconv.ParseUint(quoted[2:4], 16, 0) - if err1 != nil { - err = fmt.Errorf(`invalid escape sequence %s`, quoted[:4]) - return - } - buf.WriteByte(byte(n)) - quoted = quoted[4:] - } - } - - s = buf.String() - return -} - -// indexByte returns the index of the first instance of b in s, or else -1. -func indexByte(s string, b byte) int { - for i := 0; i < len(s); i++ { - if s[i] == b { - return i - } - } - return -1 -} - -// hex is a list of the hexadecimal digits, for use in quoting. -// We always print lower-case hexadecimal. -const hex = "0123456789abcdef" - -// quote returns the quoted form of the string value "x". -// If triple is true, quote uses the triple-quoted form """x""". -func quote(unquoted string, triple bool) string { - q := `"` - if triple { - q = `"""` - } - - buf := new(strings.Builder) - buf.WriteString(q) - - for i := 0; i < len(unquoted); i++ { - c := unquoted[i] - if c == '"' && triple && (i+1 < len(unquoted) && unquoted[i+1] != '"' || i+2 < len(unquoted) && unquoted[i+2] != '"') { - // Can pass up to two quotes through, because they are followed by a non-quote byte. - buf.WriteByte(c) - if i+1 < len(unquoted) && unquoted[i+1] == '"' { - buf.WriteByte(c) - i++ - } - continue - } - if triple && c == '\n' { - // Can allow newline in triple-quoted string. - buf.WriteByte(c) - continue - } - if c == '\'' { - // Can allow ' since we always use ". - buf.WriteByte(c) - continue - } - if c == '\\' { - if i+1 < len(unquoted) && indexByte(notEsc, unquoted[i+1]) >= 0 { - // Can pass \ through when followed by a byte that - // known not to be a valid escape sequence and also - // that does not trigger an escape sequence of its own. - // Use this, because various BUILD files do. - buf.WriteByte('\\') - buf.WriteByte(unquoted[i+1]) - i++ - continue - } - } - if esc[c] != 0 { - buf.WriteByte('\\') - buf.WriteByte(esc[c]) - continue - } - if c < 0x20 || c >= 0x80 { - // BUILD files are supposed to be Latin-1, so escape all control and high bytes. - // I'd prefer to use \x here, but Blaze does not implement - // \x in quoted strings (b/7272572). - buf.WriteByte('\\') - buf.WriteByte(hex[c>>6]) // actually octal but reusing hex digits 0-7. - buf.WriteByte(hex[(c>>3)&7]) - buf.WriteByte(hex[c&7]) - /* - buf.WriteByte('\\') - buf.WriteByte('x') - buf.WriteByte(hex[c>>4]) - buf.WriteByte(hex[c&0xF]) - */ - continue - } - buf.WriteByte(c) - continue - } - - buf.WriteString(q) - return buf.String() -} diff --git a/vendor/github.com/k14s/starlark-go/syntax/scan.go b/vendor/github.com/k14s/starlark-go/syntax/scan.go deleted file mode 100644 index c653e1f1d..000000000 --- a/vendor/github.com/k14s/starlark-go/syntax/scan.go +++ /dev/null @@ -1,1088 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// A lexical scanner for Starlark. 
- -import ( - "fmt" - "io" - "io/ioutil" - "log" - "math/big" - "os" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// A Token represents a Starlark lexical token. -type Token int8 - -const ( - ILLEGAL Token = iota - EOF - - NEWLINE - INDENT - OUTDENT - - // Tokens with values - IDENT // x - INT // 123 - FLOAT // 1.23e45 - STRING // "foo" or 'foo' or '''foo''' or r'foo' or r"foo" - - // Punctuation - PLUS // + - MINUS // - - STAR // * - SLASH // / - SLASHSLASH // // - PERCENT // % - AMP // & - PIPE // | - CIRCUMFLEX // ^ - LTLT // << - GTGT // >> - TILDE // ~ - DOT // . - COMMA // , - EQ // = - SEMI // ; - COLON // : - LPAREN // ( - RPAREN // ) - LBRACK // [ - RBRACK // ] - LBRACE // { - RBRACE // } - LT // < - GT // > - GE // >= - LE // <= - EQL // == - NEQ // != - PLUS_EQ // += (keep order consistent with PLUS..GTGT) - MINUS_EQ // -= - STAR_EQ // *= - SLASH_EQ // /= - SLASHSLASH_EQ // //= - PERCENT_EQ // %= - AMP_EQ // &= - PIPE_EQ // |= - CIRCUMFLEX_EQ // ^= - LTLT_EQ // <<= - GTGT_EQ // >>= - STARSTAR // ** - - // Keywords - AND - BREAK - CONTINUE - DEF - ELIF - ELSE - FOR - IF - IN - LAMBDA - LOAD - NOT - NOT_IN // synthesized by parser from NOT IN - OR - PASS - RETURN - WHILE - - maxToken -) - -func (tok Token) String() string { return tokenNames[tok] } - -// GoString is like String but quotes punctuation tokens. -// Use Sprintf("%#v", tok) when constructing error messages. -func (tok Token) GoString() string { - if tok >= PLUS && tok <= STARSTAR { - return "'" + tokenNames[tok] + "'" - } - return tokenNames[tok] -} - -var tokenNames = [...]string{ - ILLEGAL: "illegal token", - EOF: "end of file", - NEWLINE: "newline", - INDENT: "indent", - OUTDENT: "outdent", - IDENT: "identifier", - INT: "int literal", - FLOAT: "float literal", - STRING: "string literal", - PLUS: "+", - MINUS: "-", - STAR: "*", - SLASH: "/", - SLASHSLASH: "//", - PERCENT: "%", - AMP: "&", - PIPE: "|", - CIRCUMFLEX: "^", - LTLT: "<<", - GTGT: ">>", - TILDE: "~", - DOT: ".", - COMMA: ",", - EQ: "=", - SEMI: ";", - COLON: ":", - LPAREN: "(", - RPAREN: ")", - LBRACK: "[", - RBRACK: "]", - LBRACE: "{", - RBRACE: "}", - LT: "<", - GT: ">", - GE: ">=", - LE: "<=", - EQL: "==", - NEQ: "!=", - PLUS_EQ: "+=", - MINUS_EQ: "-=", - STAR_EQ: "*=", - SLASH_EQ: "/=", - SLASHSLASH_EQ: "//=", - PERCENT_EQ: "%=", - AMP_EQ: "&=", - PIPE_EQ: "|=", - CIRCUMFLEX_EQ: "^=", - LTLT_EQ: "<<=", - GTGT_EQ: ">>=", - STARSTAR: "**", - AND: "and", - BREAK: "break", - CONTINUE: "continue", - DEF: "def", - ELIF: "elif", - ELSE: "else", - FOR: "for", - IF: "if", - IN: "in", - LAMBDA: "lambda", - LOAD: "load", - NOT: "not", - NOT_IN: "not in", - OR: "or", - PASS: "pass", - RETURN: "return", - WHILE: "while", -} - -// A Position describes the location of a rune of input. -type Position struct { - file *string // filename (indirect for compactness) - Line int32 // 1-based line number; 0 if line unknown - Col int32 // 1-based column (rune) number; 0 if column unknown -} - -// IsValid reports whether the position is valid. -func (p Position) IsValid() bool { return p.file != nil } - -// Filename returns the name of the file containing this position. -func (p Position) Filename() string { - if p.file != nil { - return *p.file - } - return "" -} - -// MakePosition returns position with the specified components. -func MakePosition(file *string, line, col int32) Position { return Position{file, line, col} } - -// add returns the position at the end of s, assuming it starts at p. 
-func (p Position) add(s string) Position { - if n := strings.Count(s, "\n"); n > 0 { - p.Line += int32(n) - s = s[strings.LastIndex(s, "\n")+1:] - p.Col = 1 - } - p.Col += int32(utf8.RuneCountInString(s)) - return p -} - -func (p Position) String() string { - file := p.Filename() - if p.Line > 0 { - if p.Col > 0 { - return fmt.Sprintf("%s:%d:%d", file, p.Line, p.Col) - } - return fmt.Sprintf("%s:%d", file, p.Line) - } - return file -} - -func (p Position) isBefore(q Position) bool { - if p.Line != q.Line { - return p.Line < q.Line - } - return p.Col < q.Col -} - -// An scanner represents a single input file being parsed. -type scanner struct { - rest []byte // rest of input (in REPL, a line of input) - token []byte // token being scanned - pos Position // current input position - depth int // nesting of [ ] { } ( ) - indentstk []int // stack of indentation levels - dents int // number of saved INDENT (>0) or OUTDENT (<0) tokens to return - lineStart bool // after NEWLINE; convert spaces to indentation tokens - keepComments bool // accumulate comments in slice - lineComments []Comment // list of full line comments (if keepComments) - suffixComments []Comment // list of suffix comments (if keepComments) - - readline func() ([]byte, error) // read next line of input (REPL only) -} - -func newScanner(filename string, src interface{}, keepComments bool) (*scanner, error) { - sc := &scanner{ - pos: Position{file: &filename, Line: 1, Col: 1}, - indentstk: make([]int, 1, 10), // []int{0} + spare capacity - lineStart: true, - keepComments: keepComments, - } - sc.readline, _ = src.(func() ([]byte, error)) // REPL only - if sc.readline == nil { - data, err := readSource(filename, src) - if err != nil { - return nil, err - } - sc.rest = data - } - return sc, nil -} - -func readSource(filename string, src interface{}) ([]byte, error) { - switch src := src.(type) { - case string: - return []byte(src), nil - case []byte: - return src, nil - case io.Reader: - data, err := ioutil.ReadAll(src) - if err != nil { - err = &os.PathError{Op: "read", Path: filename, Err: err} - } - return data, nil - case nil: - return ioutil.ReadFile(filename) - default: - return nil, fmt.Errorf("invalid source: %T", src) - } -} - -// An Error describes the nature and position of a scanner or parser error. -type Error struct { - Pos Position - Msg string -} - -func (e Error) Error() string { return e.Pos.String() + ": " + e.Msg } - -// errorf is called to report an error. -// errorf does not return: it panics. -func (sc *scanner) error(pos Position, s string) { - panic(Error{pos, s}) -} - -func (sc *scanner) errorf(pos Position, format string, args ...interface{}) { - sc.error(pos, fmt.Sprintf(format, args...)) -} - -func (sc *scanner) recover(err *error) { - // The scanner and parser panic both for routine errors like - // syntax errors and for programmer bugs like array index - // errors. Turn both into error returns. Catching bug panics - // is especially important when processing many files. - switch e := recover().(type) { - case nil: - // no panic - case Error: - *err = e - default: - *err = Error{sc.pos, fmt.Sprintf("internal error: %v", e)} - if debug { - log.Fatal(*err) - } - } -} - -// eof reports whether the input has reached end of file. -func (sc *scanner) eof() bool { - return len(sc.rest) == 0 && !sc.readLine() -} - -// readLine attempts to read another line of input. -// Precondition: len(sc.rest)==0. 
-func (sc *scanner) readLine() bool { - if sc.readline != nil { - var err error - sc.rest, err = sc.readline() - if err != nil { - sc.errorf(sc.pos, "%v", err) // EOF or ErrInterrupt - } - return len(sc.rest) > 0 - } - return false -} - -// peekRune returns the next rune in the input without consuming it. -// Newlines in Unix, DOS, or Mac format are treated as one rune, '\n'. -func (sc *scanner) peekRune() rune { - // TODO(adonovan): opt: measure and perhaps inline eof. - if sc.eof() { - return 0 - } - - // fast path: ASCII - if b := sc.rest[0]; b < utf8.RuneSelf { - if b == '\r' { - return '\n' - } - return rune(b) - } - - r, _ := utf8.DecodeRune(sc.rest) - return r -} - -// readRune consumes and returns the next rune in the input. -// Newlines in Unix, DOS, or Mac format are treated as one rune, '\n'. -func (sc *scanner) readRune() rune { - // eof() has been inlined here, both to avoid a call - // and to establish len(rest)>0 to avoid a bounds check. - if len(sc.rest) == 0 { - if !sc.readLine() { - sc.error(sc.pos, "internal scanner error: readRune at EOF") - } - // Redundant, but eliminates the bounds-check below. - if len(sc.rest) == 0 { - return 0 - } - } - - // fast path: ASCII - if b := sc.rest[0]; b < utf8.RuneSelf { - r := rune(b) - sc.rest = sc.rest[1:] - if r == '\r' { - if len(sc.rest) > 0 && sc.rest[0] == '\n' { - sc.rest = sc.rest[1:] - } - r = '\n' - } - if r == '\n' { - sc.pos.Line++ - sc.pos.Col = 1 - } else { - sc.pos.Col++ - } - return r - } - - r, size := utf8.DecodeRune(sc.rest) - sc.rest = sc.rest[size:] - sc.pos.Col++ - return r -} - -// tokenValue records the position and value associated with each token. -type tokenValue struct { - raw string // raw text of token - int int64 // decoded int - bigInt *big.Int // decoded integers > int64 - float float64 // decoded float - string string // decoded string - pos Position // start position of token -} - -// startToken marks the beginning of the next input token. -// It must be followed by a call to endToken once the token has -// been consumed using readRune. -func (sc *scanner) startToken(val *tokenValue) { - sc.token = sc.rest - val.raw = "" - val.pos = sc.pos -} - -// endToken marks the end of an input token. -// It records the actual token string in val.raw if the caller -// has not done that already. -func (sc *scanner) endToken(val *tokenValue) { - if val.raw == "" { - val.raw = string(sc.token[:len(sc.token)-len(sc.rest)]) - } -} - -// nextToken is called by the parser to obtain the next input token. -// It returns the token value and sets val to the data associated with -// the token. -// -// For all our input tokens, the associated data is val.pos (the -// position where the token begins), val.raw (the input string -// corresponding to the token). For string and int tokens, the string -// and int fields additionally contain the token's interpreted value. -func (sc *scanner) nextToken(val *tokenValue) Token { - - // The following distribution of tokens guides case ordering: - // - // COMMA 27 % - // STRING 23 % - // IDENT 15 % - // EQL 11 % - // LBRACK 5.5 % - // RBRACK 5.5 % - // NEWLINE 3 % - // LPAREN 2.9 % - // RPAREN 2.9 % - // INT 2 % - // others < 1 % - // - // Although NEWLINE tokens are infrequent, and lineStart is - // usually (~97%) false on entry, skipped newlines account for - // about 50% of all iterations of the 'start' loop. - -start: - var c rune - - // Deal with leading spaces and indentation. 
- blank := false - savedLineStart := sc.lineStart - if sc.lineStart { - sc.lineStart = false - col := 0 - for { - c = sc.peekRune() - if c == ' ' { - col++ - sc.readRune() - } else if c == '\t' { - const tab = 8 - col += int(tab - (sc.pos.Col-1)%tab) - sc.readRune() - } else { - break - } - } - - // The third clause matches EOF. - if c == '#' || c == '\n' || c == 0 { - blank = true - } - - // Compute indentation level for non-blank lines not - // inside an expression. This is not the common case. - if !blank && sc.depth == 0 { - cur := sc.indentstk[len(sc.indentstk)-1] - if col > cur { - // indent - sc.dents++ - sc.indentstk = append(sc.indentstk, col) - } else if col < cur { - // outdent(s) - for len(sc.indentstk) > 0 && col < sc.indentstk[len(sc.indentstk)-1] { - sc.dents-- - sc.indentstk = sc.indentstk[:len(sc.indentstk)-1] // pop - } - if col != sc.indentstk[len(sc.indentstk)-1] { - sc.error(sc.pos, "unindent does not match any outer indentation level") - } - } - } - } - - // Return saved indentation tokens. - if sc.dents != 0 { - sc.startToken(val) - sc.endToken(val) - if sc.dents < 0 { - sc.dents++ - return OUTDENT - } else { - sc.dents-- - return INDENT - } - } - - // start of line proper - c = sc.peekRune() - - // Skip spaces. - for c == ' ' || c == '\t' { - sc.readRune() - c = sc.peekRune() - } - - // comment - if c == '#' { - if sc.keepComments { - sc.startToken(val) - } - // Consume up to newline (included). - for c != 0 && c != '\n' { - sc.readRune() - c = sc.peekRune() - } - if sc.keepComments { - sc.endToken(val) - if blank { - sc.lineComments = append(sc.lineComments, Comment{val.pos, val.raw}) - } else { - sc.suffixComments = append(sc.suffixComments, Comment{val.pos, val.raw}) - } - } - } - - // newline - if c == '\n' { - sc.lineStart = true - - // Ignore newlines within expressions (common case). - if sc.depth > 0 { - sc.readRune() - goto start - } - - // Ignore blank lines, except in the REPL, - // where they emit OUTDENTs and NEWLINE. - if blank { - if sc.readline == nil { - sc.readRune() - goto start - } else if len(sc.indentstk) > 1 { - sc.dents = 1 - len(sc.indentstk) - sc.indentstk = sc.indentstk[:1] - goto start - } - } - - // At top-level (not in an expression). - sc.startToken(val) - sc.readRune() - val.raw = "\n" - return NEWLINE - } - - // end of file - if c == 0 { - // Emit OUTDENTs for unfinished indentation, - // preceded by a NEWLINE if we haven't just emitted one. 
- if len(sc.indentstk) > 1 { - if savedLineStart { - sc.dents = 1 - len(sc.indentstk) - sc.indentstk = sc.indentstk[:1] - goto start - } else { - sc.lineStart = true - sc.startToken(val) - val.raw = "\n" - return NEWLINE - } - } - - sc.startToken(val) - sc.endToken(val) - return EOF - } - - // line continuation - if c == '\\' { - sc.readRune() - if sc.peekRune() != '\n' { - sc.errorf(sc.pos, "stray backslash in program") - } - sc.readRune() - goto start - } - - // start of the next token - sc.startToken(val) - - // comma (common case) - if c == ',' { - sc.readRune() - sc.endToken(val) - return COMMA - } - - // string literal - if c == '"' || c == '\'' { - return sc.scanString(val, c) - } - - // identifier or keyword - if isIdentStart(c) { - // raw string literal - if c == 'r' && len(sc.rest) > 1 && (sc.rest[1] == '"' || sc.rest[1] == '\'') { - sc.readRune() - c = sc.peekRune() - return sc.scanString(val, c) - } - - for isIdent(c) { - sc.readRune() - c = sc.peekRune() - } - sc.endToken(val) - if k, ok := keywordToken[val.raw]; ok { - return k - } - - return IDENT - } - - // brackets - switch c { - case '[', '(', '{': - sc.depth++ - sc.readRune() - sc.endToken(val) - switch c { - case '[': - return LBRACK - case '(': - return LPAREN - case '{': - return LBRACE - } - panic("unreachable") - - case ']', ')', '}': - if sc.depth == 0 { - sc.errorf(sc.pos, "unexpected %q", c) - } else { - sc.depth-- - } - sc.readRune() - sc.endToken(val) - switch c { - case ']': - return RBRACK - case ')': - return RPAREN - case '}': - return RBRACE - } - panic("unreachable") - } - - // int or float literal, or period - if isdigit(c) || c == '.' { - return sc.scanNumber(val, c) - } - - // other punctuation - defer sc.endToken(val) - switch c { - case '=', '<', '>', '!', '+', '-', '%', '/', '&', '|', '^': // possibly followed by '=' - start := sc.pos - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - switch c { - case '<': - return LE - case '>': - return GE - case '=': - return EQL - case '!': - return NEQ - case '+': - return PLUS_EQ - case '-': - return MINUS_EQ - case '/': - return SLASH_EQ - case '%': - return PERCENT_EQ - case '&': - return AMP_EQ - case '|': - return PIPE_EQ - case '^': - return CIRCUMFLEX_EQ - } - } - switch c { - case '=': - return EQ - case '<': - if sc.peekRune() == '<' { - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - return LTLT_EQ - } else { - return LTLT - } - } - return LT - case '>': - if sc.peekRune() == '>' { - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - return GTGT_EQ - } else { - return GTGT - } - } - return GT - case '!': - sc.error(start, "unexpected input character '!'") - case '+': - return PLUS - case '-': - return MINUS - case '/': - if sc.peekRune() == '/' { - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - return SLASHSLASH_EQ - } else { - return SLASHSLASH - } - } - return SLASH - case '%': - return PERCENT - case '&': - return AMP - case '|': - return PIPE - case '^': - return CIRCUMFLEX - } - panic("unreachable") - - case ':', ';', '~': // single-char tokens (except comma) - sc.readRune() - switch c { - case ':': - return COLON - case ';': - return SEMI - case '~': - return TILDE - } - panic("unreachable") - - case '*': // possibly followed by '*' or '=' - sc.readRune() - switch sc.peekRune() { - case '*': - sc.readRune() - return STARSTAR - case '=': - sc.readRune() - return STAR_EQ - } - return STAR - } - - sc.errorf(sc.pos, "unexpected input character %#q", c) - panic("unreachable") -} - -func (sc *scanner) 
scanString(val *tokenValue, quote rune) Token { - start := sc.pos - triple := len(sc.rest) >= 3 && sc.rest[0] == byte(quote) && sc.rest[1] == byte(quote) && sc.rest[2] == byte(quote) - sc.readRune() - if !triple { - // Precondition: startToken was already called. - for { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - c := sc.readRune() - if c == quote { - break - } - if c == '\n' { - sc.error(val.pos, "unexpected newline in string") - } - if c == '\\' { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - sc.readRune() - } - } - sc.endToken(val) - } else { - // triple-quoted string literal - sc.readRune() - sc.readRune() - - // A triple-quoted string literal may span multiple - // gulps of REPL input; it is the only such token. - // Thus we must avoid {start,end}Token. - raw := new(strings.Builder) - - // Copy the prefix, e.g. r''' or """ (see startToken). - raw.Write(sc.token[:len(sc.token)-len(sc.rest)]) - - quoteCount := 0 - for { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - c := sc.readRune() - raw.WriteRune(c) - if c == quote { - quoteCount++ - if quoteCount == 3 { - break - } - } else { - quoteCount = 0 - } - if c == '\\' { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - c = sc.readRune() - raw.WriteRune(c) - } - } - val.raw = raw.String() - } - - s, _, err := unquote(val.raw) - if err != nil { - sc.error(start, err.Error()) - } - val.string = s - return STRING -} - -func (sc *scanner) scanNumber(val *tokenValue, c rune) Token { - // https://github.com/google/starlark-go/blob/master/doc/spec.md#lexical-elements - // - // Python features not supported: - // - integer literals of >64 bits of precision - // - 123L or 123l long suffix - // - traditional octal: 0755 - // https://docs.python.org/2/reference/lexical_analysis.html#integer-and-long-integer-literals - - start := sc.pos - fraction, exponent := false, false - - if c == '.' { - // dot or start of fraction - sc.readRune() - c = sc.peekRune() - if !isdigit(c) { - sc.endToken(val) - return DOT - } - fraction = true - } else if c == '0' { - // hex, octal, binary or float - sc.readRune() - c = sc.peekRune() - - if c == '.' { - fraction = true - } else if c == 'x' || c == 'X' { - // hex - sc.readRune() - c = sc.peekRune() - if !isxdigit(c) { - sc.error(start, "invalid hex literal") - } - for isxdigit(c) { - sc.readRune() - c = sc.peekRune() - } - } else if c == 'o' || c == 'O' { - // octal - sc.readRune() - c = sc.peekRune() - if !isodigit(c) { - sc.error(sc.pos, "invalid octal literal") - } - for isodigit(c) { - sc.readRune() - c = sc.peekRune() - } - } else if c == 'b' || c == 'B' { - // binary - sc.readRune() - c = sc.peekRune() - if !isbdigit(c) { - sc.error(sc.pos, "invalid binary literal") - } - for isbdigit(c) { - sc.readRune() - c = sc.peekRune() - } - } else { - // float (or obsolete octal "0755") - allzeros, octal := true, true - for isdigit(c) { - if c != '0' { - allzeros = false - } - if c > '7' { - octal = false - } - sc.readRune() - c = sc.peekRune() - } - if c == '.' { - fraction = true - } else if c == 'e' || c == 'E' { - exponent = true - } else if octal && !allzeros { - sc.endToken(val) - sc.errorf(sc.pos, "obsolete form of octal literal; use 0o%s", val.raw[1:]) - } - } - } else { - // decimal - for isdigit(c) { - sc.readRune() - c = sc.peekRune() - } - - if c == '.' { - fraction = true - } else if c == 'e' || c == 'E' { - exponent = true - } - } - - if fraction { - sc.readRune() // consume '.' 
- c = sc.peekRune() - for isdigit(c) { - sc.readRune() - c = sc.peekRune() - } - - if c == 'e' || c == 'E' { - exponent = true - } - } - - if exponent { - sc.readRune() // consume [eE] - c = sc.peekRune() - if c == '+' || c == '-' { - sc.readRune() - c = sc.peekRune() - if !isdigit(c) { - sc.error(sc.pos, "invalid float literal") - } - } - for isdigit(c) { - sc.readRune() - c = sc.peekRune() - } - } - - sc.endToken(val) - if fraction || exponent { - var err error - val.float, err = strconv.ParseFloat(val.raw, 64) - if err != nil { - sc.error(sc.pos, "invalid float literal") - } - return FLOAT - } else { - var err error - s := val.raw - val.bigInt = nil - if len(s) > 2 && s[0] == '0' && (s[1] == 'o' || s[1] == 'O') { - val.int, err = strconv.ParseInt(s[2:], 8, 64) - } else if len(s) > 2 && s[0] == '0' && (s[1] == 'b' || s[1] == 'B') { - val.int, err = strconv.ParseInt(s[2:], 2, 64) - } else { - val.int, err = strconv.ParseInt(s, 0, 64) - if err != nil { - num := new(big.Int) - var ok bool = true - val.bigInt, ok = num.SetString(s, 0) - if ok { - err = nil - } - } - } - if err != nil { - sc.error(start, "invalid int literal") - } - return INT - } -} - -// isIdent reports whether c is an identifier rune. -func isIdent(c rune) bool { - return isdigit(c) || isIdentStart(c) -} - -func isIdentStart(c rune) bool { - return 'a' <= c && c <= 'z' || - 'A' <= c && c <= 'Z' || - c == '_' || - unicode.IsLetter(c) -} - -func isdigit(c rune) bool { return '0' <= c && c <= '9' } -func isodigit(c rune) bool { return '0' <= c && c <= '7' } -func isxdigit(c rune) bool { return isdigit(c) || 'A' <= c && c <= 'F' || 'a' <= c && c <= 'f' } -func isbdigit(c rune) bool { return '0' == c || c == '1' } - -// keywordToken records the special tokens for -// strings that should not be treated as ordinary identifiers. -var keywordToken = map[string]Token{ - "and": AND, - "break": BREAK, - "continue": CONTINUE, - "def": DEF, - "elif": ELIF, - "else": ELSE, - "for": FOR, - "if": IF, - "in": IN, - "lambda": LAMBDA, - "load": LOAD, - "not": NOT, - "or": OR, - "pass": PASS, - "return": RETURN, - "while": WHILE, - - // reserved words: - "as": ILLEGAL, - // "assert": ILLEGAL, // heavily used by our tests - "class": ILLEGAL, - "del": ILLEGAL, - "except": ILLEGAL, - "finally": ILLEGAL, - "from": ILLEGAL, - "global": ILLEGAL, - "import": ILLEGAL, - "is": ILLEGAL, - "nonlocal": ILLEGAL, - "raise": ILLEGAL, - "try": ILLEGAL, - "with": ILLEGAL, - "yield": ILLEGAL, -} diff --git a/vendor/github.com/k14s/starlark-go/syntax/syntax.go b/vendor/github.com/k14s/starlark-go/syntax/syntax.go deleted file mode 100644 index 04fab7c3e..000000000 --- a/vendor/github.com/k14s/starlark-go/syntax/syntax.go +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package syntax provides a Starlark parser and abstract syntax tree. -package syntax // import "github.com/k14s/starlark-go/syntax" - -// A Node is a node in a Starlark syntax tree. -type Node interface { - // Span returns the start and end position of the expression. - Span() (start, end Position) - - // Comments returns the comments associated with this node. - // It returns nil if RetainComments was not specified during parsing, - // or if AllocComments was not called. - Comments() *Comments - - // AllocComments allocates a new Comments node if there was none. - // This makes possible to add new comments using Comments() method. 
- AllocComments() -} - -// A Comment represents a single # comment. -type Comment struct { - Start Position - Text string // without trailing newline -} - -// Comments collects the comments associated with an expression. -type Comments struct { - Before []Comment // whole-line comments before this expression - Suffix []Comment // end-of-line comments after this expression (up to 1) - - // For top-level expressions only, After lists whole-line - // comments following the expression. - After []Comment -} - -// A commentsRef is a possibly-nil reference to a set of comments. -// A commentsRef is embedded in each type of syntax node, -// and provides its Comments and AllocComments methods. -type commentsRef struct{ ref *Comments } - -// Comments returns the comments associated with a syntax node, -// or nil if AllocComments has not yet been called. -func (cr commentsRef) Comments() *Comments { return cr.ref } - -// AllocComments enables comments to be associated with a syntax node. -func (cr *commentsRef) AllocComments() { - if cr.ref == nil { - cr.ref = new(Comments) - } -} - -// Start returns the start position of the expression. -func Start(n Node) Position { - start, _ := n.Span() - return start -} - -// End returns the end position of the expression. -func End(n Node) Position { - _, end := n.Span() - return end -} - -// A File represents a Starlark file. -type File struct { - commentsRef - Path string - Stmts []Stmt - - Module interface{} // a *resolve.Module, set by resolver -} - -func (x *File) Span() (start, end Position) { - if len(x.Stmts) == 0 { - return - } - start, _ = x.Stmts[0].Span() - _, end = x.Stmts[len(x.Stmts)-1].Span() - return start, end -} - -// A Stmt is a Starlark statement. -type Stmt interface { - Node - stmt() -} - -func (*AssignStmt) stmt() {} -func (*BranchStmt) stmt() {} -func (*DefStmt) stmt() {} -func (*ExprStmt) stmt() {} -func (*ForStmt) stmt() {} -func (*WhileStmt) stmt() {} -func (*IfStmt) stmt() {} -func (*LoadStmt) stmt() {} -func (*ReturnStmt) stmt() {} - -// An AssignStmt represents an assignment: -// x = 0 -// x, y = y, x -// x += 1 -type AssignStmt struct { - commentsRef - OpPos Position - Op Token // = EQ | {PLUS,MINUS,STAR,PERCENT}_EQ - LHS Expr - RHS Expr -} - -func (x *AssignStmt) Span() (start, end Position) { - start, _ = x.LHS.Span() - _, end = x.RHS.Span() - return -} - -// A DefStmt represents a function definition. -type DefStmt struct { - commentsRef - Def Position - Name *Ident - Params []Expr // param = ident | ident=expr | * | *ident | **ident - Body []Stmt - - Function interface{} // a *resolve.Function, set by resolver -} - -func (x *DefStmt) Span() (start, end Position) { - _, end = x.Body[len(x.Body)-1].Span() - return x.Def, end -} - -// An ExprStmt is an expression evaluated for side effects. -type ExprStmt struct { - commentsRef - X Expr -} - -func (x *ExprStmt) Span() (start, end Position) { - return x.X.Span() -} - -// An IfStmt is a conditional: If Cond: True; else: False. -// 'elseif' is desugared into a chain of IfStmts. -type IfStmt struct { - commentsRef - If Position // IF or ELIF - Cond Expr - True []Stmt - ElsePos Position // ELSE or ELIF - False []Stmt // optional -} - -func (x *IfStmt) Span() (start, end Position) { - body := x.False - if body == nil { - body = x.True - } - _, end = body[len(body)-1].Span() - return x.If, end -} - -// A LoadStmt loads another module and binds names from it: -// load(Module, "x", y="foo"). 
-// -// The AST is slightly unfaithful to the concrete syntax here because -// Starlark's load statement, so that it can be implemented in Python, -// binds some names (like y above) with an identifier and some (like x) -// without. For consistency we create fake identifiers for all the -// strings. -type LoadStmt struct { - commentsRef - Load Position - Module *Literal // a string - From []*Ident // name defined in loading module - To []*Ident // name in loaded module - Rparen Position -} - -func (x *LoadStmt) Span() (start, end Position) { - return x.Load, x.Rparen -} - -// ModuleName returns the name of the module loaded by this statement. -func (x *LoadStmt) ModuleName() string { return x.Module.Value.(string) } - -// A BranchStmt changes the flow of control: break, continue, pass. -type BranchStmt struct { - commentsRef - Token Token // = BREAK | CONTINUE | PASS - TokenPos Position -} - -func (x *BranchStmt) Span() (start, end Position) { - return x.TokenPos, x.TokenPos.add(x.Token.String()) -} - -// A ReturnStmt returns from a function. -type ReturnStmt struct { - commentsRef - Return Position - Result Expr // may be nil -} - -func (x *ReturnStmt) Span() (start, end Position) { - if x.Result == nil { - return x.Return, x.Return.add("return") - } - _, end = x.Result.Span() - return x.Return, end -} - -// An Expr is a Starlark expression. -type Expr interface { - Node - expr() -} - -func (*BinaryExpr) expr() {} -func (*CallExpr) expr() {} -func (*Comprehension) expr() {} -func (*CondExpr) expr() {} -func (*DictEntry) expr() {} -func (*DictExpr) expr() {} -func (*DotExpr) expr() {} -func (*Ident) expr() {} -func (*IndexExpr) expr() {} -func (*LambdaExpr) expr() {} -func (*ListExpr) expr() {} -func (*Literal) expr() {} -func (*ParenExpr) expr() {} -func (*SliceExpr) expr() {} -func (*TupleExpr) expr() {} -func (*UnaryExpr) expr() {} - -// An Ident represents an identifier. -type Ident struct { - commentsRef - NamePos Position - Name string - - Binding interface{} // a *resolver.Binding, set by resolver -} - -func (x *Ident) Span() (start, end Position) { - return x.NamePos, x.NamePos.add(x.Name) -} - -// A Literal represents a literal string or number. -type Literal struct { - commentsRef - Token Token // = STRING | INT - TokenPos Position - Raw string // uninterpreted text - Value interface{} // = string | int64 | *big.Int -} - -func (x *Literal) Span() (start, end Position) { - return x.TokenPos, x.TokenPos.add(x.Raw) -} - -// A ParenExpr represents a parenthesized expression: (X). -type ParenExpr struct { - commentsRef - Lparen Position - X Expr - Rparen Position -} - -func (x *ParenExpr) Span() (start, end Position) { - return x.Lparen, x.Rparen.add(")") -} - -// A CallExpr represents a function call expression: Fn(Args). -type CallExpr struct { - commentsRef - Fn Expr - Lparen Position - Args []Expr // arg = expr | ident=expr | *expr | **expr - Rparen Position -} - -func (x *CallExpr) Span() (start, end Position) { - start, _ = x.Fn.Span() - return start, x.Rparen.add(")") -} - -// A DotExpr represents a field or method selector: X.Name. -type DotExpr struct { - commentsRef - X Expr - Dot Position - NamePos Position - Name *Ident -} - -func (x *DotExpr) Span() (start, end Position) { - start, _ = x.X.Span() - _, end = x.Name.Span() - return -} - -// A Comprehension represents a list or dict comprehension: -// [Body for ... if ...] or {Body for ... if ...} -type Comprehension struct { - commentsRef - Curly bool // {x:y for ...} or {x for ...}, not [x for ...] 
- Lbrack Position - Body Expr - Clauses []Node // = *ForClause | *IfClause - Rbrack Position -} - -func (x *Comprehension) Span() (start, end Position) { - return x.Lbrack, x.Rbrack.add("]") -} - -// A ForStmt represents a loop: for Vars in X: Body. -type ForStmt struct { - commentsRef - For Position - Vars Expr // name, or tuple of names - X Expr - Body []Stmt -} - -func (x *ForStmt) Span() (start, end Position) { - _, end = x.Body[len(x.Body)-1].Span() - return x.For, end -} - -// A WhileStmt represents a while loop: while X: Body. -type WhileStmt struct { - commentsRef - While Position - Cond Expr - Body []Stmt -} - -func (x *WhileStmt) Span() (start, end Position) { - _, end = x.Body[len(x.Body)-1].Span() - return x.While, end -} - -// A ForClause represents a for clause in a list comprehension: for Vars in X. -type ForClause struct { - commentsRef - For Position - Vars Expr // name, or tuple of names - In Position - X Expr -} - -func (x *ForClause) Span() (start, end Position) { - _, end = x.X.Span() - return x.For, end -} - -// An IfClause represents an if clause in a list comprehension: if Cond. -type IfClause struct { - commentsRef - If Position - Cond Expr -} - -func (x *IfClause) Span() (start, end Position) { - _, end = x.Cond.Span() - return x.If, end -} - -// A DictExpr represents a dictionary literal: { List }. -type DictExpr struct { - commentsRef - Lbrace Position - List []Expr // all *DictEntrys - Rbrace Position -} - -func (x *DictExpr) Span() (start, end Position) { - return x.Lbrace, x.Rbrace.add("}") -} - -// A DictEntry represents a dictionary entry: Key: Value. -// Used only within a DictExpr. -type DictEntry struct { - commentsRef - Key Expr - Colon Position - Value Expr -} - -func (x *DictEntry) Span() (start, end Position) { - start, _ = x.Key.Span() - _, end = x.Value.Span() - return start, end -} - -// A LambdaExpr represents an inline function abstraction. -// -// Although they may be added in future, lambda expressions are not -// currently part of the Starlark spec, so their use is controlled by the -// resolver.AllowLambda flag. -type LambdaExpr struct { - commentsRef - Lambda Position - Params []Expr // param = ident | ident=expr | * | *ident | **ident - Body Expr - - Function interface{} // a *resolve.Function, set by resolver -} - -func (x *LambdaExpr) Span() (start, end Position) { - _, end = x.Body.Span() - return x.Lambda, end -} - -// A ListExpr represents a list literal: [ List ]. -type ListExpr struct { - commentsRef - Lbrack Position - List []Expr - Rbrack Position -} - -func (x *ListExpr) Span() (start, end Position) { - return x.Lbrack, x.Rbrack.add("]") -} - -// CondExpr represents the conditional: X if COND else ELSE. -type CondExpr struct { - commentsRef - If Position - Cond Expr - True Expr - ElsePos Position - False Expr -} - -func (x *CondExpr) Span() (start, end Position) { - start, _ = x.True.Span() - _, end = x.False.Span() - return start, end -} - -// A TupleExpr represents a tuple literal: (List). -type TupleExpr struct { - commentsRef - Lparen Position // optional (e.g. in x, y = 0, 1), but required if List is empty - List []Expr - Rparen Position -} - -func (x *TupleExpr) Span() (start, end Position) { - if x.Lparen.IsValid() { - return x.Lparen, x.Rparen - } else { - return Start(x.List[0]), End(x.List[len(x.List)-1]) - } -} - -// A UnaryExpr represents a unary expression: Op X. -// -// As a special case, UnaryOp{Op:Star} may also represent -// the star parameter in def f(*args) or def f(*, x). 
-type UnaryExpr struct { - commentsRef - OpPos Position - Op Token - X Expr // may be nil if Op==STAR -} - -func (x *UnaryExpr) Span() (start, end Position) { - if x.X != nil { - _, end = x.X.Span() - } else { - end = x.OpPos.add("*") - } - return x.OpPos, end -} - -// A BinaryExpr represents a binary expression: X Op Y. -// -// As a special case, BinaryExpr{Op:EQ} may also -// represent a named argument in a call f(k=v) -// or a named parameter in a function declaration -// def f(param=default). -type BinaryExpr struct { - commentsRef - X Expr - OpPos Position - Op Token - Y Expr -} - -func (x *BinaryExpr) Span() (start, end Position) { - start, _ = x.X.Span() - _, end = x.Y.Span() - return start, end -} - -// A SliceExpr represents a slice or substring expression: X[Lo:Hi:Step]. -type SliceExpr struct { - commentsRef - X Expr - Lbrack Position - Lo, Hi, Step Expr // all optional - Rbrack Position -} - -func (x *SliceExpr) Span() (start, end Position) { - start, _ = x.X.Span() - return start, x.Rbrack -} - -// An IndexExpr represents an index expression: X[Y]. -type IndexExpr struct { - commentsRef - X Expr - Lbrack Position - Y Expr - Rbrack Position -} - -func (x *IndexExpr) Span() (start, end Position) { - start, _ = x.X.Span() - return start, x.Rbrack -} diff --git a/vendor/github.com/k14s/starlark-go/syntax/walk.go b/vendor/github.com/k14s/starlark-go/syntax/walk.go deleted file mode 100644 index 1491149c6..000000000 --- a/vendor/github.com/k14s/starlark-go/syntax/walk.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// Walk traverses a syntax tree in depth-first order. -// It starts by calling f(n); n must not be nil. -// If f returns true, Walk calls itself -// recursively for each non-nil child of n. -// Walk then calls f(nil). -func Walk(n Node, f func(Node) bool) { - if n == nil { - panic("nil") - } - if !f(n) { - return - } - - // TODO(adonovan): opt: order cases using profile data. 
- switch n := n.(type) { - case *File: - walkStmts(n.Stmts, f) - - case *ExprStmt: - Walk(n.X, f) - - case *BranchStmt: - // no-op - - case *IfStmt: - Walk(n.Cond, f) - walkStmts(n.True, f) - walkStmts(n.False, f) - - case *AssignStmt: - Walk(n.LHS, f) - Walk(n.RHS, f) - - case *DefStmt: - Walk(n.Name, f) - for _, param := range n.Params { - Walk(param, f) - } - walkStmts(n.Body, f) - - case *ForStmt: - Walk(n.Vars, f) - Walk(n.X, f) - walkStmts(n.Body, f) - - case *ReturnStmt: - if n.Result != nil { - Walk(n.Result, f) - } - - case *LoadStmt: - Walk(n.Module, f) - for _, from := range n.From { - Walk(from, f) - } - for _, to := range n.To { - Walk(to, f) - } - - case *Ident, *Literal: - // no-op - - case *ListExpr: - for _, x := range n.List { - Walk(x, f) - } - - case *ParenExpr: - Walk(n.X, f) - - case *CondExpr: - Walk(n.Cond, f) - Walk(n.True, f) - Walk(n.False, f) - - case *IndexExpr: - Walk(n.X, f) - Walk(n.Y, f) - - case *DictEntry: - Walk(n.Key, f) - Walk(n.Value, f) - - case *SliceExpr: - Walk(n.X, f) - if n.Lo != nil { - Walk(n.Lo, f) - } - if n.Hi != nil { - Walk(n.Hi, f) - } - if n.Step != nil { - Walk(n.Step, f) - } - - case *Comprehension: - Walk(n.Body, f) - for _, clause := range n.Clauses { - Walk(clause, f) - } - - case *IfClause: - Walk(n.Cond, f) - - case *ForClause: - Walk(n.Vars, f) - Walk(n.X, f) - - case *TupleExpr: - for _, x := range n.List { - Walk(x, f) - } - - case *DictExpr: - for _, entry := range n.List { - entry := entry.(*DictEntry) - Walk(entry.Key, f) - Walk(entry.Value, f) - } - - case *UnaryExpr: - if n.X != nil { - Walk(n.X, f) - } - - case *BinaryExpr: - Walk(n.X, f) - Walk(n.Y, f) - - case *DotExpr: - Walk(n.X, f) - Walk(n.Name, f) - - case *CallExpr: - Walk(n.Fn, f) - for _, arg := range n.Args { - Walk(arg, f) - } - - case *LambdaExpr: - for _, param := range n.Params { - Walk(param, f) - } - Walk(n.Body, f) - - default: - panic(n) - } - - f(nil) -} - -func walkStmts(stmts []Stmt, f func(Node) bool) { - for _, stmt := range stmts { - Walk(stmt, f) - } -} diff --git a/vendor/github.com/k14s/ytt/LICENSE b/vendor/github.com/k14s/ytt/LICENSE deleted file mode 100644 index dcae162bd..000000000 --- a/vendor/github.com/k14s/ytt/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/k14s/ytt/NOTICE b/vendor/github.com/k14s/ytt/NOTICE deleted file mode 100644 index 57fef3f10..000000000 --- a/vendor/github.com/k14s/ytt/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -ytt - -Copyright (c) 2019 - Present Pivotal Software, Inc. All Rights Reserved. - -This product is licensed to you under the Apache License, Version 2.0 (the "License"). You may not use this product except in compliance with the License. - -This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file. diff --git a/vendor/github.com/k14s/ytt/pkg/cmd/template/bulk_input.go b/vendor/github.com/k14s/ytt/pkg/cmd/template/bulk_input.go deleted file mode 100644 index f9b50d499..000000000 --- a/vendor/github.com/k14s/ytt/pkg/cmd/template/bulk_input.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "encoding/json" - - "github.com/k14s/ytt/pkg/cmd/ui" - "github.com/k14s/ytt/pkg/files" - "github.com/spf13/cobra" -) - -type BulkFilesSourceOpts struct { - bulkIn string - bulkOut bool -} - -func (s *BulkFilesSourceOpts) Set(cmd *cobra.Command) { - cmd.Flags().StringVar(&s.bulkIn, "bulk-in", "", "Accept files in bulk format") - cmd.Flags().BoolVar(&s.bulkOut, "bulk-out", false, "Output files in bulk format") -} - -type BulkFilesSource struct { - opts BulkFilesSourceOpts - ui ui.UI -} - -type BulkFiles struct { - Files []BulkFile `json:"files,omitempty"` - Errors string `json:"errors,omitempty"` -} - -type BulkFile struct { - Name string `json:"name"` - Data string `json:"data"` -} - -func NewBulkFilesSource(opts BulkFilesSourceOpts, ui ui.UI) *BulkFilesSource { - return &BulkFilesSource{opts, ui} -} - -func (s *BulkFilesSource) HasInput() bool { return len(s.opts.bulkIn) > 0 } -func (s *BulkFilesSource) HasOutput() bool { return s.opts.bulkOut } - -func (s BulkFilesSource) Input() (Input, error) { - var fs BulkFiles - err := json.Unmarshal([]byte(s.opts.bulkIn), &fs) - if err != nil { - return Input{}, err - } - - var result []*files.File - - for _, f := range fs.Files { - file, err := files.NewFileFromSource(files.NewBytesSource(f.Name, []byte(f.Data))) - if err != nil { - return Input{}, err - } - - result = append(result, file) - } - - return Input{files.NewSortedFiles(result)}, nil -} - -func (s *BulkFilesSource) Output(out Output) error { - fs := BulkFiles{} - - if out.Err != nil { - fs.Errors = out.Err.Error() - } - - for _, outputFile := range out.Files { - fs.Files = append(fs.Files, BulkFile{ - Name: outputFile.RelativePath(), - Data: string(outputFile.Bytes()), - }) - } - - resultBytes, err := json.Marshal(fs) - if err != nil { - return err - } - - s.ui.Debugf("### result\n") - s.ui.Printf("%s", resultBytes) - - return nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/cmd/template/cmd.go b/vendor/github.com/k14s/ytt/pkg/cmd/template/cmd.go deleted file mode 100644 index 6a130ded1..000000000 --- a/vendor/github.com/k14s/ytt/pkg/cmd/template/cmd.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "time" - - "github.com/k14s/ytt/pkg/cmd/ui" - "github.com/k14s/ytt/pkg/files" - "github.com/k14s/ytt/pkg/workspace" - "github.com/k14s/ytt/pkg/yamlmeta" - "github.com/spf13/cobra" -) - -type Options struct { - IgnoreUnknownComments bool - ImplicitMapKeyOverrides bool - - StrictYAML bool - Debug bool - InspectFiles bool - - BulkFilesSourceOpts BulkFilesSourceOpts - RegularFilesSourceOpts RegularFilesSourceOpts - FileMarksOpts FileMarksOpts - DataValuesFlags DataValuesFlags -} - -type Input struct { - Files []*files.File -} - -type Output struct { - Files []files.OutputFile - DocSet *yamlmeta.DocumentSet - Err error -} - -type FileSource interface { - HasInput() bool - HasOutput() bool - Input() (Input, error) - Output(Output) error -} - -var _ []FileSource = []FileSource{&BulkFilesSource{}, &RegularFilesSource{}} - -func NewOptions() *Options { - return &Options{} -} - -func NewCmd(o *Options) *cobra.Command { - cmd := &cobra.Command{ - Use: "template", - Aliases: []string{"t", "tpl"}, - Short: "Process YAML templates (deprecated; use top-level command -- e.g. 
`ytt -f-` instead of `ytt template -f-`)", - RunE: func(c *cobra.Command, args []string) error { return o.Run() }, - } - cmd.Flags().BoolVar(&o.IgnoreUnknownComments, "ignore-unknown-comments", false, - "Configure whether unknown comments are considered as errors (comments that do not start with '#@' or '#!')") - cmd.Flags().BoolVar(&o.ImplicitMapKeyOverrides, "implicit-map-key-overrides", false, - "Configure whether implicit map keys overrides are allowed") - cmd.Flags().BoolVarP(&o.StrictYAML, "strict", "s", false, "Configure to use _strict_ YAML subset") - cmd.Flags().BoolVar(&o.Debug, "debug", false, "Enable debug output") - cmd.Flags().BoolVar(&o.InspectFiles, "files-inspect", false, "Inspect files") - - o.BulkFilesSourceOpts.Set(cmd) - o.RegularFilesSourceOpts.Set(cmd) - o.FileMarksOpts.Set(cmd) - o.DataValuesFlags.Set(cmd) - return cmd -} - -func (o *Options) Run() error { - ui := ui.NewTTY(o.Debug) - t1 := time.Now() - - defer func() { - ui.Debugf("total: %s\n", time.Now().Sub(t1)) - }() - - srcs := []FileSource{ - NewBulkFilesSource(o.BulkFilesSourceOpts, ui), - NewRegularFilesSource(o.RegularFilesSourceOpts, ui), - } - - in, err := o.pickSource(srcs, func(s FileSource) bool { return s.HasInput() }).Input() - if err != nil { - return err - } - - out := o.RunWithFiles(in, ui) - return o.pickSource(srcs, func(s FileSource) bool { return s.HasOutput() }).Output(out) -} - -func (o *Options) RunWithFiles(in Input, ui ui.UI) Output { - var err error - - in.Files, err = o.FileMarksOpts.Apply(in.Files) - if err != nil { - return Output{Err: err} - } - - rootLibrary := workspace.NewRootLibrary(in.Files) - rootLibrary.Print(ui.DebugWriter()) - - if o.InspectFiles { - return o.inspectFiles(rootLibrary) - } - - valuesOverlays, libraryValuesOverlays, err := o.DataValuesFlags.AsOverlays(o.StrictYAML) - if err != nil { - return Output{Err: err} - } - - libraryExecutionFactory := workspace.NewLibraryExecutionFactory(ui, workspace.TemplateLoaderOpts{ - IgnoreUnknownComments: o.IgnoreUnknownComments, - ImplicitMapKeyOverrides: o.ImplicitMapKeyOverrides, - StrictYAML: o.StrictYAML, - }) - - libraryCtx := workspace.LibraryExecutionContext{Current: rootLibrary, Root: rootLibrary} - rootLibraryExecution := libraryExecutionFactory.New(libraryCtx) - - schema, librarySchemas, err := rootLibraryExecution.Schemas(nil) - if err != nil { - return Output{Err: err} - } - - values, libraryValues, err := rootLibraryExecution.Values(valuesOverlays, schema) - if err != nil { - return Output{Err: err} - } - - libraryValues = append(libraryValues, libraryValuesOverlays...) 
- - if o.DataValuesFlags.Inspect { - return Output{ - DocSet: &yamlmeta.DocumentSet{ - Items: []*yamlmeta.Document{values.Doc}, - }, - } - } - - result, err := rootLibraryExecution.Eval(values, libraryValues, librarySchemas) - if err != nil { - return Output{Err: err} - } - - return Output{Files: result.Files, DocSet: result.DocSet} -} - -func (o *Options) pickSource(srcs []FileSource, pickFunc func(FileSource) bool) FileSource { - for _, src := range srcs { - if pickFunc(src) { - return src - } - } - return srcs[len(srcs)-1] -} - -func (o *Options) inspectFiles(rootLibrary *workspace.Library) Output { - accessibleFiles := rootLibrary.ListAccessibleFiles() - workspace.SortFilesInLibrary(accessibleFiles) - - paths := &yamlmeta.Array{} - - for _, fileInLib := range accessibleFiles { - paths.Items = append(paths.Items, &yamlmeta.ArrayItem{ - Value: fileInLib.File.RelativePath(), - }) - } - - return Output{ - DocSet: &yamlmeta.DocumentSet{ - Items: []*yamlmeta.Document{{Value: paths}}, - }, - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/cmd/template/data_values_file.go b/vendor/github.com/k14s/ytt/pkg/cmd/template/data_values_file.go deleted file mode 100644 index 2f3b8e7c6..000000000 --- a/vendor/github.com/k14s/ytt/pkg/cmd/template/data_values_file.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/yamlmeta" - "github.com/k14s/ytt/pkg/yamltemplate" - yttoverlay "github.com/k14s/ytt/pkg/yttlibrary/overlay" -) - -type DataValuesFile struct { - doc *yamlmeta.Document -} - -func NewDataValuesFile(doc *yamlmeta.Document) DataValuesFile { - return DataValuesFile{doc.DeepCopy()} -} - -func (f DataValuesFile) AsOverlay() (*yamlmeta.Document, error) { - doc := f.doc.DeepCopy() - - if yamltemplate.HasTemplating(doc) { - return nil, fmt.Errorf("Expected to not find annotations inside data values file " + - "(hint: remove comments starting with '#@')") - } - - f.addOverlayReplace(doc) - - return doc, nil -} - -func (f DataValuesFile) addOverlayReplace(node yamlmeta.Node) { - anns := template.NodeAnnotations{ - yttoverlay.AnnotationMatch: template.NodeAnnotation{ - Kwargs: []starlark.Tuple{{ - starlark.String(yttoverlay.MatchAnnotationKwargMissingOK), - starlark.Bool(true), - }}, - }, - } - - replaceAnn := template.NodeAnnotation{ - Kwargs: []starlark.Tuple{{ - starlark.String(yttoverlay.ReplaceAnnotationKwargOrAdd), - starlark.Bool(true), - }}, - } - - for _, val := range node.GetValues() { - switch typedVal := val.(type) { - case *yamlmeta.Array: - anns[yttoverlay.AnnotationReplace] = replaceAnn - case yamlmeta.Node: - f.addOverlayReplace(typedVal) - default: - anns[yttoverlay.AnnotationReplace] = replaceAnn - } - } - - node.SetAnnotations(anns) -} diff --git a/vendor/github.com/k14s/ytt/pkg/cmd/template/data_values_flags.go b/vendor/github.com/k14s/ytt/pkg/cmd/template/data_values_flags.go deleted file mode 100644 index 2ad4383e9..000000000 --- a/vendor/github.com/k14s/ytt/pkg/cmd/template/data_values_flags.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/workspace" - "github.com/k14s/ytt/pkg/yamlmeta" - yttoverlay "github.com/k14s/ytt/pkg/yttlibrary/overlay" - "github.com/spf13/cobra" -) - -const ( - dvsKVSep = "=" - dvsMapKeySep = "." -) - -type DataValuesFlags struct { - EnvFromStrings []string - EnvFromYAML []string - - KVsFromStrings []string - KVsFromYAML []string - KVsFromFiles []string - - FromFiles []string - - Inspect bool - - EnvironFunc func() []string - ReadFileFunc func(string) ([]byte, error) -} - -func (s *DataValuesFlags) Set(cmd *cobra.Command) { - cmd.Flags().StringArrayVar(&s.EnvFromStrings, "data-values-env", nil, "Extract data values (as strings) from prefixed env vars (format: PREFIX for PREFIX_all__key1=str) (can be specified multiple times)") - cmd.Flags().StringArrayVar(&s.EnvFromYAML, "data-values-env-yaml", nil, "Extract data values (parsed as YAML) from prefixed env vars (format: PREFIX for PREFIX_all__key1=true) (can be specified multiple times)") - - cmd.Flags().StringArrayVarP(&s.KVsFromStrings, "data-value", "v", nil, "Set specific data value to given value, as string (format: all.key1.subkey=123) (can be specified multiple times)") - cmd.Flags().StringArrayVar(&s.KVsFromYAML, "data-value-yaml", nil, "Set specific data value to given value, parsed as YAML (format: all.key1.subkey=true) (can be specified multiple times)") - cmd.Flags().StringArrayVar(&s.KVsFromFiles, "data-value-file", nil, "Set specific data value to given file contents, as string (format: all.key1.subkey=/file/path) (can be specified multiple times)") - - cmd.Flags().StringArrayVar(&s.FromFiles, "data-values-file", nil, "Set multiple data values via a YAML file (format: /file/path.yml) (can be specified multiple times)") - - cmd.Flags().BoolVar(&s.Inspect, "data-values-inspect", false, "Inspect data values") -} - -type dataValuesFlagsSource struct { - Values []string - TransformFunc valueTransformFunc - Name string -} - -type valueTransformFunc func(string) (interface{}, error) - -func (s *DataValuesFlags) AsOverlays(strict bool) ([]*workspace.DataValues, []*workspace.DataValues, error) { - plainValFunc := func(rawVal string) (interface{}, error) { return rawVal, nil } - - yamlValFunc := func(rawVal string) (interface{}, error) { - val, err := s.parseYAML(rawVal, strict) - if err != nil { - return nil, fmt.Errorf("Deserializing YAML value: %s", err) - } - return val, nil - } - - var result []*workspace.DataValues - - // Files go first - for _, file := range s.FromFiles { - vals, err := s.file(file, strict) - if err != nil { - return nil, nil, fmt.Errorf("Extracting data value from file: %s", err) - } - result = append(result, vals...) - } - - // Then env vars take precedence over files - // since env vars are specific to command execution - for _, src := range []dataValuesFlagsSource{{s.EnvFromStrings, plainValFunc, "data-values-env"}, {s.EnvFromYAML, yamlValFunc, "data-values-env-yaml"}} { - for _, envPrefix := range src.Values { - vals, err := s.env(envPrefix, src) - if err != nil { - return nil, nil, fmt.Errorf("Extracting data values from env under prefix '%s': %s", envPrefix, err) - } - result = append(result, vals...) 
- } - } - - // KVs take precedence over environment variables - for _, src := range []dataValuesFlagsSource{{s.KVsFromStrings, plainValFunc, "data-value"}, {s.KVsFromYAML, yamlValFunc, "data-value-yaml"}} { - for _, kv := range src.Values { - val, err := s.kv(kv, src) - if err != nil { - return nil, nil, fmt.Errorf("Extracting data value from KV: %s", err) - } - result = append(result, val) - } - } - - // Finally KV files take precedence over rest - // (technically should be same level as KVs, but gotta pick one) - for _, file := range s.KVsFromFiles { - val, err := s.kvFile(file) - if err != nil { - return nil, nil, fmt.Errorf("Extracting data value from file: %s", err) - } - result = append(result, val) - } - - var overlayValues []*workspace.DataValues - var libraryOverlays []*workspace.DataValues - for _, doc := range result { - if doc.IntendedForAnotherLibrary() { - libraryOverlays = append(libraryOverlays, doc) - } else { - overlayValues = append(overlayValues, doc) - } - } - - return overlayValues, libraryOverlays, nil -} - -func (s *DataValuesFlags) file(path string, strict bool) ([]*workspace.DataValues, error) { - libRef, path, err := s.libraryRefAndKey(path) - if err != nil { - return nil, err - } - - contents, err := s.readFile(path) - if err != nil { - return nil, fmt.Errorf("Reading file '%s'", path) - } - - docSetOpts := yamlmeta.DocSetOpts{ - AssociatedName: path, - Strict: strict, - } - - docSet, err := yamlmeta.NewDocumentSetFromBytes(contents, docSetOpts) - if err != nil { - return nil, fmt.Errorf("Unmarshaling YAML data values file '%s': %s", path, err) - } - - var result []*workspace.DataValues - - for _, doc := range docSet.Items { - if doc.Value != nil { - dvsOverlay, err := NewDataValuesFile(doc).AsOverlay() - if err != nil { - return nil, fmt.Errorf("Checking data values file '%s': %s", path, err) - } - dvs, err := workspace.NewDataValuesWithOptionalLib(dvsOverlay, libRef) - if err != nil { - return nil, err - } - result = append(result, dvs) - } - } - - return result, nil -} - -func (s *DataValuesFlags) env(prefix string, src dataValuesFlagsSource) ([]*workspace.DataValues, error) { - const ( - envKeyPrefix = "_" - envMapKeySep = "__" - ) - - result := []*workspace.DataValues{} - envVars := os.Environ() - - if s.EnvironFunc != nil { - envVars = s.EnvironFunc() - } - - libRef, keyPrefix, err := s.libraryRefAndKey(prefix) - if err != nil { - return nil, err - } - - for _, envVar := range envVars { - pieces := strings.SplitN(envVar, dvsKVSep, 2) - if len(pieces) != 2 { - return nil, fmt.Errorf("Expected env variable to be key-value pair (format: key=value)") - } - - if !strings.HasPrefix(pieces[0], keyPrefix+envKeyPrefix) { - continue - } - - val, err := src.TransformFunc(pieces[1]) - if err != nil { - return nil, fmt.Errorf("Extracting data value from env variable '%s': %s", pieces[0], err) - } - - // '__' gets translated into a '.' 
since periods may not be liked by shells - keyPieces := strings.Split(strings.TrimPrefix(pieces[0], keyPrefix+envKeyPrefix), envMapKeySep) - desc := fmt.Sprintf("(%s arg) %s", src.Name, keyPrefix) - overlay := s.buildOverlay(keyPieces, val, desc, envVar) - - dvs, err := workspace.NewDataValuesWithOptionalLib(overlay, libRef) - if err != nil { - return nil, err - } - - result = append(result, dvs) - } - - return result, nil -} - -func (s *DataValuesFlags) kv(kv string, src dataValuesFlagsSource) (*workspace.DataValues, error) { - pieces := strings.SplitN(kv, dvsKVSep, 2) - if len(pieces) != 2 { - return nil, fmt.Errorf("Expected format key=value") - } - - val, err := src.TransformFunc(pieces[1]) - if err != nil { - return nil, fmt.Errorf("Deserializing value for key '%s': %s", pieces[0], err) - } - - libRef, key, err := s.libraryRefAndKey(pieces[0]) - if err != nil { - return nil, err - } - desc := fmt.Sprintf("(%s arg)", src.Name) - overlay := s.buildOverlay(strings.Split(key, dvsMapKeySep), val, desc, kv) - - return workspace.NewDataValuesWithOptionalLib(overlay, libRef) -} - -func (s *DataValuesFlags) parseYAML(data string, strict bool) (interface{}, error) { - docSet, err := yamlmeta.NewParser(yamlmeta.ParserOpts{Strict: strict}).ParseBytes([]byte(data), "") - if err != nil { - return nil, err - } - return docSet.Items[0].Value, nil -} - -func (s *DataValuesFlags) kvFile(kv string) (*workspace.DataValues, error) { - pieces := strings.SplitN(kv, dvsKVSep, 2) - if len(pieces) != 2 { - return nil, fmt.Errorf("Expected format key=/file/path") - } - - contents, err := s.readFile(pieces[1]) - if err != nil { - return nil, fmt.Errorf("Reading file '%s'", pieces[1]) - } - - libRef, key, err := s.libraryRefAndKey(pieces[0]) - if err != nil { - return nil, err - } - desc := fmt.Sprintf("(data-value-file arg) %s=%s", key, pieces[1]) - overlay := s.buildOverlay(strings.Split(key, dvsMapKeySep), string(contents), desc, string(contents)) - - return workspace.NewDataValuesWithOptionalLib(overlay, libRef) -} - -func (DataValuesFlags) libraryRefAndKey(key string) (string, string, error) { - const ( - libraryKeySep = ":" - ) - - keyPieces := strings.Split(key, libraryKeySep) - - switch len(keyPieces) { - case 1: - return "", key, nil - - case 2: - if len(keyPieces[0]) == 0 { - return "", "", fmt.Errorf("Expected library ref to not be empty") - } - return keyPieces[0], keyPieces[1], nil - - default: - return "", "", fmt.Errorf("Expected at most one library-key separator '%s' in '%s'", libraryKeySep, key) - } -} - -func (s *DataValuesFlags) buildOverlay(keyPieces []string, value interface{}, desc string, line string) *yamlmeta.Document { - resultMap := &yamlmeta.Map{} - currMap := resultMap - var lastMapItem *yamlmeta.MapItem - - pos := filepos.NewPosition(1) - pos.SetFile(desc) - pos.SetLine(line) - - for _, piece := range keyPieces { - newMap := &yamlmeta.Map{} - lastMapItem = &yamlmeta.MapItem{Key: piece, Value: newMap, Position: pos} - - // Data values schemas should be enough to provide key checking/validations. 
- lastMapItem.SetAnnotations(template.NodeAnnotations{ - yttoverlay.AnnotationMatch: template.NodeAnnotation{ - Kwargs: []starlark.Tuple{{ - starlark.String(yttoverlay.MatchAnnotationKwargMissingOK), - starlark.Bool(true), - }}, - }, - }) - - currMap.Items = append(currMap.Items, lastMapItem) - currMap = newMap - } - - lastMapItem.Value = yamlmeta.NewASTFromInterface(value) - - // Explicitly replace entire value at given key - // (this allows to specify non-scalar data values) - existingAnns := template.NewAnnotations(lastMapItem) - existingAnns[yttoverlay.AnnotationReplace] = template.NodeAnnotation{ - Kwargs: []starlark.Tuple{{ - starlark.String(yttoverlay.ReplaceAnnotationKwargOrAdd), - starlark.Bool(true), - }}, - } - lastMapItem.SetAnnotations(existingAnns) - - return &yamlmeta.Document{Value: resultMap, Position: pos} -} - -func (s *DataValuesFlags) readFile(path string) ([]byte, error) { - if s.ReadFileFunc != nil { - return s.ReadFileFunc(path) - } - return ioutil.ReadFile(path) -} diff --git a/vendor/github.com/k14s/ytt/pkg/cmd/template/file_marks.go b/vendor/github.com/k14s/ytt/pkg/cmd/template/file_marks.go deleted file mode 100644 index dcd43095f..000000000 --- a/vendor/github.com/k14s/ytt/pkg/cmd/template/file_marks.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" - "regexp" - "strings" - - "github.com/k14s/ytt/pkg/files" - "github.com/spf13/cobra" -) - -type FileMarksOpts struct { - FileMarks []string -} - -func (s *FileMarksOpts) Set(cmd *cobra.Command) { - cmd.Flags().StringArrayVar(&s.FileMarks, "file-mark", nil, "File mark (ie change file path, mark as non-template) (format: file:key=value) (can be specified multiple times)") -} - -func (s *FileMarksOpts) Apply(filesToProcess []*files.File) ([]*files.File, error) { - var exclusiveForOutputFiles []*files.File - - for _, mark := range s.FileMarks { - pieces := strings.SplitN(mark, ":", 2) - if len(pieces) != 2 { - return nil, fmt.Errorf("Expected file mark '%s' to be in format path:key=value", mark) - } - - path := pieces[0] - - kv := strings.SplitN(pieces[1], "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("Expected file mark '%s' key-value portion to be in format key=value", mark) - } - - var matched bool - - for i, file := range filesToProcess { - if s.fileMarkMatches(file, path) { - matched = true - - switch kv[0] { - case "path": - file.MarkRelativePath(kv[1]) - - case "exclude": - switch kv[1] { - case "true": - filesToProcess[i] = nil - default: - return nil, fmt.Errorf("Unknown value in file mark '%s'", mark) - } - - case "type": - switch kv[1] { - case "yaml-template": // yaml template processing - file.MarkType(files.TypeYAML) - file.MarkTemplate(true) - case "yaml-plain": // no template processing - file.MarkType(files.TypeYAML) - file.MarkTemplate(false) - case "text-template": - file.MarkType(files.TypeText) - file.MarkTemplate(true) - case "text-plain": - file.MarkType(files.TypeText) - file.MarkTemplate(false) - case "starlark": - file.MarkType(files.TypeStarlark) - file.MarkTemplate(false) - case "data": - file.MarkType(files.TypeUnknown) - file.MarkTemplate(false) - default: - return nil, fmt.Errorf("Unknown value in file mark '%s'", mark) - } - - case "for-output": - switch kv[1] { - case "true": - file.MarkForOutput(true) - default: - return nil, fmt.Errorf("Unknown value in file mark '%s'", mark) - } - - case "exclusive-for-output": - switch kv[1] { - case "true": - exclusiveForOutputFiles = 
append(exclusiveForOutputFiles, file) - default: - return nil, fmt.Errorf("Unknown value in file mark '%s'", mark) - } - - default: - return nil, fmt.Errorf("Unknown key in file mark '%s'", mark) - } - } - } - - if !matched { - return nil, fmt.Errorf("Expected file mark '%s' to match at least one file by path, but did not", mark) - } - - // Remove files that were cleared out - filesToProcess = s.clearNils(filesToProcess) - } - - // If there is at least filtered output file, mark all others as non-templates - if len(exclusiveForOutputFiles) > 0 { - for _, file := range filesToProcess { - file.MarkForOutput(false) - } - for _, file := range exclusiveForOutputFiles { - file.MarkForOutput(true) - } - } - - return filesToProcess, nil -} - -var ( - quotedMultiLevel = regexp.QuoteMeta("**/*") - quotedSingleLevel = regexp.QuoteMeta("*") -) - -func (s *FileMarksOpts) fileMarkMatches(file *files.File, path string) bool { - path = regexp.QuoteMeta(path) - path = strings.Replace(path, quotedMultiLevel, ".+", 1) - path = strings.Replace(path, quotedSingleLevel, "[^/]+", 1) - return regexp.MustCompile("^" + path + "$").MatchString(file.OriginalRelativePath()) -} - -func (s *FileMarksOpts) clearNils(input []*files.File) []*files.File { - var output []*files.File - for _, file := range input { - if file != nil { - output = append(output, file) - } - } - return output -} diff --git a/vendor/github.com/k14s/ytt/pkg/cmd/template/regular_input.go b/vendor/github.com/k14s/ytt/pkg/cmd/template/regular_input.go deleted file mode 100644 index 219e2328a..000000000 --- a/vendor/github.com/k14s/ytt/pkg/cmd/template/regular_input.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" - "io" - - "github.com/k14s/ytt/pkg/cmd/ui" - "github.com/k14s/ytt/pkg/files" - "github.com/k14s/ytt/pkg/yamlmeta" - "github.com/spf13/cobra" -) - -const ( - regularFilesOutputTypeYAML = "yaml" - regularFilesOutputTypeJSON = "json" - regularFilesOutputTypePos = "pos" -) - -type RegularFilesSourceOpts struct { - files []string - - outputDir string - OutputFiles string - OutputType string - - files.SymlinkAllowOpts -} - -func (s *RegularFilesSourceOpts) Set(cmd *cobra.Command) { - cmd.Flags().StringArrayVarP(&s.files, "file", "f", nil, "File (ie local path, HTTP URL, -) (can be specified multiple times)") - - cmd.Flags().StringVar(&s.outputDir, "dangerous-emptied-output-directory", "", - "Delete given directory, and then create it with output files") - cmd.Flags().StringVar(&s.OutputFiles, "output-files", "", "Add output files to given directory") - - cmd.Flags().StringVarP(&s.OutputType, "output", "o", regularFilesOutputTypeYAML, "Output type (yaml, json, pos)") - - cmd.Flags().BoolVar(&s.SymlinkAllowOpts.AllowAll, "dangerous-allow-all-symlink-destinations", false, - "Symlinks to all destinations are allowed") - cmd.Flags().StringSliceVar(&s.SymlinkAllowOpts.AllowedDstPaths, "allow-symlink-destination", nil, - "File paths to which symlinks are allowed (can be specified multiple times)") -} - -type RegularFilesSource struct { - opts RegularFilesSourceOpts - ui ui.UI -} - -func NewRegularFilesSource(opts RegularFilesSourceOpts, ui ui.UI) *RegularFilesSource { - return &RegularFilesSource{opts, ui} -} - -func (s *RegularFilesSource) HasInput() bool { return len(s.opts.files) > 0 } -func (s *RegularFilesSource) HasOutput() bool { return true } - -func (s *RegularFilesSource) Input() (Input, error) { - filesToProcess, err := 
files.NewSortedFilesFromPaths(s.opts.files, s.opts.SymlinkAllowOpts) - if err != nil { - return Input{}, err - } - - return Input{Files: filesToProcess}, nil -} - -func (s *RegularFilesSource) Output(out Output) error { - if out.Err != nil { - return out.Err - } - - nonYamlFileNames := []string{} - switch { - case len(s.opts.outputDir) > 0: - return files.NewOutputDirectory(s.opts.outputDir, out.Files, s.ui).Write() - case len(s.opts.OutputFiles) > 0: - return files.NewOutputDirectory(s.opts.OutputFiles, out.Files, s.ui).WriteFiles() - default: - for _, file := range out.Files { - if file.Type() != files.TypeYAML { - nonYamlFileNames = append(nonYamlFileNames, file.RelativePath()) - } - } - } - - var printerFunc func(io.Writer) yamlmeta.DocumentPrinter - - switch s.opts.OutputType { - case regularFilesOutputTypeYAML: - printerFunc = nil - case regularFilesOutputTypeJSON: - printerFunc = func(w io.Writer) yamlmeta.DocumentPrinter { return yamlmeta.NewJSONPrinter(w) } - case regularFilesOutputTypePos: - printerFunc = func(w io.Writer) yamlmeta.DocumentPrinter { - return yamlmeta.WrappedFilePositionPrinter{yamlmeta.NewFilePositionPrinter(w)} - } - default: - return fmt.Errorf("Unknown output type '%s'", s.opts.OutputType) - } - - combinedDocBytes, err := out.DocSet.AsBytesWithPrinter(printerFunc) - if err != nil { - return fmt.Errorf("Marshaling combined template result: %s", err) - } - - s.ui.Debugf("### result\n") - s.ui.Printf("%s", combinedDocBytes) // no newline - - if len(nonYamlFileNames) > 0 { - s.ui.Warnf("\n" + `Warning: Found Non-YAML templates in input. Non-YAML templates are not rendered to standard output. -If you want to include those results, use the --output-files or --dangerous-emptied-output-directory flag.` + "\n") - } - return nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/cmd/ui/tty.go b/vendor/github.com/k14s/ytt/pkg/cmd/ui/tty.go deleted file mode 100644 index 235773c13..000000000 --- a/vendor/github.com/k14s/ytt/pkg/cmd/ui/tty.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package ui - -import ( - "fmt" - "io" - "os" -) - -type TTY struct { - debug bool - stdout io.Writer - stderr io.Writer -} - -var _ UI = TTY{} - -func NewTTY(debug bool) TTY { - return TTY{debug, os.Stdout, os.Stderr} -} - -func (t TTY) Printf(str string, args ...interface{}) { - fmt.Fprintf(t.stdout, str, args...) -} - -func (t TTY) Warnf(str string, args ...interface{}) { - fmt.Fprintf(t.stderr, str, args...) -} - -func (t TTY) Debugf(str string, args ...interface{}) { - if t.debug { - fmt.Fprintf(t.stderr, str, args...) - } -} - -func (t TTY) DebugWriter() io.Writer { - if t.debug { - return os.Stderr - } - return noopWriter{} -} - -type noopWriter struct{} - -var _ io.Writer = noopWriter{} - -func (w noopWriter) Write(data []byte) (int, error) { return len(data), nil } - -// Used for testing whether TTY writes correct output to stdout/stderr -func NewCustomWriterTTY(debug bool, stdout, stderr io.Writer) TTY { - if stdout == nil { - stdout = os.Stdout - } - if stderr == nil { - stderr = os.Stderr - } - return TTY{debug, stdout, stderr} -} diff --git a/vendor/github.com/k14s/ytt/pkg/cmd/ui/ui.go b/vendor/github.com/k14s/ytt/pkg/cmd/ui/ui.go deleted file mode 100644 index 88851fa7c..000000000 --- a/vendor/github.com/k14s/ytt/pkg/cmd/ui/ui.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package ui - -import ( - "io" -) - -type UI interface { - Printf(string, ...interface{}) - Debugf(string, ...interface{}) - Warnf(str string, args ...interface{}) - DebugWriter() io.Writer -} diff --git a/vendor/github.com/k14s/ytt/pkg/filepos/position.go b/vendor/github.com/k14s/ytt/pkg/filepos/position.go deleted file mode 100644 index aae36671c..000000000 --- a/vendor/github.com/k14s/ytt/pkg/filepos/position.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package filepos - -import ( - "fmt" -) - -type Position struct { - lineNum *int // 1 based - file string - line string - known bool - fromMemory bool -} - -func NewPosition(lineNum int) *Position { - if lineNum <= 0 { - panic("Lines are 1 based") - } - return &Position{lineNum: &lineNum, known: true} -} - -// NewUnknownPosition is equivalent of zero value *Position -func NewUnknownPosition() *Position { - return &Position{} -} - -func NewUnknownPositionWithKeyVal(k, v interface{}, separator string) *Position { - return &Position{line: fmt.Sprintf("%v%v %#v", k, separator, v), fromMemory: true} -} - -func (p *Position) SetFile(file string) { p.file = file } - -func (p *Position) SetLine(line string) { p.line = line } - -func (p *Position) IsKnown() bool { return p != nil && p.known } - -func (p *Position) FromMemory() bool { return p.fromMemory } - -func (p *Position) LineNum() int { - if !p.IsKnown() { - panic("Position is unknown") - } - if p.lineNum == nil { - panic("Position was not properly initialized") - } - return *p.lineNum -} - -func (p *Position) GetLine() string { - return p.line -} - -func (p *Position) AsString() string { - return "line " + p.AsCompactString() -} - -func (p *Position) GetFile() string { - return p.file -} - -func (p *Position) AsCompactString() string { - filePrefix := p.file - if len(filePrefix) > 0 { - filePrefix += ":" - } - if p.IsKnown() { - return fmt.Sprintf("%s%d", filePrefix, p.LineNum()) - } - return fmt.Sprintf("%s?", filePrefix) -} - -func (p *Position) AsIntString() string { - if p.IsKnown() { - return fmt.Sprintf("%d", p.LineNum()) - } - return "?" -} - -func (p *Position) As4DigitString() string { - if p.IsKnown() { - return fmt.Sprintf("%4d", p.LineNum()) - } - return "????" -} - -func (p *Position) DeepCopy() *Position { - if p == nil { - return nil - } - newPos := &Position{file: p.file, known: p.known, line: p.line} - if p.lineNum != nil { - lineVal := *p.lineNum - newPos.lineNum = &lineVal - } - return newPos -} - -func (p *Position) DeepCopyWithLineOffset(offset int) *Position { - if !p.IsKnown() { - panic("Position is unknown") - } - if offset < 0 { - panic("Unexpected line offset") - } - newPos := p.DeepCopy() - *newPos.lineNum += offset - return newPos -} diff --git a/vendor/github.com/k14s/ytt/pkg/files/file.go b/vendor/github.com/k14s/ytt/pkg/files/file.go deleted file mode 100644 index 9be81954e..000000000 --- a/vendor/github.com/k14s/ytt/pkg/files/file.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package files - -import ( - "fmt" - "os" - "path/filepath" - "sort" - "strings" -) - -var ( - yamlExts = []string{".yaml", ".yml"} - starlarkExts = []string{".star"} - textExts = []string{".txt"} - libraryExt = "lib" // eg .lib.yaml -) - -type Type int - -const ( - TypeUnknown Type = iota - TypeYAML - TypeText - TypeStarlark -) - -type File struct { - src Source - relPath string - - markedRelPath *string - markedType *Type - markedTemplate *bool - markedForOutput *bool - - order int // lowest comes first; 0 is used to indicate unsorted -} - -func NewSortedFilesFromPaths(paths []string, opts SymlinkAllowOpts) ([]*File, error) { - var groupedFiles [][]*File - - for _, path := range paths { - var files []*File - - relativePath := "" - pathPieces := strings.Split(path, "=") - - switch len(pathPieces) { - case 1: - // do nothing - case 2: - relativePath = pathPieces[0] - path = pathPieces[1] - default: - return nil, fmt.Errorf("Expected file '%s' to only have single '=' sign to for relative path assignment", path) - } - - switch { - case path == "-": - file, err := NewFileFromSource(NewCachedSource(NewStdinSource())) - if err != nil { - return nil, err - } - if len(relativePath) > 0 { - file.MarkRelativePath(relativePath) - } - files = append(files, file) - - case strings.HasPrefix(path, "http://") || strings.HasPrefix(path, "https://"): - file, err := NewFileFromSource(NewCachedSource(NewHTTPSource(path))) - if err != nil { - return nil, err - } - if len(relativePath) > 0 { - file.MarkRelativePath(relativePath) - } - files = append(files, file) - - default: - fileInfo, err := os.Lstat(path) - if err != nil { - return nil, fmt.Errorf("Checking file '%s': %s", path, err) - } - - if fileInfo.IsDir() { - err := filepath.Walk(path, func(walkedPath string, fi os.FileInfo, err error) error { - if err != nil || fi.IsDir() { - return err - } - regLocalSource, err := NewRegularFileLocalSource(walkedPath, path, fi, opts) - if err != nil { - return err - } - file, err := NewFileFromSource(NewCachedSource(regLocalSource)) - if err != nil { - return err - } - // TODO relative path for directories? - files = append(files, file) - return nil - }) - if err != nil { - return nil, fmt.Errorf("Listing files '%s': %s", path, err) - } - } else { - regLocalSource, err := NewRegularFileLocalSource(path, "", fileInfo, opts) - if err != nil { - return nil, err - } - file, err := NewFileFromSource(NewCachedSource(regLocalSource)) - if err != nil { - return nil, err - } - if len(relativePath) > 0 { - file.MarkRelativePath(relativePath) - } - files = append(files, file) - } - } - - groupedFiles = append(groupedFiles, files) - } - - var allFiles []*File - currOrder := 1 - - for _, files := range groupedFiles { - // Only sort files alphanum within a group - sort.Slice(files, func(i, j int) bool { - return files[i].RelativePath() < files[j].RelativePath() - }) - - for _, file := range files { - file.order = currOrder - currOrder++ - } - - allFiles = append(allFiles, files...) 
- } - - return allFiles, nil -} - -func NewSortedFiles(files []*File) []*File { - currOrder := 1 - for _, file := range files { - file.order = currOrder - currOrder++ - } - return files -} - -func NewFileFromSource(fileSrc Source) (*File, error) { - relPath, err := fileSrc.RelativePath() - if err != nil { - return nil, fmt.Errorf("Calculating relative path for '%s': %s", fileSrc, err) - } - - return &File{src: fileSrc, relPath: filepath.ToSlash(relPath)}, nil -} - -func MustNewFileFromSource(fileSrc Source) *File { - file, err := NewFileFromSource(fileSrc) - if err != nil { - panic(err) - } - return file -} - -func (r *File) Description() string { return r.src.Description() } - -func (r *File) OriginalRelativePath() string { return r.relPath } - -func (r *File) MarkRelativePath(relPath string) { r.markedRelPath = &relPath } - -func (r *File) RelativePath() string { - if r.markedRelPath != nil { - return *r.markedRelPath - } - return r.relPath -} - -func (r *File) Bytes() ([]byte, error) { return r.src.Bytes() } - -func (r *File) MarkType(t Type) { r.markedType = &t } - -func (r *File) Type() Type { - if r.markedType != nil { - return *r.markedType - } - - switch { - case r.matchesExt(yamlExts): - return TypeYAML - case r.matchesExt(starlarkExts): - return TypeStarlark - case r.matchesExt(textExts): - return TypeText - default: - return TypeUnknown - } -} - -func (r *File) MarkForOutput(forOutput bool) { r.markedForOutput = &forOutput } - -func (r *File) IsForOutput() bool { - if r.markedForOutput != nil { - return *r.markedForOutput - } - if r.markedTemplate != nil { - // it may still be for output, even though it's not a template - if *r.markedTemplate { - return true - } - } - return r.isTemplate() -} - -func (r *File) MarkTemplate(template bool) { r.markedTemplate = &template } - -func (r *File) IsTemplate() bool { - if r.markedTemplate != nil { - return *r.markedTemplate - } - return r.isTemplate() -} - -func (r *File) isTemplate() bool { - t := r.Type() - return !r.IsLibrary() && (t == TypeYAML || t == TypeText) -} - -func (r *File) IsLibrary() bool { - exts := strings.Split(filepath.Base(r.RelativePath()), ".") - - if len(exts) > 2 && exts[len(exts)-2] == libraryExt { - return true - } - - // make exception for starlark files as they are just pure code - return r.matchesExt(starlarkExts) -} - -func (r *File) matchesExt(exts []string) bool { - filename := filepath.Base(r.RelativePath()) - for _, ext := range exts { - if strings.HasSuffix(filename, ext) { - return true - } - } - return false -} - -func (r *File) OrderLess(otherFile *File) bool { - if r.order == 0 || otherFile.order == 0 { - panic("Missing file order assignment") - } - return r.order < otherFile.order -} - -func NewRegularFileLocalSource(path, dir string, fi os.FileInfo, opts SymlinkAllowOpts) (LocalSource, error) { - isRegFile := (fi.Mode() & os.ModeType) == 0 - isSymlink := (fi.Mode() & os.ModeSymlink) != 0 - isNamedPipe := (fi.Mode() & os.ModeNamedPipe) != 0 // allow pipes (`ytt -f <(echo "---")`) - - switch { - case isRegFile || isSymlink || isNamedPipe: - // do nothing - default: - return LocalSource{}, fmt.Errorf("Expected file '%s' to be a regular file, but was not", path) - } - - if isSymlink { - err := Symlink{path}.IsAllowed(opts) - if err != nil { - return LocalSource{}, fmt.Errorf("Checking symlink file '%s': %s", path, err) - } - } - - return NewLocalSource(path, dir), nil -} - -const ( - pathSeparator = "/" -) - -func SplitPath(path string) ([]string, string) { - pieces := strings.Split(path, 
pathSeparator) - if len(pieces) == 1 { - return nil, pieces[0] - } - return pieces[:len(pieces)-1], pieces[len(pieces)-1] -} - -func JoinPath(pieces []string) string { - return strings.Join(pieces, pathSeparator) -} - -func IsRootPath(path string) bool { - return strings.HasPrefix(path, pathSeparator) -} - -func StripRootPath(path string) string { - return path[len(pathSeparator):] -} - -func MakeRootPath(path string) string { - return pathSeparator + path -} diff --git a/vendor/github.com/k14s/ytt/pkg/files/output_directory.go b/vendor/github.com/k14s/ytt/pkg/files/output_directory.go deleted file mode 100644 index e8ce17ad4..000000000 --- a/vendor/github.com/k14s/ytt/pkg/files/output_directory.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package files - -import ( - "fmt" - "os" - "strings" - - "github.com/k14s/ytt/pkg/cmd/ui" -) - -var ( - suspiciousOutputDirectoryPaths = []string{"/", ".", "./", ""} -) - -type OutputDirectory struct { - path string - files []OutputFile - ui ui.UI -} - -func NewOutputDirectory(path string, files []OutputFile, ui ui.UI) *OutputDirectory { - return &OutputDirectory{path, files, ui} -} - -func (d *OutputDirectory) Files() []OutputFile { return d.files } - -func (d *OutputDirectory) Write() error { - filePaths := map[string]struct{}{} - - for _, file := range d.files { - path := file.RelativePath() - if _, found := filePaths[path]; found { - return fmt.Errorf("Multiple files have same output destination paths: %s", path) - } - filePaths[path] = struct{}{} - } - - for _, path := range suspiciousOutputDirectoryPaths { - if d.path == path { - return fmt.Errorf("Expected output directory path to not be one of '%s'", - strings.Join(suspiciousOutputDirectoryPaths, "', '")) - } - } - - err := os.RemoveAll(d.path) - if err != nil { - return err - } - - return d.WriteFiles() -} - -func (d *OutputDirectory) WriteFiles() error { - err := os.MkdirAll(d.path, 0700) - if err != nil { - return err - } - - for _, file := range d.files { - d.ui.Printf("creating: %s\n", file.Path(d.path)) - - err := file.Create(d.path) - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/files/output_file.go b/vendor/github.com/k14s/ytt/pkg/files/output_file.go deleted file mode 100644 index 0b4784b36..000000000 --- a/vendor/github.com/k14s/ytt/pkg/files/output_file.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package files - -import ( - "os" - "path/filepath" -) - -type OutputFile struct { - relativePath string - data []byte - markedType Type -} - -func NewOutputFile(relativePath string, data []byte, markedType Type) OutputFile { - return OutputFile{relativePath, data, markedType} -} - -func (f OutputFile) RelativePath() string { return f.relativePath } -func (f OutputFile) Bytes() []byte { return f.data } - -func (f OutputFile) Path(dirPath string) string { - return filepath.Join(dirPath, f.relativePath) -} - -func (f OutputFile) Type() Type { - return f.markedType -} - -func (f OutputFile) Create(dirPath string) error { - resultPath := f.Path(dirPath) - - err := os.MkdirAll(filepath.Dir(resultPath), 0700) - if err != nil { - return err - } - - fd, err := os.OpenFile(resultPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0700) - if err != nil { - return err - } - - defer fd.Close() - - _, err = fd.Write(f.data) - return err -} diff --git a/vendor/github.com/k14s/ytt/pkg/files/sources.go b/vendor/github.com/k14s/ytt/pkg/files/sources.go deleted file mode 100644 index 6573eca7c..000000000 --- a/vendor/github.com/k14s/ytt/pkg/files/sources.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package files - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "path" - "path/filepath" - "strings" -) - -type Source interface { - Description() string - RelativePath() (string, error) - Bytes() ([]byte, error) -} - -var _ []Source = []Source{BytesSource{}, StdinSource{}, - LocalSource{}, HTTPSource{}, &CachedSource{}} - -type BytesSource struct { - path string - data []byte -} - -func NewBytesSource(path string, data []byte) BytesSource { return BytesSource{path, data} } - -func (s BytesSource) Description() string { return s.path } -func (s BytesSource) RelativePath() (string, error) { return s.path, nil } -func (s BytesSource) Bytes() ([]byte, error) { return s.data, nil } - -type StdinSource struct { - bytes []byte - err error -} - -func NewStdinSource() StdinSource { - // only read stdin once - bs, err := ioutil.ReadAll(os.Stdin) - return StdinSource{bs, err} -} - -func (s StdinSource) Description() string { return "stdin.yml" } -func (s StdinSource) RelativePath() (string, error) { return "stdin.yml", nil } -func (s StdinSource) Bytes() ([]byte, error) { return s.bytes, s.err } - -type LocalSource struct { - path string - dir string -} - -func NewLocalSource(path, dir string) LocalSource { return LocalSource{path, dir} } - -func (s LocalSource) Description() string { return fmt.Sprintf("file '%s'", s.path) } - -func (s LocalSource) RelativePath() (string, error) { - if s.dir == "" { - return filepath.Base(s.path), nil - } - - cleanPath, err := filepath.Abs(filepath.Clean(s.path)) - if err != nil { - return "", err - } - - cleanDir, err := filepath.Abs(filepath.Clean(s.dir)) - if err != nil { - return "", err - } - - if strings.HasPrefix(cleanPath, cleanDir) { - result := strings.TrimPrefix(cleanPath, cleanDir) - result = strings.TrimPrefix(result, string(os.PathSeparator)) - return result, nil - } - - return "", fmt.Errorf("unknown relative path for %s", s.path) -} - -func (s LocalSource) Bytes() ([]byte, error) { return ioutil.ReadFile(s.path) } - -type HTTPSource struct { - url string -} - -func NewHTTPSource(path string) HTTPSource { return HTTPSource{path} } - -func (s HTTPSource) Description() string { - return fmt.Sprintf("HTTP URL '%s'", s.url) -} - -func (s HTTPSource) RelativePath() (string, 
error) { return path.Base(s.url), nil } - -func (s HTTPSource) Bytes() ([]byte, error) { - resp, err := http.Get(s.url) - if err != nil { - return nil, fmt.Errorf("Requesting URL '%s': %s", s.url, err) - } - - defer resp.Body.Close() - - result, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("Reading URL '%s': %s", s.url, err) - } - - return result, nil -} - -type CachedSource struct { - src Source - - bytesFetched bool - bytes []byte - bytesErr error -} - -func NewCachedSource(src Source) *CachedSource { return &CachedSource{src: src} } - -func (s *CachedSource) Description() string { return s.src.Description() } -func (s *CachedSource) RelativePath() (string, error) { return s.src.RelativePath() } - -func (s *CachedSource) Bytes() ([]byte, error) { - if s.bytesFetched { - return s.bytes, s.bytesErr - } - - s.bytesFetched = true - s.bytes, s.bytesErr = s.src.Bytes() - - return s.bytes, s.bytesErr -} diff --git a/vendor/github.com/k14s/ytt/pkg/files/symlink.go b/vendor/github.com/k14s/ytt/pkg/files/symlink.go deleted file mode 100644 index 056fe8613..000000000 --- a/vendor/github.com/k14s/ytt/pkg/files/symlink.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package files - -import ( - "fmt" - "path/filepath" - "regexp" - "strings" -) - -type Symlink struct { - path string -} - -type SymlinkAllowOpts struct { - AllowAll bool - AllowedDstPaths []string -} - -var ( - symlinkPipeErrMsg = regexp.QuoteMeta("lstat /proc/NUM/fd/pipe:[NUM]: no such file or directory") - symlinkPipeErr = regexp.MustCompile("^" + strings.Replace(symlinkPipeErrMsg, "NUM", "\\d+", -1) + "$") -) - -func (s Symlink) IsAllowed(opts SymlinkAllowOpts) error { - if opts.AllowAll { - return nil - } - - dstPath, err := filepath.EvalSymlinks(s.path) - if err != nil { - // Note that on Linux resolving symlink /dev/fd/3 fails: - // "lstat /proc/3719/fd/pipe:[903476724]: no such file or directory" - // Since file doesnt actually exist on FS, it could not have been tricked to be included. 
- if symlinkPipeErr.MatchString(err.Error()) { - return nil - } - return fmt.Errorf("Eval symlink: %s", err) - } - - for _, allowedDstPath := range opts.AllowedDstPaths { - matched, err := s.isIn(dstPath, allowedDstPath) - if matched || err != nil { - return err - } - } - - return fmt.Errorf("Expected symlink file '%s' -> '%s' to be allowed, but was not", s.path, dstPath) -} - -func (s Symlink) isIn(path, allowedPath string) (bool, error) { - var err error - - // Abs runs clean on the result - path, err = filepath.Abs(path) - if err != nil { - return false, fmt.Errorf("Abs path '%s': %s", path, err) - } - - allowedPath, err = filepath.Abs(allowedPath) - if err != nil { - return false, fmt.Errorf("Abs path '%s': %s", allowedPath, err) - } - - pathPieces := s.pathPieces(path) - allowedPathPieces := s.pathPieces(allowedPath) - - if len(allowedPathPieces) > len(pathPieces) { - return false, nil - } - - for i := range allowedPathPieces { - if allowedPathPieces[i] != pathPieces[i] { - return false, nil - } - } - - return true, nil -} - -func (s Symlink) pathPieces(path string) []string { - if path == string(filepath.Separator) { - return []string{""} - } - return strings.Split(path, string(filepath.Separator)) -} diff --git a/vendor/github.com/k14s/ytt/pkg/orderedmap/convert.go b/vendor/github.com/k14s/ytt/pkg/orderedmap/convert.go deleted file mode 100644 index c29db025e..000000000 --- a/vendor/github.com/k14s/ytt/pkg/orderedmap/convert.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package orderedmap - -import ( - "fmt" - "sort" -) - -type Conversion struct { - Object interface{} -} - -func (c Conversion) AsUnorderedStringMaps() interface{} { - return c.asUnorderedStringMaps(c.Object) -} - -func (c Conversion) asUnorderedStringMaps(object interface{}) interface{} { - switch typedObj := object.(type) { - case map[interface{}]interface{}: - panic("Expected *orderedmap.Map instead of map[interface{}]interface{} in asUnorderedStringMaps") - - case map[string]interface{}: - panic("Expected *orderedmap.Map instead of map[string]interface{} in asUnorderedStringMaps") - - case *Map: - result := map[string]interface{}{} - typedObj.Iterate(func(k, v interface{}) { - if strK, ok := k.(string); ok { - result[strK] = c.asUnorderedStringMaps(v) - } else { - panic("Expected key to be string") - } - }) - return result - - case []interface{}: - for i, item := range typedObj { - typedObj[i] = c.asUnorderedStringMaps(item) - } - return typedObj - - default: - return typedObj - } -} - -func (c Conversion) FromUnorderedMaps() interface{} { - return c.fromUnorderedMaps(c.Object) -} - -func (c Conversion) fromUnorderedMaps(object interface{}) interface{} { - switch typedObj := object.(type) { - case map[interface{}]interface{}: - result := NewMap() - for _, key := range c.sortedMapKeys(c.mapKeysFromInterfaceMap(typedObj)) { - result.Set(key, c.fromUnorderedMaps(typedObj[key])) - } - return result - - case map[string]interface{}: - result := NewMap() - for _, key := range c.sortedMapKeys(c.mapKeysFromStringMap(typedObj)) { - result.Set(key, c.fromUnorderedMaps(typedObj[key.(string)])) - } - return result - - case *Map: - panic("Expected map[interface{}]interface{} instead of *unordered.Map in fromUnorderedMaps") - - case []interface{}: - for i, item := range typedObj { - typedObj[i] = c.fromUnorderedMaps(item) - } - return typedObj - - default: - return typedObj - } -} - -func (Conversion) mapKeysFromInterfaceMap(m map[interface{}]interface{}) 
[]interface{} { - var keys []interface{} - for k := range m { - keys = append(keys, k) - } - return keys -} - -func (Conversion) mapKeysFromStringMap(m map[string]interface{}) []interface{} { - var keys []interface{} - for k := range m { - keys = append(keys, k) - } - return keys -} - -func (Conversion) sortedMapKeys(keys []interface{}) []interface{} { - sort.Slice(keys, func(i, j int) bool { - iStr := fmt.Sprintf("%s", keys[i]) - jStr := fmt.Sprintf("%s", keys[j]) - return iStr < jStr - }) - return keys -} diff --git a/vendor/github.com/k14s/ytt/pkg/orderedmap/map.go b/vendor/github.com/k14s/ytt/pkg/orderedmap/map.go deleted file mode 100644 index 661662d7b..000000000 --- a/vendor/github.com/k14s/ytt/pkg/orderedmap/map.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package orderedmap - -import ( - "encoding/json" - "reflect" -) - -type Map struct { - items []MapItem -} - -type MapItem struct { - Key interface{} - Value interface{} -} - -func NewMap() *Map { - return &Map{} -} - -func NewMapWithItems(items []MapItem) *Map { - return &Map{items} -} - -func (m *Map) Set(key, value interface{}) { - for i, item := range m.items { - if m.isKeyEq(item.Key, key) { - item.Value = value - m.items[i] = item - return - } - } - m.items = append(m.items, MapItem{key, value}) -} - -func (m *Map) Get(key interface{}) (interface{}, bool) { - for _, item := range m.items { - if m.isKeyEq(item.Key, key) { - return item.Value, true - } - } - return nil, false -} - -func (m *Map) Delete(key interface{}) bool { - for i, item := range m.items { - if m.isKeyEq(item.Key, key) { - m.items = append(m.items[:i], m.items[i+1:]...) - return true - } - } - return false -} - -func (m *Map) isKeyEq(key1, key2 interface{}) bool { - return reflect.DeepEqual(key1, key2) -} - -func (m *Map) Keys() (keys []interface{}) { - m.Iterate(func(k, _ interface{}) { - keys = append(keys, k) - }) - return -} - -func (m *Map) Iterate(iterFunc func(k, v interface{})) { - for _, item := range m.items { - iterFunc(item.Key, item.Value) - } -} - -func (m *Map) IterateErr(iterFunc func(k, v interface{}) error) error { - for _, item := range m.items { - err := iterFunc(item.Key, item.Value) - if err != nil { - return err - } - } - return nil -} - -func (m *Map) Len() int { return len(m.items) } - -// Below methods disallow marshaling of Map directly -// TODO yaml library is not imported here -// var _ []yaml.Marshaler = []yaml.Marshaler{&Map{}} -var _ []json.Marshaler = []json.Marshaler{&Map{}} - -func (*Map) MarshalYAML() (interface{}, error) { panic("Unexpected marshaling of *orderedmap.Map") } -func (*Map) MarshalJSON() ([]byte, error) { panic("Unexpected marshaling of *orderedmap.Map") } diff --git a/vendor/github.com/k14s/ytt/pkg/schema/annotations.go b/vendor/github.com/k14s/ytt/pkg/schema/annotations.go deleted file mode 100644 index 46ded47e5..000000000 --- a/vendor/github.com/k14s/ytt/pkg/schema/annotations.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2021 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package schema - -import ( - "fmt" - "sort" - - "github.com/k14s/ytt/pkg/filepos" - - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -const ( - AnnotationNullable template.AnnotationName = "schema/nullable" - AnnotationType template.AnnotationName = "schema/type" - TypeAnnotationKwargAny string = "any" -) - -type Annotation interface { - NewTypeFromAnn() yamlmeta.Type -} - -type TypeAnnotation struct { - any bool - inferredType yamlmeta.Type - itemPosition *filepos.Position -} - -type NullableAnnotation struct { - providedValueType yamlmeta.Type - itemPosition *filepos.Position -} - -func NewTypeAnnotation(ann template.NodeAnnotation, inferredType yamlmeta.Type, pos *filepos.Position) (*TypeAnnotation, error) { - if len(ann.Kwargs) == 0 { - return nil, schemaAssertionError{ - position: pos, - description: fmt.Sprintf("expected @%v annotation to have keyword argument and value", AnnotationType), - expected: "valid keyword argument and value", - found: "missing keyword argument and value", - hints: []string{fmt.Sprintf("Supported key-value pairs are '%v=True', '%v=False'", TypeAnnotationKwargAny, TypeAnnotationKwargAny)}, - } - } - typeAnn := &TypeAnnotation{inferredType: inferredType, itemPosition: pos} - for _, kwarg := range ann.Kwargs { - argName, err := core.NewStarlarkValue(kwarg[0]).AsString() - if err != nil { - return nil, err - } - - switch argName { - case TypeAnnotationKwargAny: - isAnyType, err := core.NewStarlarkValue(kwarg[1]).AsBool() - if err != nil { - return nil, schemaAssertionError{ - position: pos, - description: "unknown @schema/type annotation keyword argument", - expected: "starlark.Bool", - found: fmt.Sprintf("%T", kwarg[1]), - hints: []string{fmt.Sprintf("Supported kwargs are '%v'", TypeAnnotationKwargAny)}, - } - } - typeAnn.any = isAnyType - - default: - return nil, schemaAssertionError{ - position: pos, - description: "unknown @schema/type annotation keyword argument", - expected: "A valid kwarg", - found: argName, - hints: []string{fmt.Sprintf("Supported kwargs are '%v'", TypeAnnotationKwargAny)}, - } - } - } - return typeAnn, nil -} - -func NewNullableAnnotation(ann template.NodeAnnotation, valueType yamlmeta.Type, pos *filepos.Position) (*NullableAnnotation, error) { - if len(ann.Kwargs) != 0 { - return nil, fmt.Errorf("expected @%v annotation to not contain any keyword arguments", AnnotationNullable) - } - - return &NullableAnnotation{valueType, pos}, nil -} - -func (t *TypeAnnotation) NewTypeFromAnn() yamlmeta.Type { - if t.any { - return &AnyType{ValueType: t.inferredType, Position: t.itemPosition} - } - return nil -} - -func (t *TypeAnnotation) IsAny() bool { - return t.any -} - -func (n *NullableAnnotation) NewTypeFromAnn() yamlmeta.Type { - return &NullType{ValueType: n.providedValueType, Position: n.itemPosition} -} - -func collectAnnotations(item yamlmeta.Node) ([]Annotation, error) { - var anns []Annotation - - for _, annotation := range []template.AnnotationName{AnnotationType, AnnotationNullable} { - ann, err := processOptionalAnnotation(item, annotation) - if err != nil { - return nil, err - } - if ann != nil { - anns = append(anns, ann) - } - } - return anns, nil -} - -func processOptionalAnnotation(node yamlmeta.Node, optionalAnnotation template.AnnotationName) (Annotation, error) { - nodeAnnotations := template.NewAnnotations(node) - - if nodeAnnotations.Has(optionalAnnotation) { - ann, _ := nodeAnnotations[optionalAnnotation] - - 
wrappedValueType, err := inferTypeFromValue(node.GetValues()[0], node.GetPosition()) - if err != nil { - return nil, err - } - - switch optionalAnnotation { - case AnnotationNullable: - nullAnn, err := NewNullableAnnotation(ann, wrappedValueType, node.GetPosition()) - if err != nil { - return nil, err - } - return nullAnn, nil - case AnnotationType: - typeAnn, err := NewTypeAnnotation(ann, wrappedValueType, node.GetPosition()) - if err != nil { - return nil, err - } - return typeAnn, nil - } - } - - return nil, nil -} - -func getTypeFromAnnotations(anns []Annotation) yamlmeta.Type { - annsCopy := append([]Annotation{}, anns...) - - if len(annsCopy) == 0 { - return nil - } - - // allow Configuration Author to annotate "nullable" as a fallback if "any" is false. - preferAnyTypeOverNullableType := func(i, j int) bool { - if typeAnn, ok := annsCopy[i].(*TypeAnnotation); ok && typeAnn.IsAny() { - return true - } - return false - } - - sort.Slice(annsCopy, preferAnyTypeOverNullableType) - return annsCopy[0].NewTypeFromAnn() -} diff --git a/vendor/github.com/k14s/ytt/pkg/schema/error.go b/vendor/github.com/k14s/ytt/pkg/schema/error.go deleted file mode 100644 index 39133b8e5..000000000 --- a/vendor/github.com/k14s/ytt/pkg/schema/error.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package schema - -import ( - "bytes" - "fmt" - "log" - "strings" - "text/template" - - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -const schemaErrorReportTemplate = ` -{{- if .Summary}} -{{.Summary}} -{{addBreak .Summary}} -{{- end}} -{{- range .AssertionFailures}} -{{- if .Description}} -{{.Description}} -{{- end}} - -{{- if .FromMemory}} - -{{.SourceName}}: -{{pad "#" ""}} -{{pad "#" ""}} {{.Source}} -{{pad "#" ""}} -{{- else}} - -{{.FileName}}: -{{pad "|" ""}} -{{pad "|" .FilePos}} {{.Source}} -{{pad "|" ""}} -{{- end}} - -{{with .Found}}{{pad "=" ""}} found: {{.}}{{end}} -{{with .Expected}}{{pad "=" ""}} expected: {{.}}{{end}} -{{- range .Hints}} -{{pad "=" ""}} hint: {{.}} -{{- end}} -{{- end}} -{{.MiscErrorMessage}} -` - -func NewSchemaError(summary string, errs ...error) error { - var failures []assertionFailure - var miscErrorMessage string - for _, err := range errs { - if typeCheckAssertionErr, ok := err.(schemaAssertionError); ok { - failures = append(failures, assertionFailure{ - Description: typeCheckAssertionErr.description, - FileName: typeCheckAssertionErr.position.GetFile(), - FilePos: typeCheckAssertionErr.position.AsIntString(), - FromMemory: typeCheckAssertionErr.position.FromMemory(), - SourceName: "Data value calculated", - Source: typeCheckAssertionErr.position.GetLine(), - Expected: typeCheckAssertionErr.expected, - Found: typeCheckAssertionErr.found, - Hints: typeCheckAssertionErr.hints, - }) - } else { - miscErrorMessage += fmt.Sprintf("%s \n", err.Error()) - } - } - - return &schemaError{ - Summary: summary, - AssertionFailures: failures, - MiscErrorMessage: miscErrorMessage, - } -} - -func NewMismatchedTypeAssertionError(foundType yamlmeta.TypeWithValues, expectedType yamlmeta.Type) error { - var expectedTypeString string - if expectedType.GetDefinitionPosition().IsKnown() { - switch expectedType.(type) { - case *MapItemType, *ArrayItemType: - expectedTypeString = expectedType.GetValueType().String() - default: - expectedTypeString = expectedType.String() - } - } - - return schemaAssertionError{ - position: foundType.GetPosition(), - expected: fmt.Sprintf("%s (by %s)", expectedTypeString, 
expectedType.GetDefinitionPosition().AsCompactString()), - found: foundType.ValueTypeAsString(), - } -} - -func NewUnexpectedKeyAssertionError(found *yamlmeta.MapItem, definition *filepos.Position) error { - return schemaAssertionError{ - position: found.GetPosition(), - expected: fmt.Sprintf("(a key defined in map) (by %s)", definition.AsCompactString()), - found: fmt.Sprintf("%v", found.Key), - hints: []string{"declare data values in schema and override them in a data values document"}, - } -} - -type schemaError struct { - Summary string - AssertionFailures []assertionFailure - MiscErrorMessage string -} - -type assertionFailure struct { - Description string - FileName string - Source string - FilePos string - FromMemory bool - SourceName string - Expected string - Found string - Hints []string -} - -type schemaAssertionError struct { - error - position *filepos.Position - description string - expected string - found string - hints []string -} - -func (e schemaError) Error() string { - maxFilePos := 0 - for _, hunk := range e.AssertionFailures { - if len(hunk.FilePos) > maxFilePos { - maxFilePos = len(hunk.FilePos) - } - } - - funcMap := template.FuncMap{ - "pad": func(delim string, filePos string) string { - padding := " " - rightAlignedFilePos := fmt.Sprintf("%*s", maxFilePos, filePos) - return padding + rightAlignedFilePos + " " + delim - }, - "addBreak": func(title string) string { - return strings.Repeat("=", len(title)) - }, - } - - tmpl, err := template.New("").Funcs(funcMap).Parse(schemaErrorReportTemplate) - if err != nil { - log.Fatalf("parsing: %s", err) - } - - output := bytes.NewBufferString("") - - err = tmpl.Execute(output, e) - if err != nil { - panic(err.Error()) - } - - return output.String() -} diff --git a/vendor/github.com/k14s/ytt/pkg/schema/schema.go b/vendor/github.com/k14s/ytt/pkg/schema/schema.go deleted file mode 100644 index 338dcdd68..000000000 --- a/vendor/github.com/k14s/ytt/pkg/schema/schema.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package schema - -import ( - "fmt" - "strings" - - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/workspace/ref" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -type DocumentSchema struct { - Source *yamlmeta.Document - defaultDVs *yamlmeta.Document - DocType yamlmeta.Type -} - -type DocumentSchemaEnvelope struct { - Doc *DocumentSchema - - used bool - originalLibRef []ref.LibraryRef - libRef []ref.LibraryRef -} - -func NewDocumentSchema(doc *yamlmeta.Document) (*DocumentSchema, error) { - docType, err := inferTypeFromValue(doc, doc.Position) - if err != nil { - return nil, err - } - - schemaDVs := docType.GetDefaultValue() - - return &DocumentSchema{ - Source: doc, - defaultDVs: schemaDVs.(*yamlmeta.Document), - DocType: docType, - }, nil -} - -func NewDocumentSchemaEnvelope(doc *yamlmeta.Document) (*DocumentSchemaEnvelope, error) { - libRef, err := getSchemaLibRef(ref.LibraryRefExtractor{}, doc) - if err != nil { - return nil, err - } - - schema, err := NewDocumentSchema(doc) - if err != nil { - return nil, err - } - - return &DocumentSchemaEnvelope{ - Doc: schema, - originalLibRef: libRef, - libRef: libRef, - }, nil -} - -// NewNullSchema provides the "Null Object" value of Schema. This is used in the case where no schema was provided. 
-func NewNullSchema() *DocumentSchema { - return &DocumentSchema{ - Source: &yamlmeta.Document{}, - DocType: &DocumentType{ - ValueType: &AnyType{}}, - } -} - -func NewDocumentType(doc *yamlmeta.Document) (*DocumentType, error) { - typeOfValue, err := getType(doc) - if err != nil { - return nil, err - } - - return &DocumentType{Source: doc, Position: doc.Position, ValueType: typeOfValue, defaultValue: typeOfValue.GetDefaultValue()}, nil -} - -func NewMapType(m *yamlmeta.Map) (*MapType, error) { - mapType := &MapType{Position: m.Position} - - for _, mapItem := range m.Items { - mapItemType, err := NewMapItemType(mapItem) - if err != nil { - return nil, err - } - mapType.Items = append(mapType.Items, mapItemType) - } - - return mapType, nil -} - -func NewMapItemType(item *yamlmeta.MapItem) (*MapItemType, error) { - typeOfValue, err := getType(item) - if err != nil { - return nil, err - } - - return &MapItemType{Key: item.Key, ValueType: typeOfValue, defaultValue: typeOfValue.GetDefaultValue(), Position: item.Position}, nil -} - -func NewArrayType(a *yamlmeta.Array) (*ArrayType, error) { - if len(a.Items) != 1 { - return nil, NewSchemaError("Invalid schema - wrong number of items in array definition", schemaAssertionError{ - position: a.Position, - expected: "exactly 1 array item, of the desired type", - found: fmt.Sprintf("%d array items", len(a.Items)), - hints: []string{"in schema, the one item of the array implies the type of its elements.", "in schema, the default value for an array is always an empty list.", "default values can be overridden via a data values overlay."}, - }) - } - - arrayItemType, err := NewArrayItemType(a.Items[0]) - if err != nil { - return nil, err - } - - return &ArrayType{ItemsType: arrayItemType, defaultValue: &yamlmeta.Array{}, Position: a.Position}, nil -} - -func NewArrayItemType(item *yamlmeta.ArrayItem) (*ArrayItemType, error) { - typeOfValue, err := getType(item) - if err != nil { - return nil, err - } - - return &ArrayItemType{ValueType: typeOfValue, defaultValue: typeOfValue.GetDefaultValue(), Position: item.GetPosition()}, nil -} - -func getType(node yamlmeta.ValueHoldingNode) (yamlmeta.Type, error) { - var typeOfValue yamlmeta.Type - - anns, err := collectAnnotations(node) - if err != nil { - return nil, NewSchemaError("Invalid schema", err) - } - typeOfValue = getTypeFromAnnotations(anns) - - if typeOfValue == nil { - typeOfValue, err = inferTypeFromValue(node.Val(), node.GetPosition()) - if err != nil { - return nil, err - } - } - - err = valueTypeAllowsItemValue(typeOfValue, node.Val(), node.GetPosition()) - if err != nil { - return nil, err - } - - return typeOfValue, nil -} - -func inferTypeFromValue(value interface{}, position *filepos.Position) (yamlmeta.Type, error) { - switch typedContent := value.(type) { - case *yamlmeta.Document: - docType, err := NewDocumentType(typedContent) - if err != nil { - return nil, err - } - return docType, nil - case *yamlmeta.Map: - mapType, err := NewMapType(typedContent) - if err != nil { - return nil, err - } - return mapType, nil - case *yamlmeta.Array: - arrayType, err := NewArrayType(typedContent) - if err != nil { - return nil, err - } - return arrayType, nil - case string: - return &ScalarType{ValueType: *new(string), defaultValue: typedContent, Position: position}, nil - case float64: - return &ScalarType{ValueType: *new(float64), defaultValue: typedContent, Position: position}, nil - case int, int64, uint64: - return &ScalarType{ValueType: *new(int), defaultValue: typedContent, Position: position}, nil 
- case bool: - return &ScalarType{ValueType: *new(bool), defaultValue: typedContent, Position: position}, nil - case nil: - return nil, nil - } - - return nil, fmt.Errorf("Expected value '%s' to be a map, array, or scalar, but was %T", value, value) -} - -func valueTypeAllowsItemValue(explicitType yamlmeta.Type, itemValue interface{}, position *filepos.Position) error { - switch explicitType.(type) { - case *AnyType: - return nil - default: - if itemValue == nil { - return NewSchemaError("Invalid schema - null value not allowed here", schemaAssertionError{ - position: position, - expected: "non-null value", - found: "null value", - hints: []string{"in YAML, omitting a value implies null.", "to set the default value to null, annotate with @schema/nullable.", "to allow any value, annotate with @schema/type any=True."}, - }) - } - } - return nil -} - -type ExtractLibRefs interface { - FromAnnotation(template.NodeAnnotations) ([]ref.LibraryRef, error) -} - -func getSchemaLibRef(libRefs ExtractLibRefs, doc *yamlmeta.Document) ([]ref.LibraryRef, error) { - anns := template.NewAnnotations(doc) - libRef, err := libRefs.FromAnnotation(anns) - if err != nil { - return nil, err - } - return libRef, nil -} - -func (s *DocumentSchema) AssignType(typeable yamlmeta.Typeable) yamlmeta.TypeCheck { - return s.DocType.AssignTypeTo(typeable) -} - -func (s *DocumentSchema) DefaultDataValues() *yamlmeta.Document { - return s.defaultDVs -} - -func (s *DocumentSchema) deepCopy() *DocumentSchema { - return &DocumentSchema{ - Source: s.Source.DeepCopy(), - defaultDVs: s.defaultDVs.DeepCopy(), - DocType: s.DocType, - } -} - -func (s *DocumentSchema) ValidateWithValues(valuesFilesCount int) error { - return nil -} - -func (e *DocumentSchemaEnvelope) Source() *yamlmeta.Document { - return e.Doc.Source -} - -func (e *DocumentSchemaEnvelope) Desc() string { - var desc []string - for _, refPiece := range e.originalLibRef { - desc = append(desc, refPiece.AsString()) - } - return fmt.Sprintf("Schema belonging to library '%s%s' on %s", "@", - strings.Join(desc, "@"), e.Source().Position.AsString()) -} - -func (e *DocumentSchemaEnvelope) IsUsed() bool { return e.used } - -func (e *DocumentSchemaEnvelope) IntendedForAnotherLibrary() bool { - return len(e.libRef) > 0 -} - -func (e *DocumentSchemaEnvelope) UsedInLibrary(expectedRefPiece ref.LibraryRef) (*DocumentSchemaEnvelope, bool) { - if !e.IntendedForAnotherLibrary() { - e.markUsed() - - return e.deepCopy(), true - } - - if !e.libRef[0].Matches(expectedRefPiece) { - return nil, false - } - e.markUsed() - childSchemaProcessing := e.deepCopy() - childSchemaProcessing.libRef = childSchemaProcessing.libRef[1:] - return childSchemaProcessing, !childSchemaProcessing.IntendedForAnotherLibrary() -} - -func (e *DocumentSchemaEnvelope) markUsed() { e.used = true } - -func (e *DocumentSchemaEnvelope) deepCopy() *DocumentSchemaEnvelope { - var copiedPieces []ref.LibraryRef - copiedPieces = append(copiedPieces, e.libRef...) - return &DocumentSchemaEnvelope{ - Doc: e.Doc.deepCopy(), - originalLibRef: e.originalLibRef, - libRef: copiedPieces, - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/schema/type.go b/vendor/github.com/k14s/ytt/pkg/schema/type.go deleted file mode 100644 index 976f934b5..000000000 --- a/vendor/github.com/k14s/ytt/pkg/schema/type.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package schema - -import ( - "fmt" - - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -var _ yamlmeta.Type = (*DocumentType)(nil) -var _ yamlmeta.Type = (*MapType)(nil) -var _ yamlmeta.Type = (*MapItemType)(nil) -var _ yamlmeta.Type = (*ArrayType)(nil) -var _ yamlmeta.Type = (*ArrayItemType)(nil) -var _ yamlmeta.Type = (*AnyType)(nil) -var _ yamlmeta.Type = (*NullType)(nil) - -type DocumentType struct { - Source *yamlmeta.Document - ValueType yamlmeta.Type // typically one of: MapType, ArrayType, ScalarType - Position *filepos.Position - defaultValue interface{} -} -type MapType struct { - Items []*MapItemType - Position *filepos.Position -} -type MapItemType struct { - Key interface{} // usually a string - ValueType yamlmeta.Type - Position *filepos.Position - defaultValue interface{} -} -type ArrayType struct { - ItemsType yamlmeta.Type - Position *filepos.Position - defaultValue interface{} -} -type ArrayItemType struct { - ValueType yamlmeta.Type - Position *filepos.Position - defaultValue interface{} -} -type ScalarType struct { - ValueType interface{} - Position *filepos.Position - defaultValue interface{} -} -type AnyType struct { - ValueType yamlmeta.Type - Position *filepos.Position -} -type NullType struct { - ValueType yamlmeta.Type - Position *filepos.Position -} - -func (n NullType) GetDefaultValue() interface{} { - return nil -} - -func (a AnyType) GetDefaultValue() interface{} { - if a.ValueType == nil { - return nil - } - return a.ValueType.GetDefaultValue() // delegate GetDefaultValue() functions will make defensive copy 👍 -} - -func (m ScalarType) GetDefaultValue() interface{} { - return m.defaultValue // scalar values are copied (even through an interface{} reference) -} - -func (a ArrayItemType) GetDefaultValue() interface{} { - panic(fmt.Sprintf("Unexpected call to GetDefaultValue() on %+v", a)) -} - -func (a ArrayType) GetDefaultValue() interface{} { - defaultValues := &yamlmeta.Array{Position: a.Position} - return defaultValues -} - -func (t MapItemType) GetDefaultValue() interface{} { - return &yamlmeta.MapItem{Key: t.Key, Value: t.ValueType.GetDefaultValue(), Position: t.Position} -} - -func (m MapType) GetDefaultValue() interface{} { - defaultValues := &yamlmeta.Map{Position: m.Position} - for _, item := range m.Items { - newItem := item.GetDefaultValue() - defaultValues.Items = append(defaultValues.Items, newItem.(*yamlmeta.MapItem)) - } - return defaultValues -} - -func (t DocumentType) GetDefaultValue() interface{} { - return &yamlmeta.Document{Value: t.ValueType.GetDefaultValue(), Position: t.Position} -} - -func (n NullType) AssignTypeTo(typeable yamlmeta.Typeable) (chk yamlmeta.TypeCheck) { - childCheck := n.ValueType.AssignTypeTo(typeable) - chk.Violations = append(chk.Violations, childCheck.Violations...) 
- return -} - -func (n NullType) GetValueType() yamlmeta.Type { - return n.ValueType -} - -func (n NullType) CheckType(node yamlmeta.TypeWithValues) (chk yamlmeta.TypeCheck) { - if len(node.GetValues()) == 1 && node.GetValues()[0] == nil { - return - } - - check := n.GetValueType().CheckType(node) - chk.Violations = check.Violations - - return -} - -func (n NullType) GetDefinitionPosition() *filepos.Position { - return n.Position -} - -func (n NullType) String() string { - return "null" -} - -func (t *DocumentType) GetValueType() yamlmeta.Type { - panic("Not implemented because it is unreachable") -} -func (m MapType) GetValueType() yamlmeta.Type { - panic("Not implemented because it is unreachable") -} -func (t MapItemType) GetValueType() yamlmeta.Type { - return t.ValueType -} -func (a ArrayType) GetValueType() yamlmeta.Type { - panic("Not implemented because it is unreachable") -} -func (a ArrayItemType) GetValueType() yamlmeta.Type { - return a.ValueType -} -func (m ScalarType) GetValueType() yamlmeta.Type { - panic("Not implemented because it is unreachable") -} -func (a AnyType) GetValueType() yamlmeta.Type { - return a -} - -func (t *DocumentType) GetDefinitionPosition() *filepos.Position { - return t.Position -} -func (m MapType) GetDefinitionPosition() *filepos.Position { - return m.Position -} -func (t MapItemType) GetDefinitionPosition() *filepos.Position { - return t.Position -} -func (a ArrayType) GetDefinitionPosition() *filepos.Position { - return a.Position -} -func (a ArrayItemType) GetDefinitionPosition() *filepos.Position { - return a.Position -} -func (m ScalarType) GetDefinitionPosition() *filepos.Position { - return m.Position -} -func (a AnyType) GetDefinitionPosition() *filepos.Position { - return a.Position -} - -func (t *DocumentType) String() string { - return "document" -} -func (m MapType) String() string { - return "map" -} -func (t MapItemType) String() string { - return fmt.Sprintf("%s: %s", t.Key, t.ValueType.String()) -} -func (a ArrayType) String() string { - return "array" -} -func (a ArrayItemType) String() string { - return fmt.Sprintf("- %s", a.ValueType.String()) -} -func (m ScalarType) String() string { - switch m.ValueType.(type) { - case float64: - return "float" - case int: - return "integer" - case bool: - return "boolean" - default: - return fmt.Sprintf("%T", m.ValueType) - } -} -func (a AnyType) String() string { - return "any" -} - -func (t *DocumentType) CheckType(_ yamlmeta.TypeWithValues) (chk yamlmeta.TypeCheck) { - return -} - -func (m *MapType) CheckType(node yamlmeta.TypeWithValues) (chk yamlmeta.TypeCheck) { - nodeMap, ok := node.(*yamlmeta.Map) - if !ok { - chk.Violations = append(chk.Violations, - NewMismatchedTypeAssertionError(node, m)) - return - } - - for _, item := range nodeMap.Items { - if !m.AllowsKey(item.Key) { - chk.Violations = append(chk.Violations, - NewUnexpectedKeyAssertionError(item, m.Position)) - } - } - return -} - -func (t *MapItemType) CheckType(node yamlmeta.TypeWithValues) (chk yamlmeta.TypeCheck) { - _, ok := node.(*yamlmeta.MapItem) - if !ok { - // A Map must've yielded a non-MapItem which is not valid YAML - panic(fmt.Sprintf("MapItem type check was called on a non-MapItem: %#v", node)) - } - - return -} - -func (a *ArrayType) CheckType(node yamlmeta.TypeWithValues) (chk yamlmeta.TypeCheck) { - _, ok := node.(*yamlmeta.Array) - if !ok { - chk.Violations = append(chk.Violations, - NewMismatchedTypeAssertionError(node, a)) - } - return -} - -func (a *ArrayItemType) CheckType(node yamlmeta.TypeWithValues) 
(chk yamlmeta.TypeCheck) { - _, ok := node.(*yamlmeta.ArrayItem) - if !ok { - // An Array must've yielded a non-ArrayItem which is not valid YAML - panic(fmt.Sprintf("ArrayItem type check was called on a non-ArrayItem: %#v", node)) - } - return -} - -func (m *ScalarType) CheckType(node yamlmeta.TypeWithValues) (chk yamlmeta.TypeCheck) { - value := node.GetValues()[0] - switch value.(type) { - case string: - if _, ok := m.ValueType.(string); !ok { - chk.Violations = append(chk.Violations, - NewMismatchedTypeAssertionError(node, m)) - } - case float64: - if _, ok := m.ValueType.(float64); !ok { - chk.Violations = append(chk.Violations, - NewMismatchedTypeAssertionError(node, m)) - } - case int, int64, uint64: - if _, ok := m.ValueType.(int); !ok { - if _, ok = m.ValueType.(float64); !ok { - chk.Violations = append(chk.Violations, - NewMismatchedTypeAssertionError(node, m)) - } - } - case bool: - if _, ok := m.ValueType.(bool); !ok { - chk.Violations = append(chk.Violations, - NewMismatchedTypeAssertionError(node, m)) - } - default: - chk.Violations = append(chk.Violations, - NewMismatchedTypeAssertionError(node, m)) - } - return -} - -func (a AnyType) CheckType(_ yamlmeta.TypeWithValues) (chk yamlmeta.TypeCheck) { - return -} - -func (t *DocumentType) AssignTypeTo(typeable yamlmeta.Typeable) (chk yamlmeta.TypeCheck) { - doc, ok := typeable.(*yamlmeta.Document) - if !ok { - chk.Violations = append(chk.Violations, - NewMismatchedTypeAssertionError(typeable, t)) - return - } - doc.SetType(t) - typeableValue, isNode := doc.Value.(yamlmeta.Typeable) - if isNode { - childCheck := t.ValueType.AssignTypeTo(typeableValue) - chk.Violations = append(chk.Violations, childCheck.Violations...) - } // else, is a scalar - return chk -} - -func (m *MapType) AssignTypeTo(typeable yamlmeta.Typeable) (chk yamlmeta.TypeCheck) { - mapNode, ok := typeable.(*yamlmeta.Map) - if !ok { - chk.Violations = append(chk.Violations, NewMismatchedTypeAssertionError(typeable, m)) - return - } - var foundKeys []interface{} - typeable.SetType(m) - for _, mapItem := range mapNode.Items { - for _, itemType := range m.Items { - if mapItem.Key == itemType.Key { - foundKeys = append(foundKeys, itemType.Key) - childCheck := itemType.AssignTypeTo(mapItem) - chk.Violations = append(chk.Violations, childCheck.Violations...) - break - } - } - } - - m.applySchemaDefaults(foundKeys, chk, mapNode) - return -} - -func (m *MapType) applySchemaDefaults(foundKeys []interface{}, chk yamlmeta.TypeCheck, mapNode *yamlmeta.Map) { - for _, item := range m.Items { - if contains(foundKeys, item.Key) { - continue - } - - val := item.GetDefaultValue() - childCheck := item.AssignTypeTo(val.(*yamlmeta.MapItem)) - chk.Violations = append(chk.Violations, childCheck.Violations...) - err := mapNode.AddValue(val) - if err != nil { - panic(fmt.Sprintf("Internal inconsistency: adding map item: %s", err)) - } - } -} - -func contains(haystack []interface{}, needle interface{}) bool { - for _, key := range haystack { - if key == needle { - return true - } - } - return false -} - -func (t *MapItemType) AssignTypeTo(typeable yamlmeta.Typeable) (chk yamlmeta.TypeCheck) { - mapItem, ok := typeable.(*yamlmeta.MapItem) - if !ok { - panic(fmt.Sprintf("Attempt to assign type to a non-map-item (children of Maps can only be MapItems). 
type=%#v; typeable=%#v", t, typeable)) - } - typeable.SetType(t) - typeableValue, isNode := mapItem.Value.(yamlmeta.Typeable) - if isNode { - childCheck := t.ValueType.AssignTypeTo(typeableValue) - chk.Violations = append(chk.Violations, childCheck.Violations...) - } // else, is scalar - return -} - -func (a *ArrayType) AssignTypeTo(typeable yamlmeta.Typeable) (chk yamlmeta.TypeCheck) { - arrayNode, ok := typeable.(*yamlmeta.Array) - if !ok { - chk.Violations = append(chk.Violations, NewMismatchedTypeAssertionError(typeable, a)) - return - } - typeable.SetType(a) - for _, arrayItem := range arrayNode.Items { - childCheck := a.ItemsType.AssignTypeTo(arrayItem) - chk.Violations = append(chk.Violations, childCheck.Violations...) - } - return -} - -func (a *ArrayItemType) AssignTypeTo(typeable yamlmeta.Typeable) (chk yamlmeta.TypeCheck) { - arrayItem, ok := typeable.(*yamlmeta.ArrayItem) - if !ok { - panic(fmt.Sprintf("Attempt to assign type to a non-array-item (children of Arrays can only be ArrayItems). type=%#v; typeable=%#v", a, typeable)) - } - typeable.SetType(a) - typeableValue, isNode := arrayItem.Value.(yamlmeta.Typeable) - if isNode { - childCheck := a.ValueType.AssignTypeTo(typeableValue) - chk.Violations = append(chk.Violations, childCheck.Violations...) - } // else, is scalar - return -} - -func (m *ScalarType) AssignTypeTo(typeable yamlmeta.Typeable) yamlmeta.TypeCheck { - return yamlmeta.TypeCheck{[]error{NewMismatchedTypeAssertionError(typeable, m)}} -} - -func (a AnyType) AssignTypeTo(yamlmeta.Typeable) (chk yamlmeta.TypeCheck) { - return -} - -func (m *MapType) AllowsKey(key interface{}) bool { - for _, item := range m.Items { - if item.Key == key { - return true - } - } - return false -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/ancestors.go b/vendor/github.com/k14s/ytt/pkg/template/ancestors.go deleted file mode 100644 index 3965c65fa..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/ancestors.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" -) - -type Ancestors struct { - nodeNumToParentNum map[NodeTag]NodeTag -} - -func NewAncestors(nodeNumToParentNum map[NodeTag]NodeTag) Ancestors { - return Ancestors{nodeNumToParentNum} -} - -func (e Ancestors) FindParentTag(tag NodeTag) NodeTag { - parentTag, ok := e.nodeNumToParentNum[tag] - if !ok { - panic(fmt.Sprintf("expected to find parent tag for %s", tag)) - } - return parentTag -} - -func (e Ancestors) FindCommonParentTag(currTag, newTag NodeTag) NodeTag { - currAncestors := e.ancestors([]NodeTag{currTag}, currTag) - newAncestors := e.ancestors([]NodeTag{}, newTag) - commonAncestor := NodeTagRoot - - for i := 0; i < max(len(currAncestors), len(newAncestors)); i++ { - if i == len(currAncestors) || i == len(newAncestors) { - break - } - if currAncestors[i].Equals(newAncestors[i]) { - commonAncestor = currAncestors[i] - } else { - break - } - } - - if false { // for debugging - fmt.Printf("---\n") - fmt.Printf("inst: %d\n", newTag) - fmt.Printf("curr: %#v\n", currAncestors) - fmt.Printf("new : %#v\n", newAncestors) - fmt.Printf("comm: %d\n", commonAncestor) - } - - return commonAncestor -} - -func (e Ancestors) ancestors(result []NodeTag, tag NodeTag) []NodeTag { - for { - parentTag, ok := e.nodeNumToParentNum[tag] - if !ok { - panic(fmt.Sprintf("expected to find parent tag for %s", tag)) - } - result = append([]NodeTag{parentTag}, result...) 
- if parentTag.Equals(NodeTagRoot) { - break - } - tag = parentTag - } - return result -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/annotations.go b/vendor/github.com/k14s/ytt/pkg/template/annotations.go deleted file mode 100644 index 3a874455e..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/annotations.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "strings" - - "github.com/k14s/starlark-go/starlark" -) - -const ( - AnnotationComment AnnotationName = "comment" - AnnotationCode AnnotationName = "template/code" - AnnotationValue AnnotationName = "template/value" -) - -type NodeAnnotations map[AnnotationName]NodeAnnotation - -type NodeAnnotation struct { - Args starlark.Tuple - Kwargs []starlark.Tuple -} - -func NewAnnotations(node EvaluationNode) NodeAnnotations { - result, ok := node.GetAnnotations().(NodeAnnotations) - if !ok { - result = NodeAnnotations{} - } - return result -} - -func (as NodeAnnotations) DeepCopyAsInterface() interface{} { - return as.DeepCopy() -} - -func (as NodeAnnotations) DeepCopy() NodeAnnotations { - result := NodeAnnotations{} - for k, v := range as { - result[k] = v // Dont need to copy v - } - return result -} - -func (as NodeAnnotations) Has(name AnnotationName) bool { - _, found := as[name] - return found -} - -func (as NodeAnnotations) Args(name AnnotationName) starlark.Tuple { - na, found := as[name] - if !found { - return starlark.Tuple{} - } - return na.Args -} - -func (as NodeAnnotations) Kwargs(name AnnotationName) []starlark.Tuple { - na, found := as[name] - if !found { - return []starlark.Tuple{} - } - return na.Kwargs -} - -func (as NodeAnnotations) DeleteNs(ns AnnotationNs) { - prefix := string(ns) + "/" - for k := range as { - if strings.HasPrefix(string(k), prefix) { - delete(as, k) - } - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/compiled_template.go b/vendor/github.com/k14s/ytt/pkg/template/compiled_template.go deleted file mode 100644 index 46676ce41..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/compiled_template.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" - "github.com/k14s/starlark-go/resolve" - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/syntax" - "github.com/k14s/ytt/pkg/filepos" - tplcore "github.com/k14s/ytt/pkg/template/core" - "strings" - "unicode" -) - -type EvaluationCtxDialectName string -type EvaluationCtxDialects map[EvaluationCtxDialectName]EvaluationCtxDialect - -type CompiledTemplate struct { - name string - code []Line - instructions *InstructionSet - nodes *Nodes - evalDialects EvaluationCtxDialects - rootCtx *EvaluationCtx - ctxs []*EvaluationCtx -} - -func NewCompiledTemplate(name string, code []Line, - instructions *InstructionSet, nodes *Nodes, - evalDialects EvaluationCtxDialects) *CompiledTemplate { - - // TODO package globals - resolve.AllowFloat = true - resolve.AllowSet = true - resolve.AllowLambda = true - resolve.AllowNestedDef = true - resolve.AllowBitwise = true - resolve.AllowRecursion = true - resolve.AllowGlobalReassign = true - - return &CompiledTemplate{ - name: name, - code: code, - instructions: instructions, - nodes: nodes, - evalDialects: evalDialects, - } -} - -func (e *CompiledTemplate) Code() []Line { return e.code } - -func (e *CompiledTemplate) CodeAtLine(pos *filepos.Position) *Line { - for i, line := range e.code { - if i+1 == pos.LineNum() { - return &line - } - } - return nil -} - -func (e *CompiledTemplate) CodeAsString() string { - result := []string{} - cont := false - for _, line := range e.code { - src := line.Instruction.AsString() - if !cont { - src = strings.TrimLeftFunc(src, unicode.IsSpace) - } - cont = strings.HasSuffix(src, "\\") - result = append(result, src) - } - // Do not add any unnecessary newlines to match code lines - return strings.Join(result, "\n") -} - -func (e *CompiledTemplate) DebugCodeAsString() string { - result := []string{"src: tmpl: code: | srccode"} - - for i, line := range e.code { - src := "" - pos := filepos.NewUnknownPosition() - - if line.SourceLine != nil { - src = line.SourceLine.Content - pos = line.SourceLine.Position - } - - result = append(result, fmt.Sprintf("%s: %4d: %s | %s", - pos.As4DigitString(), i+1, line.Instruction.AsString(), src)) - } - - // Do not add any unnecessary newlines to match code lines - return strings.Join(result, "\n") -} - -func (e *CompiledTemplate) Eval(thread *starlark.Thread, loader CompiledTemplateLoader) ( - starlark.StringDict, interface{}, error) { - - globals := make(starlark.StringDict) - - if e.nodes != nil { - instructionBindings := map[string]tplcore.StarlarkFunc{ - // TODO ProgramAST should get rid of set ctx type calls - e.instructions.SetCtxType.Name: e.tplSetCtxType, - e.instructions.StartCtx.Name: e.tplStartCtx, - e.instructions.EndCtx.Name: e.tplEndCtx, - e.instructions.StartNodeAnnotation.Name: e.tplStartNodeAnnotation, - e.instructions.CollectNodeAnnotation.Name: e.tplCollectNodeAnnotation, - e.instructions.StartNode.Name: e.tplStartNode, - e.instructions.SetNode.Name: e.tplSetNode, - e.instructions.SetMapItemKey.Name: e.tplSetMapItemKey, - } - - for name, f := range instructionBindings { - globals[name] = starlark.NewBuiltin(name, tplcore.ErrWrapper(f)) - } - } - - updatedGlobals, val, err := e.eval(thread, globals) - if err != nil { - return nil, nil, NewCompiledTemplateMultiError(err, loader) - } - - // Since load statement does not allow importing - // symbols starting with '_'; do not export them, i.e. 
consider private - e.hidePrivateGlobals(updatedGlobals) - - return updatedGlobals, val, nil -} - -func (e *CompiledTemplate) eval( - thread *starlark.Thread, globals starlark.StringDict) ( - gs starlark.StringDict, resultVal interface{}, resultErr error) { - - // Catch any panics to give a better contextual information - defer func() { - if err := recover(); err != nil { - if typedErr, ok := err.(error); ok { - resultErr = typedErr - } else { - resultErr = fmt.Errorf("(p) %s", err) - } - } - }() - - f, err := syntax.Parse(e.name, e.CodeAsString(), syntax.BlockScanner) - if err != nil { - return nil, nil, err - } - - NewProgramAST(f, e.instructions).InsertTplCtxs() - - prog, err := starlark.FileProgram(f, globals.Has) - if err != nil { - return nil, nil, err - } - - // clear before execution - e.rootCtx = nil - e.ctxs = nil - - updatedGlobals, err := prog.Init(thread, globals) - if err != nil { - return nil, nil, err - } - - updatedGlobals.Freeze() - - if len(e.ctxs) > 0 { - panic("expected all ctxs to end") - } - - // Plain starlark programs do not have any ctxs - if e.rootCtx != nil { - resultVal = e.rootCtx.RootNode() - } - - return updatedGlobals, resultVal, nil -} - -func (e *CompiledTemplate) hidePrivateGlobals(globals starlark.StringDict) { - var privateKeys []string - - for k := range globals { - if strings.HasPrefix(k, "_") { - privateKeys = append(privateKeys, k) - } - } - - for _, k := range privateKeys { - delete(globals, k) - } -} - -func (e *CompiledTemplate) newCtx(ctxType EvaluationCtxDialectName) *EvaluationCtx { - return &EvaluationCtx{ - nodes: e.nodes, - ancestors: e.nodes.Ancestors(), - dialect: e.evalDialects[ctxType], - - pendingAnnotations: map[NodeTag]NodeAnnotations{}, - pendingMapItemKeys: map[NodeTag]interface{}{}, - } -} - -func (e *CompiledTemplate) tplSetCtxType( - thread *starlark.Thread, _ *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - return starlark.None, nil -} - -func (e *CompiledTemplate) tplStartCtx( - thread *starlark.Thread, _ *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - ctxType, err := tplcore.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - e.ctxs = append(e.ctxs, e.newCtx(EvaluationCtxDialectName(ctxType))) - - if len(e.ctxs) == 1 && e.rootCtx == nil { - e.rootCtx = e.ctxs[0] - } - - return starlark.None, nil -} - -func (e *CompiledTemplate) tplEndCtx( - thread *starlark.Thread, _ *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if len(e.ctxs) == 0 { - panic("unexpected ctx end") - } - - var returnVal starlark.Value - switch args.Len() { - case 0: - returnVal = e.ctxs[len(e.ctxs)-1].RootNodeAsStarlarkValue() - case 1: - returnVal = args.Index(0) - default: - return starlark.None, fmt.Errorf("expected zero or one argument") - } - - e.ctxs = e.ctxs[:len(e.ctxs)-1] - return returnVal, nil -} - -func (e *CompiledTemplate) tplSetNode( - thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - return e.ctxs[len(e.ctxs)-1].TplSetNode(thread, f, args, kwargs) -} - -func (e *CompiledTemplate) tplSetMapItemKey( - thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - return e.ctxs[len(e.ctxs)-1].TplSetMapItemKey(thread, f, args, kwargs) -} - -func (e *CompiledTemplate) tplStartNodeAnnotation( - thread *starlark.Thread, f 
*starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - return e.ctxs[len(e.ctxs)-1].TplStartNodeAnnotation(thread, f, args, kwargs) -} - -func (e *CompiledTemplate) tplCollectNodeAnnotation( - thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - return e.ctxs[len(e.ctxs)-1].TplCollectNodeAnnotation(thread, f, args, kwargs) -} - -func (e *CompiledTemplate) tplStartNode( - thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - return e.ctxs[len(e.ctxs)-1].TplStartNode(thread, f, args, kwargs) -} - -func (e *CompiledTemplate) TplReplaceNode( - thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - return e.ctxs[len(e.ctxs)-1].TplReplace(thread, f, args, kwargs) -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/compiled_template_error.go b/vendor/github.com/k14s/ytt/pkg/template/compiled_template_error.go deleted file mode 100644 index 7d8b8df01..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/compiled_template_error.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" - "strings" - - "github.com/k14s/starlark-go/resolve" - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/syntax" - "github.com/k14s/ytt/pkg/filepos" -) - -type CompiledTemplateMultiError struct { - errs []CompiledTemplateError - loader CompiledTemplateLoader -} - -var _ error = CompiledTemplateMultiError{} - -type CompiledTemplateError struct { - Positions []CompiledTemplateErrorPosition - Msg string -} - -type CompiledTemplateErrorPosition struct { - Filename string - ContextName string - TemplateLine *Line - - BeforeTemplateLine *Line - AfterTemplateLine *Line -} - -func NewCompiledTemplateMultiError(err error, loader CompiledTemplateLoader) error { - e := CompiledTemplateMultiError{loader: loader} - - switch typedErr := err.(type) { - case syntax.Error: - e.errs = append(e.errs, CompiledTemplateError{ - Positions: []CompiledTemplateErrorPosition{e.buildPos(typedErr.Pos)}, - Msg: typedErr.Msg, - }) - - case resolve.ErrorList: - for _, resolveErr := range typedErr { - e.errs = append(e.errs, CompiledTemplateError{ - Positions: []CompiledTemplateErrorPosition{e.buildPos(resolveErr.Pos)}, - Msg: resolveErr.Msg, - }) - } - - case *starlark.EvalError: - e.errs = append(e.errs, e.buildEvalErr(typedErr)) - - default: - e.errs = append(e.errs, CompiledTemplateError{Msg: err.Error()}) - } - - return e -} - -func (e CompiledTemplateMultiError) Error() string { - result := []string{""} - - for _, err := range e.errs { - var topicLine string - var otherLines []string - - if !strings.Contains(err.Msg, "\n") { - topicLine = err.Msg - } else { - for i, line := range strings.Split(err.Msg, "\n") { - if i == 0 { - topicLine = line - } else { - otherLines = append(otherLines, line) - } - } - } - - result = append(result, fmt.Sprintf("- %s%s", topicLine, e.hintMsg(topicLine))) - - for _, pos := range err.Positions { - // TODO do better - if pos.TemplateLine == nil { - continue - } - - linePad := " " - - if len(pos.ContextName) > 0 { - result = append(result, linePad+"in "+pos.ContextName) - linePad += " " - } - - if pos.TemplateLine.SourceLine != nil { - if pos.TemplateLine.SourceLine.Selection != nil { - result = append(result, fmt.Sprintf("%s%s%s", - linePad, 
e.posPrefixStr(pos.TemplateLine.SourceLine.Selection), pos.TemplateLine.SourceLine.Selection.Content)) - } else { - result = append(result, fmt.Sprintf("%s%s%s", - linePad, e.posPrefixStr(pos.TemplateLine.SourceLine), pos.TemplateLine.SourceLine.Content)) - } - } else { - if pos.BeforeTemplateLine != nil && pos.BeforeTemplateLine.SourceLine != nil { - result = append(result, fmt.Sprintf("%s%s%s", - linePad, e.posPrefixStr(pos.BeforeTemplateLine.SourceLine), pos.BeforeTemplateLine.SourceLine.Content)) - } - - result = append(result, fmt.Sprintf("%s%s:? | %s (generated)", - linePad, pos.Filename, pos.TemplateLine.Instruction.AsString())) - - if pos.AfterTemplateLine != nil && pos.AfterTemplateLine.SourceLine != nil { - result = append(result, fmt.Sprintf("%s%s%s", - linePad, e.posPrefixStr(pos.AfterTemplateLine.SourceLine), pos.AfterTemplateLine.SourceLine.Content)) - } - } - } - - if len(otherLines) > 0 { - result = append(result, []string{"", fmt.Sprintf(" reason:")}...) - for _, line := range otherLines { - result = append(result, fmt.Sprintf(" %s", line)) - } - } - } - - return strings.Join(result, "\n") -} - -func (e CompiledTemplateMultiError) posPrefixStr(srcLine *SourceLine) string { - // TODO show column information - return fmt.Sprintf("%s | ", srcLine.Position.AsCompactString()) -} - -func (e CompiledTemplateMultiError) buildEvalErr(err *starlark.EvalError) CompiledTemplateError { - // fmt.Printf("frame:\n%s\n", err.Backtrace()) - result := CompiledTemplateError{Msg: err.Msg} - for i := len(err.CallStack) - 1; i >= 0; i-- { - pos := e.buildPos(err.CallStack[i].Pos) - pos.ContextName = err.CallStack[i].Name - result.Positions = append(result.Positions, pos) - } - return result -} - -func (e CompiledTemplateMultiError) buildPos(pos syntax.Position) CompiledTemplateErrorPosition { - // TODO seems to be a bug in starlark where, for example, - // "function call2 takes exactly 1 positional argument (0 given)" - // error has 0 line number position (even though its 1 based) - if pos.Line == 0 { - return CompiledTemplateErrorPosition{} - } - - ct, err := e.loader.FindCompiledTemplate(pos.Filename()) - if err != nil { - panic(fmt.Errorf("Expected to find compiled template: %s", err)) - } - - line := ct.CodeAtLine(filepos.NewPosition(int(pos.Line))) - if line == nil { - panic(fmt.Errorf("Expected to find compiled template line %d", pos.Line)) - } - - return CompiledTemplateErrorPosition{ - Filename: pos.Filename(), - TemplateLine: line, - BeforeTemplateLine: e.findClosestLine(ct, int(pos.Line), -1), - AfterTemplateLine: e.findClosestLine(ct, int(pos.Line), 1), - } -} - -func (CompiledTemplateMultiError) findClosestLine(ct *CompiledTemplate, posLine int, lineInc int) *Line { - for { - posLine += lineInc - if posLine < 1 { - return nil - } - - line := ct.CodeAtLine(filepos.NewPosition(posLine)) - if line == nil || line.SourceLine != nil { - return line - } - } -} - -func (CompiledTemplateMultiError) hintMsg(errMsg string) string { - hintMsg := "" - switch errMsg { - case "undefined: true": - hintMsg = "use 'True' instead of 'true' for boolean assignment" - case "undefined: false": - hintMsg = "use 'False' instead of 'false' for boolean assignment" - case "got newline, want ':'": - hintMsg = "missing colon at the end of 'if/for/def' statement?" 
- case "undefined: null": - hintMsg = "use 'None' instead of 'null' to indicate no value" - case "undefined: nil": - hintMsg = "use 'None' instead of 'nil' to indicate no value" - case "undefined: none": - hintMsg = "use 'None' instead of 'none' to indicate no value" - case "unhandled index operation struct[string]": - hintMsg = "use getattr(...) to access struct field programmatically" - } - - if len(hintMsg) > 0 { - hintMsg = fmt.Sprintf(" (hint: %s)", hintMsg) - } - return hintMsg -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/compiled_template_loader.go b/vendor/github.com/k14s/ytt/pkg/template/compiled_template_loader.go deleted file mode 100644 index f1b86889a..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/compiled_template_loader.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" -) - -type CompiledTemplateLoader interface { - FindCompiledTemplate(string) (*CompiledTemplate, error) - Load(*starlark.Thread, string) (starlark.StringDict, error) -} - -type NoopCompiledTemplateLoader struct { - tpl *CompiledTemplate -} - -func NewNoopCompiledTemplateLoader(tpl *CompiledTemplate) NoopCompiledTemplateLoader { - return NoopCompiledTemplateLoader{tpl} -} - -var _ CompiledTemplateLoader = NoopCompiledTemplateLoader{} - -func (l NoopCompiledTemplateLoader) FindCompiledTemplate(_ string) (*CompiledTemplate, error) { - if l.tpl != nil { - return l.tpl, nil - } - return nil, fmt.Errorf("FindCompiledTemplate is not supported") -} - -func (l NoopCompiledTemplateLoader) Load( - thread *starlark.Thread, module string) (starlark.StringDict, error) { - - return nil, fmt.Errorf("Load is not supported") -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/core/args.go b/vendor/github.com/k14s/ytt/pkg/template/core/args.go deleted file mode 100644 index dc022b1fc..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/core/args.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" -) - -func BoolArg(kwargs []starlark.Tuple, keyToFind string) (bool, error) { - for _, arg := range kwargs { - key, err := NewStarlarkValue(arg.Index(0)).AsString() - if err != nil { - return false, err - } - if key == keyToFind { - return NewStarlarkValue(arg.Index(1)).AsBool() - } - } - return false, nil -} - -func Int64Arg(kwargs []starlark.Tuple, keyToFind string) (int64, error) { - for _, arg := range kwargs { - key, err := NewStarlarkValue(arg.Index(0)).AsString() - if err != nil { - return 0, err - } - if key == keyToFind { - return NewStarlarkValue(arg.Index(1)).AsInt64() - } - } - return 0, nil -} - -func CheckArgNames(kwargs []starlark.Tuple, validKeys map[string]struct{}) error { - for _, arg := range kwargs { - key, err := NewStarlarkValue(arg.Index(0)).AsString() - if err != nil { - return err - } - if _, ok := validKeys[key]; !ok { - return fmt.Errorf("invalid argument name: %s", key) - } - } - return nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/core/errs.go b/vendor/github.com/k14s/ytt/pkg/template/core/errs.go deleted file mode 100644 index 3adf52205..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/core/errs.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "runtime/debug" -) - -type StarlarkFunc func(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) - -func ErrWrapper(wrappedFunc StarlarkFunc) StarlarkFunc { - return func(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (val starlark.Value, resultErr error) { - // Catch any panics to give a better contextual information - defer func() { - if err := recover(); err != nil { - if typedErr, ok := err.(error); ok { - resultErr = fmt.Errorf("%s (backtrace: %s)", typedErr, debug.Stack()) - } else { - resultErr = fmt.Errorf("(p) %s (backtrace: %s)", err, debug.Stack()) - } - } - }() - - val, err := wrappedFunc(thread, f, args, kwargs) - if err != nil { - return val, fmt.Errorf("%s: %s", f.Name(), err) - } - - return val, nil - } -} - -func ErrDescWrapper(desc string, wrappedFunc StarlarkFunc) StarlarkFunc { - return func(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - val, err := wrappedFunc(thread, f, args, kwargs) - if err != nil { - return val, fmt.Errorf("%s: %s", desc, err) - } - return val, nil - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/core/go_value.go b/vendor/github.com/k14s/ytt/pkg/template/core/go_value.go deleted file mode 100644 index d9b5ca331..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/core/go_value.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/orderedmap" -) - -type GoValueToStarlarkValueConversion interface { - AsStarlarkValue() starlark.Value -} - -type GoValue struct { - val interface{} - opts GoValueOpts -} - -type GoValueOpts struct { - MapIsStruct bool - Convert func(interface{}) (starlark.Value, bool) -} - -func NewGoValue(val interface{}) GoValue { - return GoValue{val, GoValueOpts{}} -} - -func NewGoValueWithOpts(val interface{}, opts GoValueOpts) GoValue { - return GoValue{val, opts} -} - -func (e GoValue) AsStarlarkValue() starlark.Value { - return e.asStarlarkValue(e.val) -} - -func (e GoValue) asStarlarkValue(val interface{}) starlark.Value { - if e.opts.Convert != nil { - converted, ok := e.opts.Convert(val) - if ok { - return converted - } - } - - if obj, ok := val.(GoValueToStarlarkValueConversion); ok { - return obj.AsStarlarkValue() - } - - switch typedVal := val.(type) { - case nil: - return starlark.None // TODO is it nil or is it None - - case bool: - return starlark.Bool(typedVal) - - case string: - return starlark.String(typedVal) - - case int: - return starlark.MakeInt(typedVal) - - case int64: - return starlark.MakeInt64(typedVal) - - case uint: - return starlark.MakeUint(typedVal) - - case uint64: - return starlark.MakeUint64(typedVal) - - case float64: - return starlark.Float(typedVal) - - case map[string]interface{}: - panic("Expected *orderedmap.Map instead of map[string]interface{} for conversion to starlark value") - - case map[interface{}]interface{}: - panic("Expected *orderedmap.Map instead of map[interface{}]interface{} for conversion to starlark value") - - case *orderedmap.Map: - return e.dictAsStarlarkValue(typedVal) - - case []interface{}: - return e.listAsStarlarkValue(typedVal) - - default: - panic(fmt.Sprintf("unknown type %T for conversion to starlark 
value", val)) - } -} - -func (e GoValue) dictAsStarlarkValue(val *orderedmap.Map) starlark.Value { - if e.opts.MapIsStruct { - data := orderedmap.NewMap() - val.Iterate(func(k, v interface{}) { - if keyStr, ok := k.(string); ok { - data.Set(keyStr, e.asStarlarkValue(v)) - } else { - panic(fmt.Sprintf("expected struct key %s to be string", k)) // TODO - } - }) - return &StarlarkStruct{data} - } - - result := &starlark.Dict{} - val.Iterate(func(k, v interface{}) { - result.SetKey(e.asStarlarkValue(k), e.asStarlarkValue(v)) - }) - return result -} - -func (e GoValue) listAsStarlarkValue(val []interface{}) *starlark.List { - result := []starlark.Value{} - for _, v := range val { - result = append(result, e.asStarlarkValue(v)) - } - return starlark.NewList(result) -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/core/starlark_noop.go b/vendor/github.com/k14s/ytt/pkg/template/core/starlark_noop.go deleted file mode 100644 index e7a215856..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/core/starlark_noop.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" -) - -type StarlarkNoop struct{} - -var _ starlark.Value = &StarlarkNoop{} - -func (s *StarlarkNoop) String() string { return "noop" } -func (s *StarlarkNoop) Type() string { return "noop" } -func (s *StarlarkNoop) Freeze() {} -func (s *StarlarkNoop) Truth() starlark.Bool { return false } -func (s *StarlarkNoop) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: noop") } diff --git a/vendor/github.com/k14s/ytt/pkg/template/core/starlark_struct.go b/vendor/github.com/k14s/ytt/pkg/template/core/starlark_struct.go deleted file mode 100644 index c1f527cb6..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/core/starlark_struct.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/orderedmap" -) - -type StarlarkStruct struct { - data *orderedmap.Map // [string]starlark.Value; most common usage: HasAttrs -} - -func NewStarlarkStruct(goStringKeyToStarlarkValue *orderedmap.Map) *StarlarkStruct { - return &StarlarkStruct{data: goStringKeyToStarlarkValue} -} - -var _ starlark.Value = (*StarlarkStruct)(nil) -var _ starlark.HasAttrs = (*StarlarkStruct)(nil) -var _ starlark.IterableMapping = (*StarlarkStruct)(nil) -var _ starlark.Sequence = (*StarlarkStruct)(nil) - -func (s *StarlarkStruct) String() string { return "struct(...)" } -func (s *StarlarkStruct) Type() string { return "struct" } -func (s *StarlarkStruct) Freeze() {} // TODO -func (s *StarlarkStruct) Truth() starlark.Bool { return s.data.Len() > 0 } -func (s *StarlarkStruct) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: struct") } -func (s *StarlarkStruct) Len() int { return s.data.Len() } - -// returns (nil, nil) if attribute not present -func (s *StarlarkStruct) Attr(name string) (starlark.Value, error) { - val, found := s.data.Get(name) - if found { - return val.(starlark.Value), nil - } - return nil, nil -} - -// callers must not modify the result. 
-func (s *StarlarkStruct) AttrNames() []string { - var keys []string - s.data.Iterate(func(key, _ interface{}) { - keys = append(keys, key.(string)) - }) - return keys -} - -func (s *StarlarkStruct) Get(key starlark.Value) (val starlark.Value, found bool, err error) { - attrName, err := NewStarlarkValue(key).AsString() - if err != nil { - return starlark.None, false, err - } - value, found := s.data.Get(attrName) - if found { - return value.(starlark.Value), true, nil - } - return starlark.None, false, nil -} - -func (s *StarlarkStruct) Iterate() starlark.Iterator { - return &StarlarkStructIterator{ - keys: s.data.Keys(), - } -} - -func (s *StarlarkStruct) Items() (items []starlark.Tuple) { - s.data.Iterate(func(key, val interface{}) { - items = append(items, starlark.Tuple{ - NewGoValue(key).AsStarlarkValue(), - val.(starlark.Value), - }) - }) - return -} - -type StarlarkStructIterator struct { - keys []interface{} - idx int -} - -var _ starlark.Iterator = &StarlarkStructIterator{} - -func (s *StarlarkStructIterator) Next(p *starlark.Value) bool { - if s.idx < len(s.keys) { - *p = NewGoValue(s.keys[s.idx]).AsStarlarkValue() - s.idx++ - return true - } - return false -} - -func (s *StarlarkStructIterator) Done() { /* intentionally blank. */ } diff --git a/vendor/github.com/k14s/ytt/pkg/template/core/starlark_value.go b/vendor/github.com/k14s/ytt/pkg/template/core/starlark_value.go deleted file mode 100644 index 11ef50eac..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/core/starlark_value.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/orderedmap" -) - -type StarlarkValueToGoValueConversion interface { - AsGoValue() (interface{}, error) -} - -var _ StarlarkValueToGoValueConversion = &StarlarkValue{} - -type UnconvertableStarlarkValue interface { - ConversionHint() string -} - -type StarlarkValue struct { - val starlark.Value -} - -func NewStarlarkValue(val starlark.Value) StarlarkValue { - return StarlarkValue{val} -} - -func (e StarlarkValue) AsGoValue() (interface{}, error) { - return e.asInterface(e.val) -} - -func (e StarlarkValue) AsString() (string, error) { - if typedVal, ok := e.val.(starlark.String); ok { - return string(typedVal), nil - } - return "", fmt.Errorf("expected a string, but was %s", e.val.Type()) -} - -func (e StarlarkValue) AsBool() (bool, error) { - if typedVal, ok := e.val.(starlark.Bool); ok { - return bool(typedVal), nil - } - return false, fmt.Errorf("expected starlark.Bool, but was %T", e.val) -} - -func (e StarlarkValue) AsInt64() (int64, error) { - if typedVal, ok := e.val.(starlark.Int); ok { - i1, ok := typedVal.Int64() - if ok { - return i1, nil - } - return 0, fmt.Errorf("expected int64 value") - } - return 0, fmt.Errorf("expected starlark.Int") -} - -func (e StarlarkValue) asInterface(val starlark.Value) (interface{}, error) { - if obj, ok := val.(UnconvertableStarlarkValue); ok { - return nil, fmt.Errorf("Unable to convert value: %s", obj.ConversionHint()) - } - if obj, ok := val.(StarlarkValueToGoValueConversion); ok { - return obj.AsGoValue() - } - - switch typedVal := val.(type) { - case nil, starlark.NoneType: - return nil, nil // TODO is it nil or is it None - - case starlark.Bool: - return bool(typedVal), nil - - case starlark.String: - return string(typedVal), nil - - case starlark.Int: - i1, ok := typedVal.Int64() - if ok { - return i1, nil - } - i2, ok := typedVal.Uint64() - 
if ok { - return i2, nil - } - panic("not sure how to get int") // TODO - - case starlark.Float: - return float64(typedVal), nil - - case *starlark.Dict: - return e.dictAsInterface(typedVal) - - case *StarlarkStruct: - return e.structAsInterface(typedVal) - - case *starlark.List: - return e.itearableAsInterface(typedVal) - - case starlark.Tuple: - return e.itearableAsInterface(typedVal) - - case *starlark.Set: - return e.itearableAsInterface(typedVal) - - default: - panic(fmt.Sprintf("unknown type %T for conversion to go value", val)) - } -} - -func (e StarlarkValue) dictAsInterface(val *starlark.Dict) (interface{}, error) { - result := orderedmap.NewMap() - for _, item := range val.Items() { - if item.Len() != 2 { - panic("dict item is not KV") - } - key, err := e.asInterface(item.Index(0)) - if err != nil { - return nil, err - } - value, err := e.asInterface(item.Index(1)) - if err != nil { - return nil, err - } - result.Set(key, value) - } - return result, nil -} - -func (e StarlarkValue) structAsInterface(val *StarlarkStruct) (interface{}, error) { - // TODO accessing privates - result := orderedmap.NewMap() - err := val.data.IterateErr(func(k, v interface{}) error { - value, err := e.asInterface(v.(starlark.Value)) - if err == nil { - result.Set(k, value) - } - return err - }) - if err != nil { - return nil, err - } - return result, nil -} - -func (e StarlarkValue) itearableAsInterface(iterable starlark.Iterable) (interface{}, error) { - iter := iterable.Iterate() - defer iter.Done() - - var result []interface{} - var x starlark.Value - for iter.Next(&x) { - elem, err := e.asInterface(x) - if err != nil { - return nil, err - } - result = append(result, elem) - } - return result, nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/evaluation_ctx.go b/vendor/github.com/k14s/ytt/pkg/template/evaluation_ctx.go deleted file mode 100644 index 3d6fd5a18..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/evaluation_ctx.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/template/core" - // Should not import template specific packages here (like yamlmeta) -) - -type EvaluationCtx struct { - dialect EvaluationCtxDialect - - nodes *Nodes - ancestors Ancestors - - pendingAnnotations map[NodeTag]NodeAnnotations - pendingMapItemKeys map[NodeTag]interface{} - - rootInit bool - rootNode EvaluationNode - parentNodes []EvaluationNode - parentNodeTags []NodeTag -} - -type EvaluationNode interface { - GetValues() []interface{} - SetValue(interface{}) error - AddValue(interface{}) error - ResetValue() - GetAnnotations() interface{} - SetAnnotations(interface{}) - DeepCopyAsInterface() interface{} // expects that result implements EvaluationNode -} - -type EvaluationCtxDialect interface { - PrepareNode(parentNode EvaluationNode, val EvaluationNode) error - SetMapItemKey(node EvaluationNode, val interface{}) error - Replace(parentNodes []EvaluationNode, val interface{}) error - ShouldWrapRootValue(val interface{}) bool - WrapRootValue(val interface{}) interface{} -} - -func (e *EvaluationCtx) TplReplace(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - nodes := append([]EvaluationNode{e.rootNode}, e.parentNodes...) 
- val, err := core.NewStarlarkValue(args.Index(0)).AsGoValue() - if err != nil { - return starlark.None, err - } - - err = e.dialect.Replace(nodes, val) - if err != nil { - return starlark.None, err - } - - return &core.StarlarkNoop{}, nil -} - -// args(nodeTag, value Value) -func (e *EvaluationCtx) TplSetNode(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() > 1 { - if _, noop := args.Index(1).(*core.StarlarkNoop); !noop { - val, err := core.NewStarlarkValue(args.Index(1)).AsGoValue() - if err != nil { - return starlark.None, err - } - err = e.parentNodes[len(e.parentNodes)-1].SetValue(val) - if err != nil { - return starlark.None, err - } - } - return starlark.None, nil - } - - // use default value from AST since no user provided value was given - nodeTag, err := NewNodeTagFromStarlarkValue(args.Index(0)) - if err != nil { - return starlark.None, err - } - - node, ok := e.nodes.FindNode(nodeTag) - if !ok { - return starlark.None, fmt.Errorf("expected to find %s", nodeTag) - } - - for _, val := range node.GetValues() { - err := e.parentNodes[len(e.parentNodes)-1].AddValue(val) - if err != nil { - return starlark.None, err - } - } - - return starlark.None, nil -} - -// args(nodeTag, value Value) -func (e *EvaluationCtx) TplSetMapItemKey( - thread *starlark.Thread, _ *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 2 { - return starlark.None, fmt.Errorf("expected exactly 2 arguments") - } - - nodeTag, err := NewNodeTagFromStarlarkValue(args.Index(0)) - if err != nil { - return starlark.None, err - } - - if _, found := e.pendingMapItemKeys[nodeTag]; found { - panic(fmt.Sprintf("expected to find not map item key for node %s", nodeTag)) - } - - e.pendingMapItemKeys[nodeTag], err = core.NewStarlarkValue(args.Index(1)).AsGoValue() - if err != nil { - return starlark.None, err - } - - return starlark.None, nil -} - -// args(args..., kwargs...) 
-func (e *EvaluationCtx) TplCollectNodeAnnotation( - thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - result := starlark.Tuple{args} - for _, kwarg := range kwargs { - result = append(result, kwarg) - } - return result, nil -} - -// args(nodeTag, name, values) -func (e *EvaluationCtx) TplStartNodeAnnotation( - thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 3 { - return starlark.None, fmt.Errorf("expected exactly 3 arguments") - } - - nodeTag, err := NewNodeTagFromStarlarkValue(args.Index(0)) - if err != nil { - return starlark.None, err - } - - annNameStr, err := core.NewStarlarkValue(args.Index(1)).AsString() - if err != nil { - return starlark.None, err - } - - annName := AnnotationName(annNameStr) - annVals := args.Index(2).(starlark.Tuple) - - kwargs = []starlark.Tuple{} - for _, val := range annVals[1:] { - kwargs = append(kwargs, val.(starlark.Tuple)) - } - - if _, found := e.pendingAnnotations[nodeTag]; !found { - e.pendingAnnotations[nodeTag] = NodeAnnotations{} - } - - // TODO overrides last set value - e.pendingAnnotations[nodeTag][annName] = NodeAnnotation{ - Args: annVals[0].(starlark.Tuple), - Kwargs: kwargs, - } - - return starlark.None, nil -} - -// args(nodeTag) -func (e *EvaluationCtx) TplStartNode( - thread *starlark.Thread, _ *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - nodeTag, err := NewNodeTagFromStarlarkValue(args.Index(0)) - if err != nil { - return starlark.None, err - } - - return starlark.None, e.startNode(nodeTag) -} - -func (e *EvaluationCtx) startNode(nodeTag NodeTag) error { - node, ok := e.nodes.FindNode(nodeTag) - if !ok { - return fmt.Errorf("expected to find %s", nodeTag) - } - - nodeVal := node.DeepCopyAsInterface().(EvaluationNode) - nodeVal.ResetValue() - - if nodeAnns, found := e.pendingAnnotations[nodeTag]; found { - delete(e.pendingAnnotations, nodeTag) - nodeVal.SetAnnotations(nodeAnns) - } - - if mapItemKey, found := e.pendingMapItemKeys[nodeTag]; found { - delete(e.pendingMapItemKeys, nodeTag) - e.dialect.SetMapItemKey(nodeVal, mapItemKey) - } - - if !e.rootInit { - if e.dialect.ShouldWrapRootValue(nodeVal) { - err := e.startNode(e.ancestors.FindParentTag(nodeTag)) - if err != nil { - return err - } - } else { - e.rootInit = true - e.rootNode = nodeVal - } - } - - if len(e.parentNodes) > 0 { - commonParentTag := e.ancestors.FindCommonParentTag( - e.parentNodeTags[len(e.parentNodeTags)-1], nodeTag) - e.unwindToTag(commonParentTag) - - err := e.dialect.PrepareNode(e.parentNodes[len(e.parentNodes)-1], nodeVal) - if err != nil { - return err - } - - err = e.parentNodes[len(e.parentNodes)-1].AddValue(nodeVal) - if err != nil { - return err - } - } - - e.parentNodeTags = append(e.parentNodeTags, nodeTag) - e.parentNodes = append(e.parentNodes, nodeVal) - - return nil -} - -func (e *EvaluationCtx) RootNode() interface{} { return e.rootNode } - -func (e *EvaluationCtx) RootNodeAsStarlarkValue() starlark.Value { - val := e.dialect.WrapRootValue(e.rootNode) - if typedVal, ok := val.(starlark.Value); ok { - return typedVal - } - return core.NewGoValue(val).AsStarlarkValue() -} - -func (e *EvaluationCtx) unwindToTag(tag NodeTag) { - for i, parentTag := range e.parentNodeTags { - if parentTag.Equals(tag) { - e.parentNodes = e.parentNodes[:i+1] - 
e.parentNodeTags = e.parentNodeTags[:i+1] - return - } - } - panic(fmt.Sprintf("expected to find %s when unwinding", tag)) -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/instructions.go b/vendor/github.com/k14s/ytt/pkg/template/instructions.go deleted file mode 100644 index bb056898a..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/instructions.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" - "strings" -) - -type InstructionSet struct { - SetCtxType InstructionOp - StartCtx InstructionOp - EndCtx InstructionOp - StartNodeAnnotation InstructionOp - CollectNodeAnnotation InstructionOp - StartNode InstructionOp - SetNode InstructionOp - SetMapItemKey InstructionOp - ReplaceNode InstructionOp -} - -var ( - globalInsSetID = 1 -) - -func NewInstructionSet() *InstructionSet { - globalInsSetID++ - uniqueID := globalInsSetID - return &InstructionSet{ - SetCtxType: InstructionOp{fmt.Sprintf("__ytt_tpl%d_set_ctx_type", uniqueID)}, - StartCtx: InstructionOp{fmt.Sprintf("__ytt_tpl%d_start_ctx", uniqueID)}, - EndCtx: InstructionOp{fmt.Sprintf("__ytt_tpl%d_end_ctx", uniqueID)}, - StartNodeAnnotation: InstructionOp{fmt.Sprintf("__ytt_tpl%d_start_node_annotation", uniqueID)}, - CollectNodeAnnotation: InstructionOp{fmt.Sprintf("__ytt_tpl%d_collect_node_annotation", uniqueID)}, - StartNode: InstructionOp{fmt.Sprintf("__ytt_tpl%d_start_node", uniqueID)}, - SetNode: InstructionOp{fmt.Sprintf("__ytt_tpl%d_set_node", uniqueID)}, - SetMapItemKey: InstructionOp{fmt.Sprintf("__ytt_tpl%d_set_map_item_key", uniqueID)}, - ReplaceNode: InstructionOp{fmt.Sprintf("__ytt_tpl%d_replace_node", uniqueID)}, - } -} - -func (is *InstructionSet) NewSetCtxType(dialect EvaluationCtxDialectName) Instruction { - return is.SetCtxType.WithArgs(`"` + string(dialect) + `"`) -} - -func (is *InstructionSet) NewStartCtx(dialect EvaluationCtxDialectName) Instruction { - return is.StartCtx.WithArgs(`"` + string(dialect) + `"`) -} - -func (is *InstructionSet) NewEndCtx() Instruction { - return is.EndCtx.WithArgs() -} - -func (is *InstructionSet) NewEndCtxNone() Instruction { - return is.EndCtx.WithArgs("None") -} - -func (is *InstructionSet) NewStartNodeAnnotation(nodeTag NodeTag, ann Annotation) Instruction { - collectedArgs := is.CollectNodeAnnotation.WithArgs(ann.Content).AsString() - return is.StartNodeAnnotation.WithArgs(nodeTag.AsString(), `"`+string(ann.Name)+`"`, collectedArgs) -} - -func (is *InstructionSet) NewStartNode(nodeTag NodeTag) Instruction { - return is.StartNode.WithArgs(nodeTag.AsString()) -} - -func (is *InstructionSet) NewSetNode(nodeTag NodeTag) Instruction { - return is.SetNode.WithArgs(nodeTag.AsString()) -} - -func (is *InstructionSet) NewSetNodeValue(nodeTag NodeTag, code string) Instruction { - return is.SetNode.WithArgs(nodeTag.AsString(), "("+code+")") -} - -func (is *InstructionSet) NewSetMapItemKey(nodeTag NodeTag, code string) Instruction { - return is.SetMapItemKey.WithArgs(nodeTag.AsString(), "("+code+")") -} - -func (is *InstructionSet) NewCode(code string) Instruction { - return Instruction{code: code} -} - -type InstructionOp struct { - Name string -} - -func (op InstructionOp) WithArgs(args ...string) Instruction { - return Instruction{op: op, code: fmt.Sprintf("%s(%s)", op.Name, strings.Join(args, ", "))} -} - -type Instruction struct { - op InstructionOp - code string -} - -func (i Instruction) Op() InstructionOp { return i.op } -func (i Instruction) AsString() string { return i.code } 
- -func (i Instruction) WithDebug(info string) Instruction { - return Instruction{op: i.op, code: i.code + " # " + info} -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/meta.go b/vendor/github.com/k14s/ytt/pkg/template/meta.go deleted file mode 100644 index 4459c02b0..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/meta.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" - "strings" -) - -type AnnotationName string -type AnnotationNs string - -const ( - AnnotationNameComment AnnotationName = "comment" -) - -type Meta struct { - Annotations []*Annotation -} - -type Annotation struct { - Name AnnotationName // eg template/code - Content string // eg if True: -} - -// Supported formats: -// "! comment" -// "@comment content" -// "@ if True:" -// "@template/code" -// "@template/code if True:" -// "@text/trim-left,text/trim-right,template/code if True:" - -type MetaOpts struct { - IgnoreUnknown bool -} - -func NewMetaFromString(data string, opts MetaOpts) (Meta, error) { - meta := Meta{} - - // TODO better error messages? - switch { - case len(data) > 0 && data[0] == '!': - meta.Annotations = []*Annotation{{ - Name: AnnotationNameComment, - Content: data[1:], - }} - - case len(data) > 0 && data[0] == '@': - pieces := strings.SplitN(data[1:], " ", 2) - for _, name := range strings.Split(pieces[0], ",") { - meta.Annotations = append(meta.Annotations, &Annotation{ - Name: AnnotationName(name), - }) - } - if len(pieces) == 2 { - meta.Annotations[len(meta.Annotations)-1].Content = pieces[1] - } - - default: - if opts.IgnoreUnknown { - meta.Annotations = []*Annotation{{ - Name: AnnotationNameComment, - Content: data, - }} - } else { - return Meta{}, fmt.Errorf("Unrecognized comment type (expected '#@' or '#!')") - } - } - - return meta, nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/nodes.go b/vendor/github.com/k14s/ytt/pkg/template/nodes.go deleted file mode 100644 index 06e37094d..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/nodes.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "strconv" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/template/core" -) - -var ( - NodeTagRoot = NodeTag{-100} -) - -type Nodes struct { - id int - tagToNode map[NodeTag]EvaluationNode - childToParentTag map[NodeTag]NodeTag -} - -func NewNodes() *Nodes { - return &Nodes{ - tagToNode: map[NodeTag]EvaluationNode{}, - childToParentTag: map[NodeTag]NodeTag{}, - } -} - -func (n *Nodes) Ancestors() Ancestors { return NewAncestors(n.childToParentTag) } - -func (n *Nodes) AddRootNode(node EvaluationNode) NodeTag { - n.id++ - tag := NodeTag{n.id} - n.tagToNode[tag] = node - n.childToParentTag[tag] = NodeTagRoot - return tag -} - -func (n *Nodes) AddNode(node EvaluationNode, parentTag NodeTag) NodeTag { - n.id++ - tag := NodeTag{n.id} - n.tagToNode[tag] = node - n.childToParentTag[tag] = parentTag - return tag -} - -func (n *Nodes) FindNode(tag NodeTag) (EvaluationNode, bool) { - node, ok := n.tagToNode[tag] - return node, ok -} - -type NodeTag struct { - id int -} - -func NewNodeTag(id int) NodeTag { return NodeTag{id} } - -func NewNodeTagFromStarlarkValue(val starlark.Value) (NodeTag, error) { - id, err := core.NewStarlarkValue(val).AsInt64() - if err != nil { - return NodeTag{}, err - } - return NodeTag{int(id)}, nil -} - -func (t NodeTag) Equals(other NodeTag) bool { return t.id == other.id } -func (t NodeTag) String() string { return "node tag " + strconv.Itoa(t.id) } -func (t NodeTag) AsString() string { return strconv.Itoa(t.id) } diff --git a/vendor/github.com/k14s/ytt/pkg/template/program_ast.go b/vendor/github.com/k14s/ytt/pkg/template/program_ast.go deleted file mode 100644 index e04c01366..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/program_ast.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "fmt" - - "github.com/k14s/starlark-go/syntax" -) - -type ProgramAST struct { - f *syntax.File - defNestingUsesTpl []bool - ctxType *syntax.Literal - instructions *InstructionSet -} - -func NewProgramAST(f *syntax.File, instructions *InstructionSet) *ProgramAST { - return &ProgramAST{f: f, instructions: instructions} -} - -func (r *ProgramAST) InsertTplCtxs() { - r.stmts(r.f.Stmts) -} - -func (r *ProgramAST) stmts(stmts []syntax.Stmt) { - for _, stmt := range stmts { - r.stmt(stmt) - } -} - -func (r *ProgramAST) stmt(stmt syntax.Stmt) { - switch stmt := stmt.(type) { - case *syntax.ExprStmt: - r.expr(stmt.X) - - case *syntax.BranchStmt: - // do nothing - - case *syntax.IfStmt: - r.expr(stmt.Cond) - r.stmts(stmt.True) - r.stmts(stmt.False) - - case *syntax.AssignStmt: - r.expr(stmt.RHS) - - case *syntax.DefStmt: - r.function(stmt.Def, stmt.Name.Name, stmt) - - case *syntax.ForStmt: - r.expr(stmt.X) - r.stmts(stmt.Body) - - case *syntax.WhileStmt: - r.expr(stmt.Cond) - r.stmts(stmt.Body) - - case *syntax.ReturnStmt: - if len(r.defNestingUsesTpl) > 0 { - if r.defNestingUsesTpl[len(r.defNestingUsesTpl)-1] { - args := []syntax.Expr{} - if stmt.Result != nil { - args = []syntax.Expr{stmt.Result} - } - stmt.Result = &syntax.CallExpr{ - Fn: &syntax.Ident{Name: r.instructions.EndCtx.Name}, - Args: args, - } - } - } else { - if stmt.Result != nil { - r.expr(stmt.Result) - } - } - - case *syntax.LoadStmt: - // do nothing - - default: - panic(fmt.Sprintf("unexpected stmt %T", stmt)) - } -} - -func (r *ProgramAST) expr(e syntax.Expr) { - switch e := e.(type) { - case *syntax.Ident: - if e.Name == r.instructions.StartNode.Name || e.Name == r.instructions.SetNode.Name { - if len(r.defNestingUsesTpl) > 0 { - r.defNestingUsesTpl[len(r.defNestingUsesTpl)-1] = true - } - } - - case *syntax.Literal: - // do nothing - - case *syntax.ListExpr: - for _, x := range e.List { - r.expr(x) - } - - case *syntax.CondExpr: - r.expr(e.Cond) - r.expr(e.True) - r.expr(e.False) - - case *syntax.IndexExpr: - r.expr(e.X) - r.expr(e.Y) - - case *syntax.DictEntry: - r.expr(e.Key) - r.expr(e.Value) - - case *syntax.SliceExpr: - r.expr(e.X) - if e.Lo != nil { - r.expr(e.Lo) - } - if e.Hi != nil { - r.expr(e.Hi) - } - if e.Step != nil { - r.expr(e.Step) - } - - case *syntax.Comprehension: - // The 'in' operand of the first clause (always a ForClause) - // is resolved in the outer block; consider: [x for x in x]. 
- clause := e.Clauses[0].(*syntax.ForClause) - r.expr(clause.X) - - for _, clause := range e.Clauses[1:] { - switch clause := clause.(type) { - case *syntax.IfClause: - r.expr(clause.Cond) - case *syntax.ForClause: - r.expr(clause.X) - } - } - r.expr(e.Body) - - case *syntax.TupleExpr: - for _, x := range e.List { - r.expr(x) - } - - case *syntax.DictExpr: - for _, entry := range e.List { - entry := entry.(*syntax.DictEntry) - r.expr(entry.Key) - r.expr(entry.Value) - } - - case *syntax.UnaryExpr: - r.expr(e.X) - - case *syntax.BinaryExpr: - r.expr(e.X) - r.expr(e.Y) - - case *syntax.DotExpr: - r.expr(e.X) - - case *syntax.CallExpr: - if ident, ok := e.Fn.(*syntax.Ident); ok { - if ident.Name == r.instructions.SetCtxType.Name { - r.ctxType = e.Args[0].(*syntax.Literal) - } - } - r.expr(e.Fn) - for _, arg := range e.Args { - if unop, ok := arg.(*syntax.UnaryExpr); ok && unop.Op == syntax.STARSTAR { - r.expr(arg) - } else if ok && unop.Op == syntax.STAR { - r.expr(arg) - } else if binop, ok := arg.(*syntax.BinaryExpr); ok && binop.Op == syntax.EQ { - r.expr(binop.Y) - } else { - r.expr(arg) - } - } - - case *syntax.LambdaExpr: - r.expr(e.Body) - - case *syntax.ParenExpr: - r.expr(e.X) - - default: - panic(fmt.Sprintf("unexpected expr %T", e)) - } -} - -func (r *ProgramAST) function(pos syntax.Position, name string, function *syntax.DefStmt) { - r.defNestingUsesTpl = append(r.defNestingUsesTpl, false) - - for _, param := range function.Params { - if binary, ok := param.(*syntax.BinaryExpr); ok { - r.expr(binary.Y) - } - } - - r.stmts(function.Body) - - if r.defNestingUsesTpl[len(r.defNestingUsesTpl)-1] { - r.addTplCtxToFunction(function) - } - - r.defNestingUsesTpl = r.defNestingUsesTpl[:len(r.defNestingUsesTpl)-1] -} - -func (r *ProgramAST) addTplCtxToFunction(function *syntax.DefStmt) { - if r.ctxType == nil { - panic("expected r.ctxType to be set") - } - - startStmt := &syntax.ExprStmt{ - X: &syntax.CallExpr{ - Fn: &syntax.Ident{Name: r.instructions.StartCtx.Name}, - Args: []syntax.Expr{r.ctxType}, - }, - } - - endStmt := &syntax.ReturnStmt{ - Result: &syntax.CallExpr{ - Fn: &syntax.Ident{Name: r.instructions.EndCtx.Name}, - }, - } - - function.Body = append(append([]syntax.Stmt{startStmt}, function.Body...), endStmt) -} diff --git a/vendor/github.com/k14s/ytt/pkg/template/source.go b/vendor/github.com/k14s/ytt/pkg/template/source.go deleted file mode 100644 index 0061ee14b..000000000 --- a/vendor/github.com/k14s/ytt/pkg/template/source.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package template - -import ( - "bytes" - - "github.com/k14s/ytt/pkg/filepos" -) - -type Line struct { - Instruction Instruction - SourceLine *SourceLine -} - -type SourceLine struct { - Position *filepos.Position - Content string - Selection *SourceLine -} - -func NewCodeFromBytes(bs []byte, instructions *InstructionSet) []Line { - return NewCodeFromBytesAtPosition(bs, filepos.NewPosition(1), instructions) -} - -func NewCodeFromBytesAtPosition(bs []byte, pos *filepos.Position, instructions *InstructionSet) []Line { - var result []Line - - for i, line := range bytes.Split(bs, []byte("\n")) { - result = append(result, Line{ - Instruction: instructions.NewCode(string(line)), - SourceLine: NewSourceLine(pos.DeepCopyWithLineOffset(i), string(line)), - }) - } - - return result -} - -func NewSourceLine(pos *filepos.Position, content string) *SourceLine { - if !pos.IsKnown() { - panic("Expected source line position to be known") - } - return &SourceLine{Position: pos, Content: content} -} diff --git a/vendor/github.com/k14s/ytt/pkg/texttemplate/ast.go b/vendor/github.com/k14s/ytt/pkg/texttemplate/ast.go deleted file mode 100644 index 3575d8c9b..000000000 --- a/vendor/github.com/k14s/ytt/pkg/texttemplate/ast.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package texttemplate - -import ( - "fmt" - - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/template" -) - -type NodeRoot struct { - Items []interface{} - - annotations interface{} -} - -type NodeText struct { - Position *filepos.Position - Content string - - annotations interface{} - startOffset int -} - -type NodeCode struct { - Position *filepos.Position - Content string - - startOffset int -} - -// NodeCode is not a evaluation node -var _ = []template.EvaluationNode{&NodeRoot{}, &NodeText{}} - -func (n *NodeRoot) AsString() string { - var result string - for _, item := range n.Items { - switch typedItem := item.(type) { - case *NodeText: - result += typedItem.Content - default: - panic(fmt.Sprintf("unknown node type %T", typedItem)) - } - } - return result -} - -func (n *NodeRoot) GetValues() []interface{} { - var result []interface{} - for _, item := range n.Items { - result = append(result, item) - } - return result -} - -func (n *NodeRoot) SetValue(val interface{}) error { - return fmt.Errorf("cannot set value on a noderoot") -} -func (n *NodeRoot) AddValue(val interface{}) error { n.Items = append(n.Items, val); return nil } -func (n *NodeRoot) ResetValue() { n.Items = nil } - -func (n *NodeRoot) DeepCopyAsInterface() interface{} { - var newItems []interface{} - for _, item := range n.Items { - if typedItem, ok := item.(interface{ DeepCopyAsInterface() interface{} }); ok { - newItems = append(newItems, typedItem.DeepCopyAsInterface()) - } else { - panic(fmt.Sprintf("unknown node type %T", typedItem)) - } - } - return &NodeRoot{Items: newItems, annotations: annotationsDeepCopy(n.annotations)} -} - -func (n *NodeRoot) GetAnnotations() interface{} { return n.annotations } -func (n *NodeRoot) SetAnnotations(anns interface{}) { n.annotations = anns } - -func (n *NodeText) GetValues() []interface{} { return []interface{}{n.Content} } - -func (n *NodeText) SetValue(val interface{}) error { - if typedVal, ok := val.(string); ok { - n.Content = typedVal - return nil - } - return fmt.Errorf("cannot set non-string value (%T), consider using str(...) 
to convert to string", val) -} - -func (n *NodeText) AddValue(val interface{}) error { n.Content = val.(string); return nil } -func (n *NodeText) ResetValue() { n.Content = "" } - -func (n *NodeText) DeepCopyAsInterface() interface{} { - return &NodeText{Position: n.Position, Content: n.Content, annotations: annotationsDeepCopy(n.annotations)} -} - -func (n *NodeText) GetAnnotations() interface{} { return n.annotations } -func (n *NodeText) SetAnnotations(anns interface{}) { n.annotations = anns } - -func annotationsDeepCopy(anns interface{}) interface{} { - if anns == nil { - return nil - } - return anns.(interface{ DeepCopyAsInterface() interface{} }).DeepCopyAsInterface() -} diff --git a/vendor/github.com/k14s/ytt/pkg/texttemplate/evaluation_ctx.go b/vendor/github.com/k14s/ytt/pkg/texttemplate/evaluation_ctx.go deleted file mode 100644 index c021c874d..000000000 --- a/vendor/github.com/k14s/ytt/pkg/texttemplate/evaluation_ctx.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package texttemplate - -import ( - "fmt" - - "github.com/k14s/ytt/pkg/template" -) - -const ( - EvaluationCtxDialectName template.EvaluationCtxDialectName = "text" -) - -type EvaluationCtx struct{} - -var _ template.EvaluationCtxDialect = EvaluationCtx{} - -func (e EvaluationCtx) PrepareNode( - parentNode template.EvaluationNode, node template.EvaluationNode) error { - - return nil -} - -func (e EvaluationCtx) SetMapItemKey(node template.EvaluationNode, val interface{}) error { - return fmt.Errorf("unsupported operation") -} - -func (e EvaluationCtx) Replace( - parentNodes []template.EvaluationNode, val interface{}) error { - - return fmt.Errorf("unsupported operation") -} - -func (e EvaluationCtx) ShouldWrapRootValue(nodeVal interface{}) bool { - _, root := nodeVal.(*NodeRoot) - return !root -} - -func (e EvaluationCtx) WrapRootValue(val interface{}) interface{} { - if typedVal, ok := val.(*NodeRoot); ok { - return typedVal.AsString() - } - panic(fmt.Sprintf("Unexpected root value %T", val)) -} diff --git a/vendor/github.com/k14s/ytt/pkg/texttemplate/metas.go b/vendor/github.com/k14s/ytt/pkg/texttemplate/metas.go deleted file mode 100644 index 1705f4111..000000000 --- a/vendor/github.com/k14s/ytt/pkg/texttemplate/metas.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package texttemplate - -import ( - "strings" -) - -type NodeCodeMeta struct { - *NodeCode -} - -func (p NodeCodeMeta) ShoudTrimSpaceLeft() bool { - return strings.HasPrefix(p.Content, "-") -} - -func (p NodeCodeMeta) ShouldTrimSpaceRight() bool { - return strings.HasSuffix(p.Content, "-") -} - -func (p NodeCodeMeta) ShouldPrint() bool { - return strings.HasPrefix(p.Content, "=") || strings.HasPrefix(p.Content, "-=") -} - -func (p NodeCodeMeta) Code() string { - result := strings.TrimPrefix(p.Content, "-=") // longer first - result = strings.TrimPrefix(result, "=") - result = strings.TrimPrefix(result, "-") - return strings.TrimSuffix(result, "-") -} diff --git a/vendor/github.com/k14s/ytt/pkg/texttemplate/parser.go b/vendor/github.com/k14s/ytt/pkg/texttemplate/parser.go deleted file mode 100644 index b6542e899..000000000 --- a/vendor/github.com/k14s/ytt/pkg/texttemplate/parser.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package texttemplate - -import ( - "fmt" - - "github.com/k14s/ytt/pkg/filepos" -) - -type Parser struct { - associatedName string -} - -func NewParser() *Parser { - return &Parser{} -} - -func (p *Parser) Parse(dataBs []byte, associatedName string) (*NodeRoot, error) { - return p.parse(dataBs, associatedName, filepos.NewPosition(1)) -} - -func (p *Parser) ParseWithPosition(dataBs []byte, associatedName string, startPos *filepos.Position) (*NodeRoot, error) { - return p.parse(dataBs, associatedName, startPos) -} - -func (p *Parser) parse(dataBs []byte, associatedName string, startPos *filepos.Position) (*NodeRoot, error) { - p.associatedName = associatedName - - var lastChar rune - var currLine int = 1 - var currCol int = 1 - - if startPos.IsKnown() { - currLine = startPos.LineNum() - } - - var lastNode interface{} = &NodeText{Position: p.newPosition(currLine)} - var nodes []interface{} - - data := string(dataBs) - - for i, currChar := range data { - if lastChar == '(' && currChar == '@' { - switch typedLastNode := lastNode.(type) { - case *NodeText: - typedLastNode.Content = data[typedLastNode.startOffset : i-1] - nodes = append(nodes, lastNode) - lastNode = &NodeCode{ - Position: p.newPosition(currLine), - startOffset: i + 1, - } - case *NodeCode: - return nil, fmt.Errorf( - "Unexpected code opening '(@' at line %d col %d", currLine, currCol) - default: - panic(fmt.Sprintf("unknown string template piece %T", typedLastNode)) - } - } - - if lastChar == '@' && currChar == ')' { - switch typedLastNode := lastNode.(type) { - case *NodeText: - return nil, fmt.Errorf( - "Unexpected code closing '@)' at line %d col %d", currLine, currCol) - case *NodeCode: - typedLastNode.Content = data[typedLastNode.startOffset : i-1] - nodes = append(nodes, lastNode) - lastNode = &NodeText{ - Position: p.newPosition(currLine), - startOffset: i + 1, - } - default: - panic(fmt.Sprintf("unknown string template piece %T", typedLastNode)) - } - } - - if currChar == '\n' { - currLine++ - currCol = 1 - } else { - currCol++ - } - - lastChar = currChar - } - - // close last node - switch typedLastNode := lastNode.(type) { - case *NodeText: - typedLastNode.Content = data[typedLastNode.startOffset:len(data)] - nodes = append(nodes, lastNode) - case *NodeCode: - return nil, fmt.Errorf( - "Missing code closing '@)' at line %d col %d", currLine, currCol) - default: - panic(fmt.Sprintf("unknown string template piece %T", typedLastNode)) - } - - return &NodeRoot{Items: nodes}, nil -} - -func (p *Parser) newPosition(line int) *filepos.Position { - pos := filepos.NewPosition(line) - pos.SetFile(p.associatedName) - return pos -} diff --git a/vendor/github.com/k14s/ytt/pkg/texttemplate/template.go b/vendor/github.com/k14s/ytt/pkg/texttemplate/template.go deleted file mode 100644 index 375d90825..000000000 --- a/vendor/github.com/k14s/ytt/pkg/texttemplate/template.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package texttemplate - -import ( - "fmt" - "strings" - "unicode" - - "github.com/k14s/ytt/pkg/template" -) - -type Template struct { - name string -} - -func NewTemplate(name string) *Template { - return &Template{name: name} -} - -func (e *Template) CompileInline(rootNode *NodeRoot, - instructions *template.InstructionSet, nodes *template.Nodes) ([]template.Line, error) { - - return e.compile(rootNode, instructions, nodes) -} - -func (e *Template) Compile(rootNode *NodeRoot) (*template.CompiledTemplate, error) { - instructions := template.NewInstructionSet() - nodes := template.NewNodes() - - code, err := e.compile(rootNode, instructions, nodes) - if err != nil { - return nil, err - } - - return template.NewCompiledTemplate( - e.name, code, instructions, nodes, - template.EvaluationCtxDialects{ - EvaluationCtxDialectName: EvaluationCtx{}, - }, - ), nil -} - -func (e *Template) compile(rootNode *NodeRoot, - instructions *template.InstructionSet, nodes *template.Nodes) ([]template.Line, error) { - - code := []template.Line{} - rootNodeTag := nodes.AddRootNode(&NodeRoot{}) // fresh copy to avoid leaking out NodeCode - - code = append(code, []template.Line{ - {Instruction: instructions.NewSetCtxType(EvaluationCtxDialectName)}, - {Instruction: instructions.NewStartCtx(EvaluationCtxDialectName)}, - {Instruction: instructions.NewStartNode(rootNodeTag)}, - }...) - - var trimSpaceRight bool - - for i, node := range rootNode.Items { - switch typedNode := node.(type) { - case *NodeText: - if trimSpaceRight { - typedNode.Content = strings.TrimLeftFunc(typedNode.Content, unicode.IsSpace) - trimSpaceRight = false - } - - nodeTag := nodes.AddNode(typedNode, rootNodeTag) - - code = append(code, template.Line{ - Instruction: instructions.NewStartNode(nodeTag), - SourceLine: template.NewSourceLine(typedNode.Position, typedNode.Content), - }) - - code = append(code, template.Line{ - Instruction: instructions.NewSetNode(nodeTag), - SourceLine: template.NewSourceLine(typedNode.Position, typedNode.Content), - }) - - case *NodeCode: - meta := NodeCodeMeta{typedNode} - trimSpaceRight = meta.ShouldTrimSpaceRight() - - if meta.ShoudTrimSpaceLeft() && i != 0 { - if typedLastNode, ok := rootNode.Items[i-1].(*NodeText); ok { - typedLastNode.Content = strings.TrimRightFunc( - typedLastNode.Content, unicode.IsSpace) - } - } - - if meta.ShouldPrint() { - nodeTag := nodes.AddNode(&NodeText{}, rootNodeTag) - - code = append(code, template.Line{ - Instruction: instructions.NewStartNode(nodeTag), - SourceLine: template.NewSourceLine(typedNode.Position, typedNode.Content), - }) - - code = append(code, template.Line{ - Instruction: instructions.NewSetNodeValue(nodeTag, meta.Code()), - SourceLine: template.NewSourceLine(typedNode.Position, typedNode.Content), - }) - } else { - code = append(code, template.NewCodeFromBytesAtPosition( - []byte(meta.Code()), typedNode.Position, instructions)...) - } - - default: - panic(fmt.Sprintf("unknown string template node %T", typedNode)) - } - } - - code = append(code, template.Line{ - Instruction: instructions.NewEndCtx(), - }) - - return code, nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/version/version.go b/vendor/github.com/k14s/ytt/pkg/version/version.go deleted file mode 100644 index 3ca685ad0..000000000 --- a/vendor/github.com/k14s/ytt/pkg/version/version.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package version - -var Version = "develop" diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/data_loader.go b/vendor/github.com/k14s/ytt/pkg/workspace/data_loader.go deleted file mode 100644 index 21f55f4d9..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/data_loader.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package workspace - -import ( - "github.com/k14s/ytt/pkg/files" -) - -type DataLoader struct { - libraryCtx LibraryExecutionContext -} - -func (l DataLoader) FilePaths(path string) ([]string, error) { - library := l.libraryCtx.Current - fromRoot := false - - if files.IsRootPath(path) { - fromRoot = true - library = l.libraryCtx.Root - path = files.StripRootPath(path) - } - - if len(path) > 0 { - foundLibrary, err := library.FindLibrary(path) - if err != nil { - return nil, err - } - library = foundLibrary - } - - result := []string{} - for _, fileInLib := range library.ListAccessibleFiles() { - path := fileInLib.RelativePath() - // Make it compatible with FileData() - if fromRoot { - path = files.MakeRootPath(path) - } - result = append(result, path) - } - return result, nil -} - -func (l DataLoader) FileData(path string) ([]byte, error) { - library := l.libraryCtx.Current - - if files.IsRootPath(path) { - library = l.libraryCtx.Root - path = files.StripRootPath(path) - } - - fileInLib, err := library.FindFile(path) - if err != nil { - return nil, err - } - - fileBs, err := fileInLib.File.Bytes() - if err != nil { - return nil, err - } - - return fileBs, nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/data_values.go b/vendor/github.com/k14s/ytt/pkg/workspace/data_values.go deleted file mode 100644 index 58c61813f..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/data_values.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package workspace - -import ( - "fmt" - "strings" - - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/workspace/ref" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -const ( - AnnotationLibraryRef = "library/ref" - - dvsLibrarySep = "@" -) - -type DataValues struct { - Doc *yamlmeta.Document - AfterLibMod bool - used bool - - originalLibRef []ref.LibraryRef - libRef []ref.LibraryRef -} - -func NewDataValues(doc *yamlmeta.Document) (*DataValues, error) { - libRef, afterLibMod, err := parseDVAnnotations(ref.LibraryRefExtractor{}, doc) - if err != nil { - return nil, err - } - - return &DataValues{Doc: doc, AfterLibMod: afterLibMod, libRef: libRef, originalLibRef: libRef}, nil -} - -func NewEmptyDataValues() *DataValues { - return &DataValues{Doc: newEmptyDataValuesDocument()} -} - -func newEmptyDataValuesDocument() *yamlmeta.Document { - return &yamlmeta.Document{ - Value: &yamlmeta.Map{}, - Position: filepos.NewUnknownPosition(), - } -} - -type ExtractLibRefs interface { - FromStr(string) ([]ref.LibraryRef, error) - FromAnnotation(template.NodeAnnotations) ([]ref.LibraryRef, error) -} - -func NewDataValuesWithLib(libRefs ExtractLibRefs, doc *yamlmeta.Document, libRefStr string) (*DataValues, error) { - libRefsFromStr, err := libRefs.FromStr(libRefStr) - if err != nil { - return nil, err - } - - libRefsFromAnnotation, afterLibMod, err := parseDVAnnotations(libRefs, doc) - if err != nil { - return nil, err - } else if len(libRefsFromAnnotation) > 0 { - panic(fmt.Sprintf("Library was provided as arg as well as with %s annotation", AnnotationLibraryRef)) - } - - return &DataValues{Doc: doc, AfterLibMod: afterLibMod, libRef: libRefsFromStr, originalLibRef: libRefsFromStr}, nil -} - -func NewDataValuesWithOptionalLib(doc *yamlmeta.Document, libRefStr string) (*DataValues, error) { - if len(libRefStr) > 0 { - return NewDataValuesWithLib(ref.LibraryRefExtractor{}, doc, libRefStr) - } - return NewDataValues(doc) -} - -func (dvd *DataValues) IsUsed() bool { return dvd.used } -func (dvd *DataValues) markUsed() { dvd.used = true } - -func (dvd *DataValues) Desc() string { - // TODO: Update to output file location of annotation. If no annotation use doc position although these will always be used - var desc []string - for _, refPiece := range dvd.originalLibRef { - desc = append(desc, refPiece.AsString()) - } - return fmt.Sprintf("Data Value belonging to library '%s%s' on %s", dvsLibrarySep, - strings.Join(desc, dvsLibrarySep), dvd.Doc.Position.AsString()) -} - -func (dvd *DataValues) IntendedForAnotherLibrary() bool { return len(dvd.libRef) > 0 } - -func (dvd *DataValues) UsedInLibrary(expectedRefPiece ref.LibraryRef) *DataValues { - if len(dvd.libRef) == 0 { - dvd.markUsed() - return dvd.deepCopy() - } - if !dvd.libRef[0].Matches(expectedRefPiece) { - return nil - } - dvd.markUsed() - childDV := dvd.deepCopy() - childDV.libRef = childDV.libRef[1:] - return childDV -} - -func (dvd *DataValues) deepCopy() *DataValues { - var copiedPieces []ref.LibraryRef - copiedPieces = append(copiedPieces, dvd.libRef...) 
- return &DataValues{Doc: dvd.Doc.DeepCopy(), AfterLibMod: dvd.AfterLibMod, - libRef: copiedPieces, originalLibRef: dvd.originalLibRef} -} - -func parseDVAnnotations(libRefs ExtractLibRefs, doc *yamlmeta.Document) ([]ref.LibraryRef, bool, error) { - var afterLibMod bool - anns := template.NewAnnotations(doc) - - libRef, err := libRefs.FromAnnotation(anns) - if err != nil { - return nil, false, err - } - - for _, kwarg := range anns.Kwargs(AnnotationDataValues) { - kwargName, err := core.NewStarlarkValue(kwarg[0]).AsString() - if err != nil { - return nil, false, err - } - - switch kwargName { - case "after_library_module": - afterLibMod, err = core.NewStarlarkValue(kwarg[1]).AsBool() - if err != nil { - return nil, false, err - } else if len(libRef) == 0 { - return nil, false, fmt.Errorf("Annotation %s: Expected kwarg 'after_library_module' to be used with %s annotation", - AnnotationDataValues, AnnotationLibraryRef) - } - default: - return nil, false, fmt.Errorf("Unknown kwarg %s for annotation %s", kwargName, AnnotationDataValues) - } - } - return libRef, afterLibMod, nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/data_values_pre_processing.go b/vendor/github.com/k14s/ytt/pkg/workspace/data_values_pre_processing.go deleted file mode 100644 index c984ff7ae..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/data_values_pre_processing.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package workspace - -import ( - "fmt" - "strings" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/schema" - "github.com/k14s/ytt/pkg/yamlmeta" - yttoverlay "github.com/k14s/ytt/pkg/yttlibrary/overlay" -) - -type DataValuesPreProcessing struct { - valuesFiles []*FileInLibrary - valuesOverlays []*DataValues - schema Schema - loader *TemplateLoader - IgnoreUnknownComments bool // TODO remove? -} - -func (o DataValuesPreProcessing) Apply() (*DataValues, []*DataValues, error) { - files := append([]*FileInLibrary{}, o.valuesFiles...) - - // Respect assigned file order for data values overlaying to succeed - SortFilesInLibrary(files) - - dataValues, libraryDataValues, err := o.apply(files) - if err != nil { - errMsg := "Overlaying data values (in following order: %s): %s" - return nil, nil, fmt.Errorf(errMsg, o.allFileDescs(files), err) - } - - return dataValues, libraryDataValues, nil -} - -func (o DataValuesPreProcessing) apply(files []*FileInLibrary) (*DataValues, []*DataValues, error) { - allDvs, err := o.collectDataValuesDocs(files) - if err != nil { - return nil, nil, err - } - - // merge all Data Values YAML documents into one - var otherLibraryDVs []*DataValues - var resultDVsDoc *yamlmeta.Document - for _, dv := range allDvs { - if dv.IntendedForAnotherLibrary() { - otherLibraryDVs = append(otherLibraryDVs, dv) - continue - } - - if resultDVsDoc == nil { - resultDVsDoc = dv.Doc - } else { - resultDVsDoc, err = o.overlay(resultDVsDoc, dv.Doc) - if err != nil { - return nil, nil, err - } - } - typeCheck := o.typeAndCheck(resultDVsDoc) - if len(typeCheck.Violations) > 0 { - return nil, nil, schema.NewSchemaError("One or more data values were invalid", typeCheck.Violations...) 
- } - } - - if resultDVsDoc == nil { - resultDVsDoc = newEmptyDataValuesDocument() - } - dataValues, err := NewDataValues(resultDVsDoc) - if err != nil { - return nil, nil, err - } - return dataValues, otherLibraryDVs, nil -} - -func (o DataValuesPreProcessing) collectDataValuesDocs(files []*FileInLibrary) ([]*DataValues, error) { - var allDvs []*DataValues - if defaults := o.schema.DefaultDataValues(); defaults != nil { - dv, err := NewDataValues(defaults) - if err != nil { - return nil, err - } - // o.schema has already been determined to be the schema for the current library. - // set the default data value libref to nil, signaling that it is for the current library. - dv.libRef = nil - allDvs = append(allDvs, dv) - } - for _, fileInLib := range files { - docs, err := o.extractDataValueDocs(fileInLib) - if err != nil { - return nil, fmt.Errorf("Templating file '%s': %s", fileInLib.File.RelativePath(), err) - } - for _, doc := range docs { - dv, err := NewDataValues(doc) - if err != nil { - return nil, err - } - allDvs = append(allDvs, dv) - } - } - allDvs = append(allDvs, o.valuesOverlays...) - return allDvs, nil -} - -func (o DataValuesPreProcessing) typeAndCheck(dataValuesDoc *yamlmeta.Document) yamlmeta.TypeCheck { - chk := o.schema.AssignType(dataValuesDoc) - if _, checkable := o.schema.(*schema.DocumentSchema); checkable { - if len(chk.Violations) > 0 { - return chk - } - chk = dataValuesDoc.Check() - } - return chk -} - -func (o DataValuesPreProcessing) allFileDescs(files []*FileInLibrary) string { - var result []string - for _, fileInLib := range files { - result = append(result, fileInLib.File.RelativePath()) - } - if len(o.valuesOverlays) > 0 { - result = append(result, "additional data values") - } - return strings.Join(result, ", ") -} - -func (o DataValuesPreProcessing) extractDataValueDocs(fileInLib *FileInLibrary) ([]*yamlmeta.Document, error) { - libraryCtx := LibraryExecutionContext{Current: fileInLib.Library, Root: NewRootLibrary(nil)} - - _, resultDocSet, err := o.loader.EvalYAML(libraryCtx, fileInLib.File) - if err != nil { - return nil, err - } - - // Extract _all_ data values docs from the templated result - valuesDocs, nonValuesDocs, err := DocExtractor{resultDocSet}.Extract(AnnotationDataValues) - if err != nil { - return nil, err - } - - // Fail if there any non-empty docs that are not data values - if len(nonValuesDocs) > 0 { - for _, doc := range nonValuesDocs { - if !doc.IsEmpty() { - errStr := "Expected data values file '%s' to only have data values documents" - return nil, fmt.Errorf(errStr, fileInLib.File.RelativePath()) - } - } - } - - return valuesDocs, nil -} - -func (o DataValuesPreProcessing) overlay(dataValues, overlay *yamlmeta.Document) (*yamlmeta.Document, error) { - op := yttoverlay.Op{ - Left: &yamlmeta.DocumentSet{Items: []*yamlmeta.Document{dataValues}}, - Right: &yamlmeta.DocumentSet{Items: []*yamlmeta.Document{overlay}}, - Thread: &starlark.Thread{Name: "data-values-pre-processing"}, - - ExactMatch: true, - } - - newLeft, err := op.Apply() - if err != nil { - return nil, err - } - - return newLeft.(*yamlmeta.DocumentSet).Items[0], nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/doc_extractor.go b/vendor/github.com/k14s/ytt/pkg/workspace/doc_extractor.go deleted file mode 100644 index 06fd5b8fe..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/doc_extractor.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package workspace - -import ( - "fmt" - - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/yamlmeta" - "github.com/k14s/ytt/pkg/yamltemplate" -) - -const ( - AnnotationDataValues template.AnnotationName = "data/values" - AnnotationDataValuesSchema template.AnnotationName = "data/values-schema" -) - -type DocExtractor struct { - DocSet *yamlmeta.DocumentSet -} - -func (v DocExtractor) Extract(annName template.AnnotationName) ([]*yamlmeta.Document, - []*yamlmeta.Document, error) { - - err := v.checkNonDocs(v.DocSet, annName) - if err != nil { - return nil, nil, err - } - - matchedDocs, nonMatchedDocs, err := v.extract(v.DocSet, annName) - if err != nil { - return nil, nil, err - } - - return matchedDocs, nonMatchedDocs, nil -} - -func (v DocExtractor) extract(docSet *yamlmeta.DocumentSet, - annName template.AnnotationName) ([]*yamlmeta.Document, []*yamlmeta.Document, error) { - - var matchedDocs []*yamlmeta.Document - var nonMatchedDocs []*yamlmeta.Document - - for _, doc := range docSet.Items { - var hasMatchingAnn bool - - for _, comment := range doc.GetComments() { - // TODO potentially use template.NewAnnotations(doc).Has(yttoverlay.AnnotationMatch) - // however if doc was not processed by the template, it wont have any annotations set - meta, err := yamltemplate.NewTemplateMetaFromYAMLComment(comment, yamltemplate.MetasOpts{IgnoreUnknown: true}) - if err != nil { - return nil, nil, err - } - for _, ann := range meta.Annotations { - if ann.Name == annName { - if hasMatchingAnn { - return nil, nil, fmt.Errorf("%s annotation may only be used once per YAML doc", annName) - } - hasMatchingAnn = true - } - } - } - - if hasMatchingAnn { - matchedDocs = append(matchedDocs, doc) - } else { - nonMatchedDocs = append(nonMatchedDocs, doc) - } - } - - return matchedDocs, nonMatchedDocs, nil -} - -func (v DocExtractor) checkNonDocs(val interface{}, annName template.AnnotationName) error { - node, ok := val.(yamlmeta.Node) - if !ok { - return nil - } - - for _, comment := range node.GetComments() { - meta, err := yamltemplate.NewTemplateMetaFromYAMLComment(comment, yamltemplate.MetasOpts{IgnoreUnknown: true}) - if err != nil { - return err - } - - for _, ann := range meta.Annotations { - if ann.Name == annName { - // TODO check for annotation emptiness - _, isDoc := node.(*yamlmeta.Document) - if !isDoc { - errMsg := "Expected YAML document to be annotated with %s but was %T" - return fmt.Errorf(errMsg, annName, node) - } - } - } - } - - for _, childVal := range node.GetValues() { - err := v.checkNonDocs(childVal, annName) - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/library.go b/vendor/github.com/k14s/ytt/pkg/workspace/library.go deleted file mode 100644 index 1f48dd7d0..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/library.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package workspace - -import ( - "bytes" - "fmt" - "io" - "sort" - "strings" - - "github.com/k14s/ytt/pkg/files" -) - -const ( - privateName = "_ytt_lib" -) - -type Library struct { - name string - private bool // in _ytt_lib - children []*Library - files []*files.File -} - -func NewRootLibrary(fs []*files.File) *Library { - rootLibrary := &Library{} - - for _, file := range fs { - dirPieces, _ := files.SplitPath(file.RelativePath()) - - var currLibrary *Library = rootLibrary - for _, piece := range dirPieces { - lib, found := currLibrary.findLibrary(piece) - if !found { - currLibrary = currLibrary.CreateLibrary(piece) - } else { - currLibrary = lib - } - } - - currLibrary.files = append(currLibrary.files, file) - } - - return rootLibrary -} - -func (l *Library) findLibrary(name string) (*Library, bool) { - for _, lib := range l.children { - if lib.name == name { - return lib, true - } - } - return nil, false -} - -func (l *Library) CreateLibrary(name string) *Library { - lib := &Library{name: name, private: name == privateName} - l.children = append(l.children, lib) - return lib -} - -func (l *Library) FindAccessibleLibrary(path string) (*Library, error) { - dirPieces, namePiece := files.SplitPath(path) - pieces := append(dirPieces, namePiece) - - privateLib, found := l.findPrivateLibrary() - if !found { - return nil, fmt.Errorf("Could not find private library (directory '%s' missing?)", privateName) - } - - var currLibrary *Library = privateLib - for i, piece := range pieces { - lib, found := currLibrary.findLibrary(piece) - if !found { - return nil, fmt.Errorf("Expected to find library '%s', but did not find '%s'", - path, files.JoinPath(pieces[:i])) - } - if lib.private { - return nil, fmt.Errorf("Could not load private library '%s'", - files.JoinPath(pieces[:i])) - } - currLibrary = lib - } - - return currLibrary, nil -} - -func (l *Library) findPrivateLibrary() (*Library, bool) { - for _, lib := range l.children { - if lib.private { - return lib, true - } - } - return nil, false -} - -func (l *Library) FindLibrary(path string) (*Library, error) { - dirPieces, namePiece := files.SplitPath(path) - - var currLibrary *Library = l - for i, piece := range append(dirPieces, namePiece) { - lib, found := currLibrary.findLibrary(piece) - if !found { - return nil, fmt.Errorf("Did not find '%s'", files.JoinPath(dirPieces[:i])) - } - if lib.private { - return nil, fmt.Errorf("Encountered private library '%s'", privateName) - } - currLibrary = lib - } - - return currLibrary, nil -} - -func (l *Library) FindFile(path string) (FileInLibrary, error) { - dirPieces, namePiece := files.SplitPath(path) - - var currLibrary *Library = l - - if len(dirPieces) > 0 { - lib, err := l.FindLibrary(files.JoinPath(dirPieces)) - if err != nil { - return FileInLibrary{}, fmt.Errorf("Expected to find file '%s', but did not: %s", path, err) - } - currLibrary = lib - } - - for _, file := range currLibrary.files { - _, fileNamePiece := files.SplitPath(file.RelativePath()) - if fileNamePiece == namePiece { - return FileInLibrary{File: file, Library: currLibrary}, nil - } - } - - return FileInLibrary{}, fmt.Errorf( - "Expected to find file '%s' (hint: only files included via -f flag are available)", path) -} - -type FileInLibrary struct { - File *files.File - Library *Library - parentLibraries []*Library -} - -func (fileInLib *FileInLibrary) RelativePath() string { - var components []string - for _, lib := range fileInLib.parentLibraries { - components = append(components, 
lib.name) - } - _, fileName := files.SplitPath(fileInLib.File.RelativePath()) - components = append(components, fileName) - return files.JoinPath(components) -} - -func (l *Library) ListAccessibleFiles() []*FileInLibrary { - return l.listAccessibleFiles(nil) -} - -func (l *Library) listAccessibleFiles(parents []*Library) []*FileInLibrary { - var result []*FileInLibrary - for _, file := range l.files { - result = append(result, &FileInLibrary{ - File: file, - Library: l, - parentLibraries: parents, - }) - } - for _, lib := range l.children { - if !lib.private { - newParents := append(append([]*Library{}, parents...), lib) - result = append(result, lib.listAccessibleFiles(newParents)...) - } - } - return result -} - -func (l *Library) Print(out io.Writer) { - l.print(out, 0) -} - -func (l *Library) PrintStr() string { - var buf bytes.Buffer - l.print(&buf, 0) - return buf.String() -} - -func (l *Library) print(out io.Writer, indent int) { - indentStr := strings.Repeat(" ", indent) - fmt.Fprintf(out, "%s- %s (private %t)\n", indentStr, l.name, l.private) - - fmt.Fprintf(out, "%s files:\n", indentStr) - if len(l.files) == 0 { - fmt.Fprintf(out, "%s \n", indentStr) - } - for _, file := range l.files { - fmt.Fprintf(out, "%s - %s\n", indentStr, file.RelativePath()) - } - - fmt.Fprintf(out, "%s libraries:\n", indentStr) - if len(l.children) == 0 { - fmt.Fprintf(out, "%s \n", indentStr) - } - for _, lib := range l.children { - lib.print(out, indent+1) - } -} - -func SortFilesInLibrary(files []*FileInLibrary) { - sort.SliceStable(files, func(i, j int) bool { - return files[i].File.OrderLess(files[j].File) - }) -} diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/library_execution.go b/vendor/github.com/k14s/ytt/pkg/workspace/library_execution.go deleted file mode 100644 index b031c4c4c..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/library_execution.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package workspace - -import ( - "fmt" - "strings" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/cmd/ui" - "github.com/k14s/ytt/pkg/files" - "github.com/k14s/ytt/pkg/schema" - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/yamlmeta" - yttoverlay "github.com/k14s/ytt/pkg/yttlibrary/overlay" -) - -type LibraryExecution struct { - libraryCtx LibraryExecutionContext - ui ui.UI - templateLoaderOpts TemplateLoaderOpts - libraryExecFactory *LibraryExecutionFactory -} - -type EvalResult struct { - Files []files.OutputFile - DocSet *yamlmeta.DocumentSet - Exports []EvalExport -} - -type EvalExport struct { - Path string - Symbols starlark.StringDict -} - -func NewLibraryExecution(libraryCtx LibraryExecutionContext, - ui ui.UI, templateLoaderOpts TemplateLoaderOpts, - libraryExecFactory *LibraryExecutionFactory) *LibraryExecution { - - return &LibraryExecution{ - libraryCtx: libraryCtx, - ui: ui, - templateLoaderOpts: templateLoaderOpts, - libraryExecFactory: libraryExecFactory, - } -} - -func (ll *LibraryExecution) Schemas(schemaOverlays []*schema.DocumentSchemaEnvelope) (Schema, []*schema.DocumentSchemaEnvelope, error) { - loader := NewTemplateLoader(NewEmptyDataValues(), nil, nil, ll.templateLoaderOpts, ll.libraryExecFactory, ll.ui) - - schemaFiles, err := ll.schemaFiles(loader) - if err != nil { - return nil, nil, err - } - - documentSchemas, err := collectSchemaDocs(schemaFiles, loader) - if err != nil { - return nil, nil, err - } - - documentSchemas = append(documentSchemas, schemaOverlays...) 
- - var resultSchemasDoc *yamlmeta.Document - var childLibrarySchemas []*schema.DocumentSchemaEnvelope - for _, docSchema := range documentSchemas { - if docSchema.IntendedForAnotherLibrary() { - childLibrarySchemas = append(childLibrarySchemas, docSchema) - continue - } - if resultSchemasDoc == nil { - resultSchemasDoc = docSchema.Source() - } else { - resultSchemasDoc, err = ll.overlay(resultSchemasDoc, docSchema.Source()) - if err != nil { - return nil, nil, err - } - } - } - if resultSchemasDoc != nil { - currentLibrarySchema, err := schema.NewDocumentSchema(resultSchemasDoc) - if err != nil { - return nil, nil, err - } - return currentLibrarySchema, childLibrarySchemas, nil - } - return schema.NewNullSchema(), childLibrarySchemas, nil -} - -func collectSchemaDocs(schemaFiles []*FileInLibrary, loader *TemplateLoader) ([]*schema.DocumentSchemaEnvelope, error) { - var documentSchemas []*schema.DocumentSchemaEnvelope - for _, file := range schemaFiles { - libraryCtx := LibraryExecutionContext{Current: file.Library, Root: NewRootLibrary(nil)} - - _, resultDocSet, err := loader.EvalYAML(libraryCtx, file.File) - if err != nil { - return nil, err - } - - docs, _, err := DocExtractor{resultDocSet}.Extract(AnnotationDataValuesSchema) - if err != nil { - return nil, err - } - for _, doc := range docs { - newSchema, err := schema.NewDocumentSchemaEnvelope(doc) - if err != nil { - return nil, err - } - documentSchemas = append(documentSchemas, newSchema) - } - } - return documentSchemas, nil -} - -func (ll *LibraryExecution) overlay(schema, overlay *yamlmeta.Document) (*yamlmeta.Document, error) { - op := yttoverlay.Op{ - Left: &yamlmeta.DocumentSet{Items: []*yamlmeta.Document{schema}}, - Right: &yamlmeta.DocumentSet{Items: []*yamlmeta.Document{overlay}}, - Thread: &starlark.Thread{Name: "schema-pre-processing"}, - - ExactMatch: true, - } - - newLeft, err := op.Apply() - if err != nil { - return nil, err - } - - return newLeft.(*yamlmeta.DocumentSet).Items[0], nil -} - -func (ll *LibraryExecution) Values(valuesOverlays []*DataValues, schema Schema) (*DataValues, []*DataValues, error) { - loader := NewTemplateLoader(NewEmptyDataValues(), nil, nil, ll.templateLoaderOpts, ll.libraryExecFactory, ll.ui) - - valuesFiles, err := ll.valuesFiles(loader) - if err != nil { - return nil, nil, err - } - - dvpp := DataValuesPreProcessing{ - valuesFiles: valuesFiles, - valuesOverlays: valuesOverlays, - schema: schema, - loader: loader, - IgnoreUnknownComments: ll.templateLoaderOpts.IgnoreUnknownComments, - } - - return dvpp.Apply() -} - -func (ll *LibraryExecution) schemaFiles(loader *TemplateLoader) ([]*FileInLibrary, error) { - return ll.filesByAnnotation(AnnotationDataValuesSchema, loader) -} - -func (ll *LibraryExecution) valuesFiles(loader *TemplateLoader) ([]*FileInLibrary, error) { - return ll.filesByAnnotation(AnnotationDataValues, loader) - -} - -func (ll *LibraryExecution) filesByAnnotation(annName template.AnnotationName, loader *TemplateLoader) ([]*FileInLibrary, error) { - var valuesFiles []*FileInLibrary - - for _, fileInLib := range ll.libraryCtx.Current.ListAccessibleFiles() { - if fileInLib.File.Type() == files.TypeYAML && fileInLib.File.IsTemplate() { - docSet, err := loader.EvalPlainYAML(fileInLib.File) - if err != nil { - return nil, err - } - - values, _, err := DocExtractor{docSet}.Extract(annName) - if err != nil { - return nil, err - } - - if len(values) > 0 { - valuesFiles = append(valuesFiles, fileInLib) - fileInLib.File.MarkForOutput(false) - } - } - } - - return valuesFiles, nil -} - 
-func (ll *LibraryExecution) Eval(values *DataValues, libraryValues []*DataValues, librarySchemas []*schema.DocumentSchemaEnvelope) (*EvalResult, error) { - exports, docSets, outputFiles, err := ll.eval(values, libraryValues, librarySchemas) - if err != nil { - return nil, err - } - - docSets, err = (&OverlayPostProcessing{docSets: docSets}).Apply() - if err != nil { - return nil, err - } - - result := &EvalResult{ - Files: outputFiles, - DocSet: &yamlmeta.DocumentSet{}, - Exports: exports, - } - - for _, fileInLib := range ll.sortedOutputDocSets(docSets) { - docSet := docSets[fileInLib] - result.DocSet.Items = append(result.DocSet.Items, docSet.Items...) - - resultDocBytes, err := docSet.AsBytes() - if err != nil { - return nil, fmt.Errorf("Marshaling template result: %s", err) - } - - ll.ui.Debugf("### %s result\n%s", fileInLib.RelativePath(), resultDocBytes) - result.Files = append(result.Files, files.NewOutputFile(fileInLib.RelativePath(), resultDocBytes, fileInLib.File.Type())) - } - - return result, nil -} - -func (ll *LibraryExecution) eval(values *DataValues, libraryValues []*DataValues, librarySchemas []*schema.DocumentSchemaEnvelope) ([]EvalExport, map[*FileInLibrary]*yamlmeta.DocumentSet, []files.OutputFile, error) { - - loader := NewTemplateLoader(values, libraryValues, librarySchemas, ll.templateLoaderOpts, ll.libraryExecFactory, ll.ui) - - exports := []EvalExport{} - docSets := map[*FileInLibrary]*yamlmeta.DocumentSet{} - outputFiles := []files.OutputFile{} - - for _, fileInLib := range ll.libraryCtx.Current.ListAccessibleFiles() { - libraryCtx := LibraryExecutionContext{Current: fileInLib.Library, Root: ll.libraryCtx.Root} - - switch { - case fileInLib.File.IsForOutput(): - // Do not collect globals produced by templates - switch fileInLib.File.Type() { - case files.TypeYAML: - _, resultDocSet, err := loader.EvalYAML(libraryCtx, fileInLib.File) - if err != nil { - return nil, nil, nil, err - } - - docSets[fileInLib] = resultDocSet - - case files.TypeText: - _, resultVal, err := loader.EvalText(libraryCtx, fileInLib.File) - if err != nil { - return nil, nil, nil, err - } - - resultStr := resultVal.AsString() - - ll.ui.Debugf("### %s result\n%s", fileInLib.RelativePath(), resultStr) - outputFiles = append(outputFiles, files.NewOutputFile(fileInLib.RelativePath(), []byte(resultStr), fileInLib.File.Type())) - - default: - return nil, nil, nil, fmt.Errorf("Unknown file type") - } - - case fileInLib.File.IsLibrary(): - // Collect globals produced by library files - var evalFunc func(LibraryExecutionContext, *files.File) (starlark.StringDict, error) - - switch fileInLib.File.Type() { - case files.TypeYAML: - evalFunc = func(libraryCtx LibraryExecutionContext, file *files.File) (starlark.StringDict, error) { - globals, _, err := loader.EvalYAML(libraryCtx, fileInLib.File) - return globals, err - } - - case files.TypeText: - evalFunc = func(libraryCtx LibraryExecutionContext, file *files.File) (starlark.StringDict, error) { - globals, _, err := loader.EvalText(libraryCtx, fileInLib.File) - return globals, err - } - - case files.TypeStarlark: - evalFunc = loader.EvalStarlark - - default: - // TODO should we allow skipping over unknown library files? 
- // do nothing - } - - if evalFunc != nil { - globals, err := evalFunc(libraryCtx, fileInLib.File) - if err != nil { - return nil, nil, nil, err - } - - exports = append(exports, EvalExport{Path: fileInLib.RelativePath(), Symbols: globals}) - } - - default: - // do nothing - } - } - - return exports, docSets, outputFiles, ll.checkUnusedDVsOrSchemas(libraryValues, librarySchemas) -} - -func (*LibraryExecution) sortedOutputDocSets(outputDocSets map[*FileInLibrary]*yamlmeta.DocumentSet) []*FileInLibrary { - var files []*FileInLibrary - for file := range outputDocSets { - files = append(files, file) - } - SortFilesInLibrary(files) - return files -} - -func (LibraryExecution) checkUnusedDVsOrSchemas(libraryValues []*DataValues, librarySchemas []*schema.DocumentSchemaEnvelope) error { - var unusedValuesDescs []string - var unusedDocTypes []string - numDVNotUsed := 0 - - for _, dv := range libraryValues { - if !dv.IsUsed() { - unusedValuesDescs = append(unusedValuesDescs, dv.Desc()) - } - } - - if numDVNotUsed = len(unusedValuesDescs); numDVNotUsed > 0 { - unusedDocTypes = append(unusedDocTypes, "data values") - } - - for _, s := range librarySchemas { - if !s.IsUsed() { - unusedValuesDescs = append(unusedValuesDescs, s.Desc()) - } - } - if len(unusedValuesDescs) > numDVNotUsed { - unusedDocTypes = append(unusedDocTypes, "schema") - } - - if len(unusedValuesDescs) == 0 { - return nil - } - - return fmt.Errorf("Expected all provided library %s documents "+ - "to be used but found unused: %s", strings.Join(unusedDocTypes, ", and "), strings.Join(unusedValuesDescs, ", ")) -} diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/library_execution_factory.go b/vendor/github.com/k14s/ytt/pkg/workspace/library_execution_factory.go deleted file mode 100644 index b83154cca..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/library_execution_factory.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package workspace - -import ( - "github.com/k14s/ytt/pkg/cmd/ui" -) - -type LibraryExecutionContext struct { - Current *Library - Root *Library -} - -type LibraryExecutionFactory struct { - ui ui.UI - templateLoaderOpts TemplateLoaderOpts -} - -func NewLibraryExecutionFactory(ui ui.UI, templateLoaderOpts TemplateLoaderOpts) *LibraryExecutionFactory { - return &LibraryExecutionFactory{ui, templateLoaderOpts} -} - -func (f *LibraryExecutionFactory) WithTemplateLoaderOptsOverrides(overrides TemplateLoaderOptsOverrides) *LibraryExecutionFactory { - return NewLibraryExecutionFactory(f.ui, f.templateLoaderOpts.Merge(overrides)) -} - -func (f *LibraryExecutionFactory) New(ctx LibraryExecutionContext) *LibraryExecution { - return NewLibraryExecution(ctx, f.ui, f.templateLoaderOpts, f) -} diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/library_module.go b/vendor/github.com/k14s/ytt/pkg/workspace/library_module.go deleted file mode 100644 index f9f4eca73..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/library_module.go +++ /dev/null @@ -1,399 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package workspace - -import ( - "fmt" - "strings" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/schema" - "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/workspace/ref" - "github.com/k14s/ytt/pkg/yamlmeta" - "github.com/k14s/ytt/pkg/yamltemplate" -) - -type LibraryModule struct { - libraryCtx LibraryExecutionContext - libraryExecutionFactory *LibraryExecutionFactory - libraryValues []*DataValues - librarySchemas []*schema.DocumentSchemaEnvelope -} - -func NewLibraryModule(libraryCtx LibraryExecutionContext, - libraryExecutionFactory *LibraryExecutionFactory, - libraryValues []*DataValues, librarySchemas []*schema.DocumentSchemaEnvelope) LibraryModule { - - return LibraryModule{libraryCtx, libraryExecutionFactory, libraryValues, librarySchemas} -} - -func (b LibraryModule) AsModule() starlark.StringDict { - return starlark.StringDict{ - "library": &starlarkstruct.Module{ - Name: "library", - Members: starlark.StringDict{ - "get": starlark.NewBuiltin("library.get", core.ErrWrapper(b.Get)), - }, - }, - } -} - -func (b LibraryModule) Get(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - libPath, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - libAlias, tplLoaderOptsOverrides, err := b.getOpts(kwargs) - if err != nil { - return starlark.None, err - } - - if strings.HasPrefix(libPath, "@") { - return starlark.None, fmt.Errorf( - "Expected library '%s' to be specified without '@'", libPath) - } - - foundLib, err := b.libraryCtx.Current.FindAccessibleLibrary(libPath) - if err != nil { - return starlark.None, err - } - - // copy over library values - dataValuess := append([]*DataValues{}, b.libraryValues...) 
- libraryCtx := LibraryExecutionContext{Current: foundLib, Root: foundLib} - - return (&libraryValue{libPath, libAlias, dataValuess, b.librarySchemas, libraryCtx, - b.libraryExecutionFactory.WithTemplateLoaderOptsOverrides(tplLoaderOptsOverrides), - }).AsStarlarkValue(), nil -} - -func (b LibraryModule) getOpts(kwargs []starlark.Tuple) (string, TemplateLoaderOptsOverrides, error) { - var alias string - var overrides TemplateLoaderOptsOverrides - - for _, kwarg := range kwargs { - name, err := core.NewStarlarkValue(kwarg[0]).AsString() - if err != nil { - return "", overrides, err - } - - switch name { - case "alias": - val, err := core.NewStarlarkValue(kwarg[1]).AsString() - if err != nil { - return "", overrides, err - } - alias = val - - case "ignore_unknown_comments": - result, err := core.NewStarlarkValue(kwarg[1]).AsBool() - if err != nil { - return "", overrides, err - } - overrides.IgnoreUnknownComments = &result - - case "implicit_map_key_overrides": - result, err := core.NewStarlarkValue(kwarg[1]).AsBool() - if err != nil { - return "", overrides, err - } - overrides.ImplicitMapKeyOverrides = &result - - case "strict": - result, err := core.NewStarlarkValue(kwarg[1]).AsBool() - if err != nil { - return "", overrides, err - } - overrides.StrictYAML = &result - - default: - return "", overrides, fmt.Errorf("Unexpected kwarg '%s'", name) - } - } - - return alias, overrides, nil -} - -type libraryValue struct { - path string - alias string - dataValuess []*DataValues - schemas []*schema.DocumentSchemaEnvelope - - libraryCtx LibraryExecutionContext - libraryExecutionFactory *LibraryExecutionFactory -} - -func (l *libraryValue) AsStarlarkValue() starlark.Value { - desc := ref.LibraryRef{Path: l.path, Alias: l.alias}.AsString() - evalErrMsg := fmt.Sprintf("Evaluating library '%s'", desc) - exportErrMsg := fmt.Sprintf("Exporting from library '%s'", desc) - - // TODO technically not a module; switch to struct? - return &starlarkstruct.Module{ - Name: "library", - Members: starlark.StringDict{ - "with_data_values": starlark.NewBuiltin("library.with_data_values", core.ErrWrapper(l.WithDataValues)), - "with_data_values_schema": starlark.NewBuiltin("library.with_data_values_schema", core.ErrWrapper(l.WithDataValuesSchema)), - "eval": starlark.NewBuiltin("library.eval", core.ErrWrapper(core.ErrDescWrapper(evalErrMsg, l.Eval))), - "export": starlark.NewBuiltin("library.export", core.ErrWrapper(core.ErrDescWrapper(exportErrMsg, l.Export))), - "data_values": starlark.NewBuiltin("library.data_values", core.ErrWrapper(core.ErrDescWrapper(exportErrMsg, l.DataValues))), - }, - } -} - -func (l *libraryValue) WithDataValues(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - dataValues, err := core.NewStarlarkValue(args.Index(0)).AsGoValue() - if err != nil { - return starlark.None, err - } - - valsYAML, err := NewDataValues(&yamlmeta.Document{ - Value: yamlmeta.NewASTFromInterfaceWithNoPosition(dataValues), - Position: filepos.NewUnknownPosition(), - }) - if err != nil { - return starlark.None, err - } - - // copy over library values - newDataValuess := append([]*DataValues{}, l.dataValuess...) 
- newDataValuess = append(newDataValuess, valsYAML) - - libVal := &libraryValue{l.path, l.alias, newDataValuess, l.schemas, l.libraryCtx, l.libraryExecutionFactory} - - return libVal.AsStarlarkValue(), nil -} - -func (l *libraryValue) WithDataValuesSchema(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - libSchema, err := core.NewStarlarkValue(args.Index(0)).AsGoValue() - if err != nil { - return starlark.None, err - } - - newDocSchema, err := schema.NewDocumentSchemaEnvelope(&yamlmeta.Document{ - Value: yamlmeta.NewASTFromInterface(libSchema), - Position: filepos.NewUnknownPosition(), - }) - if err != nil { - return starlark.None, err - } - - newLibSchemas := append([]*schema.DocumentSchemaEnvelope{}, l.schemas...) - newLibSchemas = append(newLibSchemas, newDocSchema) - - libVal := &libraryValue{l.path, l.alias, l.dataValuess, newLibSchemas, l.libraryCtx, l.libraryExecutionFactory} - - return libVal.AsStarlarkValue(), nil -} - -func (l *libraryValue) Eval(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 0 { - return starlark.None, fmt.Errorf("expected no arguments") - } - - libraryExecution := l.libraryExecutionFactory.New(l.libraryCtx) - - schema, librarySchemas, err := l.librarySchemas(libraryExecution) - if err != nil { - return starlark.None, err - } - astValues, libValues, err := l.libraryValues(libraryExecution, schema) - if err != nil { - return starlark.None, err - } - - result, err := libraryExecution.Eval(astValues, libValues, librarySchemas) - if err != nil { - return starlark.None, err - } - - return yamltemplate.NewStarlarkFragment(result.DocSet), nil -} - -func (l *libraryValue) DataValues(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 0 { - return starlark.None, fmt.Errorf("expected no arguments") - } - - libraryExecution := l.libraryExecutionFactory.New(l.libraryCtx) - - schema, _, err := l.librarySchemas(libraryExecution) - if err != nil { - return starlark.None, err - } - astValues, _, err := l.libraryValues(libraryExecution, schema) - if err != nil { - return starlark.None, err - } - - val := core.NewGoValueWithOpts(astValues.Doc.AsInterface(), core.GoValueOpts{MapIsStruct: true}) - return val.AsStarlarkValue(), nil -} - -func (l *libraryValue) Export(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - symbolName, locationPath, err := l.exportArgs(args, kwargs) - if err != nil { - return starlark.None, err - } - - if strings.HasPrefix(symbolName, "_") { - return starlark.None, fmt.Errorf( - "Symbols starting with '_' are private, and cannot be exported") - } - - libraryExecution := l.libraryExecutionFactory.New(l.libraryCtx) - - schema, librarySchemas, err := l.librarySchemas(libraryExecution) - if err != nil { - return starlark.None, err - } - astValues, libValues, err := l.libraryValues(libraryExecution, schema) - if err != nil { - return starlark.None, err - } - - result, err := libraryExecution.Eval(astValues, libValues, librarySchemas) - if err != nil { - return starlark.None, err - } - - foundExports := []EvalExport{} - - for _, exp := range result.Exports { - if _, found := exp.Symbols[symbolName]; found { - if len(locationPath) == 0 || 
locationPath == exp.Path { - foundExports = append(foundExports, exp) - } - } - } - - switch len(foundExports) { - case 0: - return starlark.None, fmt.Errorf( - "Expected to find exported symbol '%s', but did not", symbolName) - - case 1: - return foundExports[0].Symbols[symbolName], nil - - default: - var paths []string - for _, exp := range foundExports { - paths = append(paths, exp.Path) - } - - return starlark.None, fmt.Errorf("Expected to find exactly "+ - "one exported symbol '%s', but found multiple across files: %s", - symbolName, strings.Join(paths, ", ")) - } -} - -func (l *libraryValue) exportArgs(args starlark.Tuple, kwargs []starlark.Tuple) (string, string, error) { - if args.Len() != 1 { - return "", "", fmt.Errorf("expected exactly one argument") - } - - symbolName, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return "", "", err - } - - var locationPath string - - for _, kwarg := range kwargs { - kwargName := string(kwarg[0].(starlark.String)) - - switch kwargName { - case "path": - var err error - locationPath, err = core.NewStarlarkValue(kwarg[1]).AsString() - if err != nil { - return "", "", err - } - - default: - return "", "", fmt.Errorf("Unexpected keyword argument '%s'", kwargName) - } - } - - return symbolName, locationPath, nil -} - -func (l *libraryValue) librarySchemas(ll *LibraryExecution) (Schema, []*schema.DocumentSchemaEnvelope, error) { - var schemasForCurrentLib, schemasForChildLib []*schema.DocumentSchemaEnvelope - - for _, docSchema := range l.schemas { - matchingSchema, usedInCurrLibrary := docSchema.UsedInLibrary(ref.LibraryRef{Path: l.path, Alias: l.alias}) - if usedInCurrLibrary { - schemasForCurrentLib = append(schemasForCurrentLib, matchingSchema) - } else { - schemasForChildLib = append(schemasForChildLib, matchingSchema) - } - } - - schema, librarySchemas, err := ll.Schemas(schemasForCurrentLib) - if err != nil { - return nil, nil, err - } - - foundChildSchemas := append(librarySchemas, schemasForChildLib...) - return schema, foundChildSchemas, nil -} - -func (l *libraryValue) libraryValues(ll *LibraryExecution, schema Schema) (*DataValues, []*DataValues, error) { - var dvss, afterLibModDVss, childDVss []*DataValues - for _, dv := range l.dataValuess { - matchingDVs := dv.UsedInLibrary(ref.LibraryRef{Path: l.path, Alias: l.alias}) - if matchingDVs != nil { - if matchingDVs.IntendedForAnotherLibrary() { - childDVss = append(childDVss, matchingDVs) - } else { - if matchingDVs.AfterLibMod { - afterLibModDVss = append(afterLibModDVss, matchingDVs) - } else { - dvss = append(dvss, matchingDVs) - } - } - } - } - - dvs, foundChildDVss, err := ll.Values(append(dvss, afterLibModDVss...), schema) - if err != nil { - return nil, nil, err - } - - // Order data values specified in a parent library, on top of - // data values specified within a child library - foundChildDVss = append(foundChildDVss, childDVss...) - - return dvs, foundChildDVss, nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/overlay_post_processing.go b/vendor/github.com/k14s/ytt/pkg/workspace/overlay_post_processing.go deleted file mode 100644 index 7024335a9..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/overlay_post_processing.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package workspace - -import ( - "fmt" - "strings" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/yamlmeta" - yttoverlay "github.com/k14s/ytt/pkg/yttlibrary/overlay" -) - -type OverlayPostProcessing struct { - docSets map[*FileInLibrary]*yamlmeta.DocumentSet -} - -func (o OverlayPostProcessing) Apply() (map[*FileInLibrary]*yamlmeta.DocumentSet, error) { - overlayDocSets := map[*FileInLibrary][]*yamlmeta.Document{} - docSetsWithoutOverlays := []*yamlmeta.DocumentSet{} - docSetToFilesMapping := map[*yamlmeta.DocumentSet]*FileInLibrary{} - - for file, docSet := range o.docSets { - var newItems []*yamlmeta.Document - for _, doc := range docSet.Items { - if template.NewAnnotations(doc).Has(yttoverlay.AnnotationMatch) { - overlayDocSets[file] = append(overlayDocSets[file], doc) - } else { - // TODO avoid filtering out docs? - if doc.IsEmpty() { - continue - } - newItems = append(newItems, doc) - } - } - - if len(newItems) > 0 { - docSet.Items = newItems - docSetsWithoutOverlays = append(docSetsWithoutOverlays, docSet) - docSetToFilesMapping[docSet] = file - } - } - - // Respect assigned file order for data values overlaying to succeed - var sortedOverlayFiles []*FileInLibrary - for file := range overlayDocSets { - sortedOverlayFiles = append(sortedOverlayFiles, file) - } - SortFilesInLibrary(sortedOverlayFiles) - - for _, file := range sortedOverlayFiles { - for _, overlay := range overlayDocSets[file] { - op := yttoverlay.Op{ - // special case: array of docsets so that file association can be preserved - Left: docSetsWithoutOverlays, - Right: &yamlmeta.DocumentSet{ - Items: []*yamlmeta.Document{overlay}, - }, - Thread: &starlark.Thread{Name: "overlay-post-processing"}, - } - newLeft, err := op.Apply() - if err != nil { - return nil, fmt.Errorf("Overlaying (in following order: %s): %s", - o.allFileDescs(sortedOverlayFiles), err) - } - docSetsWithoutOverlays = newLeft.([]*yamlmeta.DocumentSet) - } - } - - result := map[*FileInLibrary]*yamlmeta.DocumentSet{} - - for _, docSet := range docSetsWithoutOverlays { - if file, ok := docSetToFilesMapping[docSet]; ok { - result[file] = docSet - } else { - return nil, fmt.Errorf("Expected to find file for docset") - } - } - - return result, nil -} - -func (o OverlayPostProcessing) allFileDescs(files []*FileInLibrary) string { - var result []string - for _, fileInLib := range files { - result = append(result, fileInLib.File.RelativePath()) - } - return strings.Join(result, ", ") -} diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/ref/library_ref.go b/vendor/github.com/k14s/ytt/pkg/workspace/ref/library_ref.go deleted file mode 100644 index 1ed09563b..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/ref/library_ref.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package ref - -import ( - "fmt" - "strings" - - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/template/core" -) - -const ( - AnnotationLibraryRef = "library/ref" - - librarySep = "@" - libraryAliasIndicator = "~" -) - -type LibraryRef struct { - Path string - Alias string -} - -func (p LibraryRef) Matches(lpp LibraryRef) bool { - pathMatch := p.Path == lpp.Path - if p.Alias == "" { - return pathMatch - } - - aliasMatch := p.Alias == lpp.Alias - if p.Path == "" { - return aliasMatch - } - - return aliasMatch && pathMatch -} - -func (p LibraryRef) AsString() string { - if p.Alias == "" { - return p.Path - } - return p.Path + libraryAliasIndicator + p.Alias -} - -type LibraryRefExtractor struct { -} - -func (n LibraryRefExtractor) FromAnnotation(anns template.NodeAnnotations) ([]LibraryRef, error) { - var libRef []LibraryRef - - if hasLibAnn := anns.Has(AnnotationLibraryRef); hasLibAnn { - libArgs := anns.Args(AnnotationLibraryRef) - if l := libArgs.Len(); l != 1 { - return nil, fmt.Errorf("Expected %s annotation to have one arg, got %d", AnnotationLibraryRef, l) - } - - argString, err := core.NewStarlarkValue(libArgs[0]).AsString() - if err != nil { - return nil, err - } - - libRef, err = n.FromStr(argString) - if err != nil { - return nil, fmt.Errorf("Annotation %s: %s", AnnotationLibraryRef, err.Error()) - } - } - return libRef, nil -} - -func (n LibraryRefExtractor) FromStr(libRefStr string) ([]LibraryRef, error) { - if libRefStr == "" { - return nil, fmt.Errorf("Expected library ref to not be empty") - } - - if !strings.HasPrefix(libRefStr, librarySep) { - return nil, fmt.Errorf("Expected library ref to start with '%s'", librarySep) - } - - var result []LibraryRef - for _, refPiece := range strings.Split(libRefStr, librarySep)[1:] { - pathAndAlias := strings.Split(refPiece, libraryAliasIndicator) - switch l := len(pathAndAlias); { - case l == 1: - result = append(result, LibraryRef{Path: pathAndAlias[0]}) - - case l == 2: - if pathAndAlias[1] == "" { - return nil, fmt.Errorf("Expected library alias to not be empty") - } - - result = append(result, LibraryRef{Path: pathAndAlias[0], Alias: pathAndAlias[1]}) - - default: - return nil, fmt.Errorf("Expected library ref to have form: '@path', '@~alias', or '@path~alias', got: '%s'", libRefStr) - } - } - return result, nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/schema.go b/vendor/github.com/k14s/ytt/pkg/workspace/schema.go deleted file mode 100644 index db62defbb..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/schema.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 -package workspace - -import ( - "github.com/k14s/ytt/pkg/schema" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -type Schema interface { - AssignType(typeable yamlmeta.Typeable) yamlmeta.TypeCheck - // DefaultDataValues yields default values for Data Values... - // if schema was built by schema.NewNullSchema (i.e. no schema was provided), returns nil - DefaultDataValues() *yamlmeta.Document -} - -var _ Schema = &schema.DocumentSchema{} diff --git a/vendor/github.com/k14s/ytt/pkg/workspace/template_loader.go b/vendor/github.com/k14s/ytt/pkg/workspace/template_loader.go deleted file mode 100644 index ea2955dfc..000000000 --- a/vendor/github.com/k14s/ytt/pkg/workspace/template_loader.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package workspace - -import ( - "fmt" - "strings" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/cmd/ui" - "github.com/k14s/ytt/pkg/files" - "github.com/k14s/ytt/pkg/schema" - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/texttemplate" - "github.com/k14s/ytt/pkg/yamlmeta" - "github.com/k14s/ytt/pkg/yamltemplate" - "github.com/k14s/ytt/pkg/yttlibrary" -) - -type TemplateLoader struct { - ui ui.UI - values *DataValues - libraryValuess []*DataValues - librarySchemas []*schema.DocumentSchemaEnvelope - opts TemplateLoaderOpts - compiledTemplates map[string]*template.CompiledTemplate - libraryExecFactory *LibraryExecutionFactory -} - -type TemplateLoaderOpts struct { - IgnoreUnknownComments bool - ImplicitMapKeyOverrides bool - StrictYAML bool - SchemaEnabled bool -} - -type TemplateLoaderOptsOverrides struct { - IgnoreUnknownComments *bool - ImplicitMapKeyOverrides *bool - StrictYAML *bool -} - -func NewTemplateLoader(values *DataValues, libraryValuess []*DataValues, librarySchemas []*schema.DocumentSchemaEnvelope, opts TemplateLoaderOpts, libraryExecFactory *LibraryExecutionFactory, ui ui.UI) *TemplateLoader { - - if values == nil { - panic("Expected values to be non-nil") - } - - return &TemplateLoader{ - ui: ui, - values: values, - libraryValuess: libraryValuess, - librarySchemas: librarySchemas, - opts: opts, - compiledTemplates: map[string]*template.CompiledTemplate{}, - libraryExecFactory: libraryExecFactory, - } -} - -func (l *TemplateLoader) FindCompiledTemplate(path string) (*template.CompiledTemplate, error) { - ct, found := l.compiledTemplates[path] - if !found { - return nil, fmt.Errorf("Expected to find '%s' compiled template", path) - } - return ct, nil -} - -func (l *TemplateLoader) Load(thread *starlark.Thread, module string) (starlark.StringDict, error) { - libraryCtx := LibraryExecutionContext{ - Current: l.getCurrentLibrary(thread), - Root: l.getRootLibrary(thread), - } - filePath := module - - if strings.HasPrefix(module, "@") { - pieces := strings.SplitN(module[1:], ":", 2) - if len(pieces) != 2 { - return nil, fmt.Errorf("Expected library path to be in format '@name:path' " + - "e.g. '@github.com/vmware-tanzu/test:test.star' or '@ytt:base64'") - } - - if pieces[0] == "ytt" { - return l.getYTTLibrary(thread).FindModule(pieces[1]) - } - - foundLib, err := libraryCtx.Current.FindAccessibleLibrary(pieces[0]) - if err != nil { - return nil, err - } - - libraryCtx = LibraryExecutionContext{Current: foundLib, Root: foundLib} - filePath = pieces[1] - } - - var libraryWithFile *Library = libraryCtx.Current - - // If path starts from a root, then it should be relative to root library - if files.IsRootPath(filePath) { - libraryWithFile = libraryCtx.Root - filePath = files.StripRootPath(filePath) - } - - fileInLib, err := libraryWithFile.FindFile(filePath) - if err != nil { - return nil, err - } - - // File might be inside nested libraries, make sure to update current library - libraryCtx = LibraryExecutionContext{Current: fileInLib.Library, Root: libraryCtx.Root} - file := fileInLib.File - - if !file.IsLibrary() { - return nil, fmt.Errorf("Expected file '%s' to be a library file, but was not "+ - "(hint: library filename must end with '.lib.yml' or '.star'; use data.read(...) 
for loading non-templated file contents)", file.RelativePath()) - } - - switch file.Type() { - case files.TypeYAML: - globals, _, err := l.EvalYAML(libraryCtx, file) - return globals, err - - case files.TypeStarlark: - return l.EvalStarlark(libraryCtx, file) - - case files.TypeText: - globals, _, err := l.EvalText(libraryCtx, file) - return globals, err - - default: - return nil, fmt.Errorf("File '%s' type is not a known", file.RelativePath()) - } -} - -func (l *TemplateLoader) EvalPlainYAML(file *files.File) (*yamlmeta.DocumentSet, error) { - fileBs, err := file.Bytes() - if err != nil { - return nil, err - } - - docSetOpts := yamlmeta.DocSetOpts{ - AssociatedName: file.RelativePath(), - WithoutComments: !file.IsTemplate() && !file.IsLibrary(), - Strict: l.opts.StrictYAML, - } - l.ui.Debugf("## file %s (opts %#v)\n", file.RelativePath(), docSetOpts) - - docSet, err := yamlmeta.NewDocumentSetFromBytes(fileBs, docSetOpts) - if err != nil { - return nil, fmt.Errorf("Unmarshaling YAML template '%s': %s", file.RelativePath(), err) - } - - return docSet, nil -} - -func (l *TemplateLoader) EvalYAML(libraryCtx LibraryExecutionContext, file *files.File) (starlark.StringDict, *yamlmeta.DocumentSet, error) { - docSet, err := l.EvalPlainYAML(file) - if err != nil { - return nil, nil, err - } - - l.ui.Debugf("### ast\n") - docSet.Print(l.ui.DebugWriter()) - - if !file.IsTemplate() && !file.IsLibrary() || !yamltemplate.HasTemplating(docSet) { - return nil, docSet, nil - } - - tplOpts := yamltemplate.TemplateOpts{ - IgnoreUnknownComments: l.opts.IgnoreUnknownComments, - ImplicitMapKeyOverrides: l.opts.ImplicitMapKeyOverrides, - } - - compiledTemplate, err := yamltemplate.NewTemplate(file.RelativePath(), tplOpts).Compile(docSet) - if err != nil { - return nil, nil, fmt.Errorf("Compiling YAML template '%s': %s", file.RelativePath(), err) - } - - l.addCompiledTemplate(file.RelativePath(), compiledTemplate) - l.ui.Debugf("### template\n%s", compiledTemplate.DebugCodeAsString()) - - yttLibrary := yttlibrary.NewAPI(compiledTemplate.TplReplaceNode, - yttlibrary.NewDataModule(l.values.Doc, DataLoader{libraryCtx}), - NewLibraryModule(libraryCtx, l.libraryExecFactory, l.libraryValuess, l.librarySchemas).AsModule()) - - thread := l.newThread(libraryCtx, yttLibrary, file) - - globals, resultVal, err := compiledTemplate.Eval(thread, l) - if err != nil { - return nil, nil, err - } - - return globals, resultVal.(*yamlmeta.DocumentSet), nil -} - -func (l *TemplateLoader) EvalText(libraryCtx LibraryExecutionContext, file *files.File) (starlark.StringDict, *texttemplate.NodeRoot, error) { - fileBs, err := file.Bytes() - if err != nil { - return nil, nil, err - } - - l.ui.Debugf("## file %s\n", file.RelativePath()) - - if !file.IsTemplate() && !file.IsLibrary() { - plainRootNode := &texttemplate.NodeRoot{ - Items: []interface{}{&texttemplate.NodeText{Content: string(fileBs)}}, - } - return nil, plainRootNode, nil - } - - textRoot, err := texttemplate.NewParser().Parse(fileBs, file.RelativePath()) - if err != nil { - return nil, nil, fmt.Errorf("Parsing text template '%s': %s", file.RelativePath(), err) - } - - compiledTemplate, err := texttemplate.NewTemplate(file.RelativePath()).Compile(textRoot) - if err != nil { - return nil, nil, fmt.Errorf("Compiling text template '%s': %s", file.RelativePath(), err) - } - - l.addCompiledTemplate(file.RelativePath(), compiledTemplate) - l.ui.Debugf("### template\n%s", compiledTemplate.DebugCodeAsString()) - - yttLibrary := yttlibrary.NewAPI(compiledTemplate.TplReplaceNode, - 
yttlibrary.NewDataModule(l.values.Doc, DataLoader{libraryCtx}), - NewLibraryModule(libraryCtx, l.libraryExecFactory, l.libraryValuess, l.librarySchemas).AsModule()) - - thread := l.newThread(libraryCtx, yttLibrary, file) - - globals, resultVal, err := compiledTemplate.Eval(thread, l) - if err != nil { - return nil, nil, fmt.Errorf("Evaluating text template: %s", err) - } - - return globals, resultVal.(*texttemplate.NodeRoot), nil -} - -func (l *TemplateLoader) EvalStarlark(libraryCtx LibraryExecutionContext, file *files.File) (starlark.StringDict, error) { - fileBs, err := file.Bytes() - if err != nil { - return nil, err - } - - l.ui.Debugf("## file %s\n", file.RelativePath()) - - instructions := template.NewInstructionSet() - compiledTemplate := template.NewCompiledTemplate( - file.RelativePath(), template.NewCodeFromBytes(fileBs, instructions), - instructions, template.NewNodes(), template.EvaluationCtxDialects{}) - - l.addCompiledTemplate(file.RelativePath(), compiledTemplate) - l.ui.Debugf("### template\n%s", compiledTemplate.DebugCodeAsString()) - - yttLibrary := yttlibrary.NewAPI(compiledTemplate.TplReplaceNode, - yttlibrary.NewDataModule(l.values.Doc, DataLoader{libraryCtx}), - NewLibraryModule(libraryCtx, l.libraryExecFactory, l.libraryValuess, l.librarySchemas).AsModule()) - - thread := l.newThread(libraryCtx, yttLibrary, file) - - globals, _, err := compiledTemplate.Eval(thread, l) - if err != nil { - return nil, fmt.Errorf("Evaluating starlark template: %s", err) - } - - return globals, nil -} - -const ( - threadCurrentLibraryKey = "ytt.curr_library_key" - threadRootLibraryKey = "ytt.root_library_key" - threadYTTLibraryKey = "ytt.ytt_library_key" -) - -func (l *TemplateLoader) getCurrentLibrary(thread *starlark.Thread) *Library { - lib, ok := thread.Local(threadCurrentLibraryKey).(*Library) - if !ok || lib == nil { - panic("Expected to find library associated with thread") - } - return lib -} - -func (l *TemplateLoader) setCurrentLibrary(thread *starlark.Thread, library *Library) { - thread.SetLocal(threadCurrentLibraryKey, library) -} - -func (l *TemplateLoader) getRootLibrary(thread *starlark.Thread) *Library { - lib, ok := thread.Local(threadRootLibraryKey).(*Library) - if !ok || lib == nil { - panic("Expected to find root library associated with thread") - } - return lib -} - -func (l *TemplateLoader) setRootLibrary(thread *starlark.Thread, library *Library) { - thread.SetLocal(threadRootLibraryKey, library) -} - -func (l *TemplateLoader) getYTTLibrary(thread *starlark.Thread) yttlibrary.API { - yttLibrary, ok := thread.Local(threadYTTLibraryKey).(yttlibrary.API) - if !ok { - panic("Expected to find YTT library associated with thread") - } - return yttLibrary -} - -func (l *TemplateLoader) setYTTLibrary(thread *starlark.Thread, yttLibrary yttlibrary.API) { - thread.SetLocal(threadYTTLibraryKey, yttLibrary) -} - -func (l *TemplateLoader) newThread(libraryCtx LibraryExecutionContext, - yttLibrary yttlibrary.API, file *files.File) *starlark.Thread { - - thread := &starlark.Thread{Name: "template=" + file.RelativePath(), Load: l.Load} - l.setCurrentLibrary(thread, libraryCtx.Current) - l.setRootLibrary(thread, libraryCtx.Root) - l.setYTTLibrary(thread, yttLibrary) - return thread -} - -func (l *TemplateLoader) addCompiledTemplate(path string, ct *template.CompiledTemplate) { - l.compiledTemplates[path] = ct -} - -func (opts TemplateLoaderOpts) Merge(overrides TemplateLoaderOptsOverrides) TemplateLoaderOpts { - optsCopy := opts - if overrides.IgnoreUnknownComments != nil { - 
optsCopy.IgnoreUnknownComments = *overrides.IgnoreUnknownComments - } - if overrides.ImplicitMapKeyOverrides != nil { - optsCopy.ImplicitMapKeyOverrides = *overrides.ImplicitMapKeyOverrides - } - if overrides.StrictYAML != nil { - optsCopy.StrictYAML = *overrides.StrictYAML - } - return optsCopy -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/ast.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/ast.go deleted file mode 100644 index 65bb611da..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/ast.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -import ( - "github.com/k14s/ytt/pkg/filepos" -) - -type Node interface { - GetPosition() *filepos.Position - SetPosition(*filepos.Position) - - GetValues() []interface{} // ie children - SetValue(interface{}) error - AddValue(interface{}) error - ResetValue() - - GetComments() []*Comment - addComments(*Comment) - - GetAnnotations() interface{} - SetAnnotations(interface{}) - - DeepCopyAsInterface() interface{} - DeepCopyAsNode() Node - - Check() TypeCheck - - sealed() // limit the concrete types of Node to map directly only to types allowed in YAML spec. -} - -type ValueHoldingNode interface { - Node - Val() interface{} -} - -var _ = []Node{&DocumentSet{}, &Map{}, &Array{}} -var _ = []ValueHoldingNode{&Document{}, &MapItem{}, &ArrayItem{}} - -type DocumentSet struct { - Comments []*Comment - AllComments []*Comment - - Items []*Document - Position *filepos.Position - - annotations interface{} - originalBytes *[]byte -} - -type Document struct { - Type Type - Comments []*Comment - Value interface{} - Position *filepos.Position - - annotations interface{} - injected bool // indicates that Document was not present in the parsed content -} - -type Map struct { - Type Type - Comments []*Comment - Items []*MapItem - Position *filepos.Position - - annotations interface{} -} - -type MapItem struct { - Type Type - Comments []*Comment - Key interface{} - Value interface{} - Position *filepos.Position - - annotations interface{} -} - -type Array struct { - Type Type - Comments []*Comment - Items []*ArrayItem - Position *filepos.Position - - annotations interface{} -} - -type ArrayItem struct { - Type Type - Comments []*Comment - Value interface{} - Position *filepos.Position - - annotations interface{} -} - -type Scalar struct { - Position *filepos.Position - Value interface{} -} - -type Comment struct { - Data string - Position *filepos.Position -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/convert.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/convert.go deleted file mode 100644 index 93c59f450..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/convert.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -import ( - "fmt" - - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/orderedmap" - "github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2" -) - -func NewASTFromInterface(val interface{}) interface{} { - return convertToAST(val, filepos.NewUnknownPosition()) -} - -func NewASTFromInterfaceWithPosition(val interface{}, defaultPosition *filepos.Position) interface{} { - return convertToAST(val, defaultPosition) -} - -func NewASTFromInterfaceWithNoPosition(val interface{}) interface{} { - return convertToASTWithNoPosition(val) -} - -func NewGoFromAST(val interface{}) interface{} { - return convertToGo(val) -} - -func convertToLowYAML(val interface{}) interface{} { - switch typedVal := val.(type) { - case map[interface{}]interface{}: - panic("Expected *orderedmap.Map instead of map[interface{}]interface{} in convertToLowYAML") - - case map[string]interface{}: - panic("Expected *orderedmap.Map instead of map[string]interface{} in convertToLowYAML") - - case *orderedmap.Map: - result := yaml.MapSlice{} - typedVal.Iterate(func(k, v interface{}) { - result = append(result, yaml.MapItem{ - Key: k, - Value: convertToLowYAML(v), - }) - }) - return result - - case []interface{}: - result := []interface{}{} - for _, item := range typedVal { - result = append(result, convertToLowYAML(item)) - } - return result - - default: - return val - } -} - -func convertToGo(val interface{}) interface{} { - switch typedVal := val.(type) { - case *DocumentSet: - panic("Unexpected docset value within document") - - case *Document: - panic("Unexpected document within document") - - case *Map: - result := orderedmap.NewMap() - for _, item := range typedVal.Items { - // Catch any cases where unique key invariant is violated - if _, found := result.Get(item.Key); found { - panic(fmt.Sprintf("Unexpected duplicate key: %s", item.Key)) - } - result.Set(item.Key, convertToGo(item.Value)) - } - return result - - case *Array: - result := []interface{}{} - for _, item := range typedVal.Items { - result = append(result, convertToGo(item.Value)) - } - return result - - case []interface{}: - result := []interface{}{} - for _, item := range typedVal { - result = append(result, convertToGo(item)) - } - return result - - case map[interface{}]interface{}: - panic("Expected *orderedmap.Map instead of map[interface{}]interface{} in convertToGo") - - case map[string]interface{}: - panic("Expected *orderedmap.Map instead of map[string]interface{} in convertToGo") - - case *orderedmap.Map: - result := orderedmap.NewMap() - typedVal.Iterate(func(k, v interface{}) { - result.Set(k, convertToGo(v)) - }) - return result - - default: - return val - } -} - -func convertToAST(val interface{}, defaultPosition *filepos.Position) interface{} { - switch typedVal := val.(type) { - // necessary for overlay processing - case []*DocumentSet: - for i, item := range typedVal { - typedVal[i] = convertToAST(item, defaultPosition).(*DocumentSet) - } - return typedVal - - case *DocumentSet: - for i, item := range typedVal.Items { - typedVal.Items[i] = convertToAST(item, defaultPosition).(*Document) - } - return typedVal - - case *Document: - typedVal.Value = convertToAST(typedVal.Value, defaultPosition) - return typedVal - - case *Map: - for i, item := range typedVal.Items { - typedVal.Items[i] = convertToAST(item, defaultPosition).(*MapItem) - } - return typedVal - - case *MapItem: - typedVal.Key = convertToAST(typedVal.Key, defaultPosition) - typedVal.Value = convertToAST(typedVal.Value, 
defaultPosition) - return typedVal - - case *Array: - for i, item := range typedVal.Items { - typedVal.Items[i] = convertToAST(item, defaultPosition).(*ArrayItem) - } - return typedVal - - case *ArrayItem: - typedVal.Value = convertToAST(typedVal.Value, defaultPosition) - return typedVal - - case []interface{}: - result := &Array{} - for _, item := range typedVal { - result.Items = append(result.Items, &ArrayItem{ - Value: convertToAST(item, defaultPosition), - Position: defaultPosition, - }) - } - return result - - case map[interface{}]interface{}: - panic("Expected *orderedmap.Map instead of map[interface{}]interface{} in convertToAST") - - case map[string]interface{}: - panic("Expected *orderedmap.Map instead of map[string]interface{} in convertToAST") - - case *orderedmap.Map: - result := &Map{Position: defaultPosition} - typedVal.Iterate(func(k, v interface{}) { - result.Items = append(result.Items, &MapItem{ - Key: k, - Value: convertToAST(v, defaultPosition), - Position: defaultPosition, - }) - }) - return result - - default: - return val - } -} - -func convertToASTWithNoPosition(val interface{}) interface{} { - switch typedVal := val.(type) { - // necessary for overlay processing - case []*DocumentSet: - for i, item := range typedVal { - typedVal[i] = convertToASTWithNoPosition(item).(*DocumentSet) - } - return typedVal - - case *DocumentSet: - for i, item := range typedVal.Items { - typedVal.Items[i] = convertToASTWithNoPosition(item).(*Document) - } - return typedVal - - case *Document: - typedVal.Value = convertToASTWithNoPosition(typedVal.Value) - return typedVal - - case *Map: - for i, item := range typedVal.Items { - typedVal.Items[i] = convertToASTWithNoPosition(item).(*MapItem) - } - return typedVal - - case *MapItem: - typedVal.Key = convertToASTWithNoPosition(typedVal.Key) - typedVal.Value = convertToASTWithNoPosition(typedVal.Value) - return typedVal - - case *Array: - for i, item := range typedVal.Items { - typedVal.Items[i] = convertToASTWithNoPosition(item).(*ArrayItem) - } - return typedVal - - case *ArrayItem: - typedVal.Value = convertToASTWithNoPosition(typedVal.Value) - return typedVal - - case []interface{}: - result := &Array{} - for _, item := range typedVal { - convertedValue := convertToASTWithNoPosition(item) - result.Items = append(result.Items, &ArrayItem{ - Value: convertedValue, - Position: filepos.NewUnknownPositionWithKeyVal("-", convertedValue, ""), - }) - } - return result - - case map[interface{}]interface{}: - panic("Expected *orderedmap.Map instead of map[interface{}]interface{} in convertToAST") - - case map[string]interface{}: - panic("Expected *orderedmap.Map instead of map[string]interface{} in convertToAST") - - case *orderedmap.Map: - result := &Map{} - typedVal.Iterate(func(k, v interface{}) { - convertedValue := convertToASTWithNoPosition(v) - result.Items = append(result.Items, &MapItem{ - Key: k, - Value: convertedValue, - Position: filepos.NewUnknownPositionWithKeyVal(k, convertedValue, ":"), - }) - }) - return result - - default: - return val - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/deep_copy.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/deep_copy.go deleted file mode 100644 index 2e606a7cd..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/deep_copy.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -func (ds *DocumentSet) DeepCopyAsNode() Node { return ds.DeepCopy() } -func (d *Document) DeepCopyAsNode() Node { return d.DeepCopy() } -func (m *Map) DeepCopyAsNode() Node { return m.DeepCopy() } -func (mi *MapItem) DeepCopyAsNode() Node { return mi.DeepCopy() } -func (a *Array) DeepCopyAsNode() Node { return a.DeepCopy() } -func (ai *ArrayItem) DeepCopyAsNode() Node { return ai.DeepCopy() } - -func (ds *DocumentSet) DeepCopyAsInterface() interface{} { return ds.DeepCopy() } -func (d *Document) DeepCopyAsInterface() interface{} { return d.DeepCopy() } -func (m *Map) DeepCopyAsInterface() interface{} { return m.DeepCopy() } -func (mi *MapItem) DeepCopyAsInterface() interface{} { return mi.DeepCopy() } -func (a *Array) DeepCopyAsInterface() interface{} { return a.DeepCopy() } -func (ai *ArrayItem) DeepCopyAsInterface() interface{} { return ai.DeepCopy() } - -func (ds *DocumentSet) DeepCopy() *DocumentSet { - var newItems []*Document - for _, item := range ds.Items { - newItems = append(newItems, item.DeepCopy()) - } - - return &DocumentSet{ - Comments: []*Comment(CommentSlice(ds.Comments).DeepCopy()), - AllComments: []*Comment(CommentSlice(ds.AllComments).DeepCopy()), - - Items: newItems, - Position: ds.Position, - - annotations: annotationsDeepCopy(ds.annotations), - } -} - -func (d *Document) DeepCopy() *Document { - return &Document{ - Comments: []*Comment(CommentSlice(d.Comments).DeepCopy()), - Value: nodeDeepCopy(d.Value), - Position: d.Position, - - annotations: annotationsDeepCopy(d.annotations), - injected: d.injected, - } -} - -func (m *Map) DeepCopy() *Map { - var newItems []*MapItem - for _, item := range m.Items { - newItems = append(newItems, item.DeepCopy()) - } - - return &Map{ - Comments: []*Comment(CommentSlice(m.Comments).DeepCopy()), - Items: newItems, - Position: m.Position, - - annotations: annotationsDeepCopy(m.annotations), - } -} - -func (mi *MapItem) DeepCopy() *MapItem { - return &MapItem{ - Comments: []*Comment(CommentSlice(mi.Comments).DeepCopy()), - Key: mi.Key, - Value: nodeDeepCopy(mi.Value), - Position: mi.Position, - - annotations: annotationsDeepCopy(mi.annotations), - } -} - -func (a *Array) DeepCopy() *Array { - var newItems []*ArrayItem - for _, item := range a.Items { - newItems = append(newItems, item.DeepCopy()) - } - - return &Array{ - Comments: []*Comment(CommentSlice(a.Comments).DeepCopy()), - Items: newItems, - Position: a.Position, - - annotations: annotationsDeepCopy(a.annotations), - } -} - -func (ai *ArrayItem) DeepCopy() *ArrayItem { - return &ArrayItem{ - Comments: []*Comment(CommentSlice(ai.Comments).DeepCopy()), - Value: nodeDeepCopy(ai.Value), - Position: ai.Position, - - annotations: annotationsDeepCopy(ai.annotations), - } -} - -func (n *Comment) DeepCopy() *Comment { return &(*n) } - -type CommentSlice []*Comment - -func (s CommentSlice) DeepCopy() CommentSlice { - var result []*Comment - for _, comment := range s { - result = append(result, comment.DeepCopy()) - } - return result -} - -func nodeDeepCopy(val interface{}) interface{} { - if node, ok := val.(Node); ok { - return node.DeepCopyAsInterface() - } - return val -} - -func annotationsDeepCopy(anns interface{}) interface{} { - if anns == nil { - return nil - } - return anns.(interface{ DeepCopyAsInterface() interface{} }).DeepCopyAsInterface() -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/document.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/document.go deleted file mode 100644 index e2f96ca67..000000000 
--- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/document.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -import ( - "github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2" -) - -func (d *Document) IsEmpty() bool { - if d.Value == nil { - return true - } - // TODO remove doc empty checks for map and array - if typedMap, isMap := d.Value.(*Map); isMap { - return len(typedMap.Items) == 0 - } - if typedArray, isArray := d.Value.(*Array); isArray { - return len(typedArray.Items) == 0 - } - return false -} - -func (d *Document) AsYAMLBytes() ([]byte, error) { - return yaml.Marshal(convertToLowYAML(convertToGo(d.Value))) -} - -func (d *Document) AsInterface() interface{} { - return convertToGo(d.Value) -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/document_set.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/document_set.go deleted file mode 100644 index 36b5fd4c8..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/document_set.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -import ( - "bytes" - "io" -) - -type DocSetOpts struct { - WithoutComments bool - Strict bool - // associatedName is typically a file name where data came from - AssociatedName string -} - -func NewDocumentSetFromBytes(data []byte, opts DocSetOpts) (*DocumentSet, error) { - parserOpts := ParserOpts{WithoutComments: opts.WithoutComments, Strict: opts.Strict} - - docSet, err := NewParser(parserOpts).ParseBytes(data, opts.AssociatedName) - if err != nil { - return nil, err - } - docSet.originalBytes = &data - return docSet, nil -} - -func (ds *DocumentSet) Print(writer io.Writer) { - NewPrinter(writer).Print(ds) -} - -// AsSourceBytes() returns bytes used to make original DocumentSet. -// Any changes made to the DocumentSet are not reflected in any way -func (ds *DocumentSet) AsSourceBytes() ([]byte, bool) { - if ds.originalBytes != nil { - return *ds.originalBytes, true - } - return nil, false -} - -func (ds *DocumentSet) AsBytes() ([]byte, error) { - return ds.AsBytesWithPrinter(nil) -} - -func (ds *DocumentSet) AsBytesWithPrinter(printerFunc func(io.Writer) DocumentPrinter) ([]byte, error) { - if printerFunc == nil { - printerFunc = func(w io.Writer) DocumentPrinter { return NewYAMLPrinter(w) } - } - - buf := new(bytes.Buffer) - printer := printerFunc(buf) - - for _, item := range ds.Items { - if item.injected || item.IsEmpty() { - continue - } - printer.Print(item) - } - - return buf.Bytes(), nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/file_position_printer.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/file_position_printer.go deleted file mode 100644 index b7dcb58ed..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/file_position_printer.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -import ( - "bytes" - "fmt" - "io" - "strings" - - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2" -) - -type FilePositionPrinter struct { - writer io.Writer - opts FilePositionPrinterOpts - locWidth int -} - -type FilePositionPrinterOpts struct{} - -func NewFilePositionPrinter(writer io.Writer) *FilePositionPrinter { - return &FilePositionPrinter{writer, FilePositionPrinterOpts{}, 0} -} - -func NewFilePositionPrinterWithOpts(writer io.Writer, opts FilePositionPrinterOpts) *FilePositionPrinter { - return &FilePositionPrinter{writer, opts, 0} -} - -func (p *FilePositionPrinter) Print(val interface{}) { - fmt.Fprintf(p.writer, "%s", p.PrintStr(val)) -} - -func (p *FilePositionPrinter) PrintStr(val interface{}) string { - buf := new(bytes.Buffer) - p.print(val, "", buf) - return buf.String() -} - -func (p *FilePositionPrinter) print(val interface{}, indent string, writer io.Writer) { - const indentLvl = " " - - switch typedVal := val.(type) { - case *DocumentSet: - fmt.Fprintf(writer, "%s%s[docset]\n", p.lineStr(typedVal.Position), indent) - - for _, item := range typedVal.Items { - p.print(item, indent+indentLvl, writer) - } - - case *Document: - fmt.Fprintf(writer, "%s%s[doc]\n", p.lineStr(typedVal.Position), indent) - p.print(typedVal.Value, indent+indentLvl, writer) - - case *Map: - // fmt.Fprintf(writer, "%s%smap\n", indent, p.lineStr(typedVal.Position)) - - for _, item := range typedVal.Items { - valStr, isLeaf := p.leafValue(item.Value) - if !isLeaf || strings.Contains(valStr, "\n") { - fmt.Fprintf(writer, "%s%s%s:\n", p.lineStr(item.Position), indent, item.Key) - p.print(item.Value, indent+indentLvl, writer) - } else { - fmt.Fprintf(writer, "%s%s%s: %s\n", p.lineStr(item.Position), indent, item.Key, valStr) - } - } - - case *MapItem: - fmt.Fprintf(writer, "%s%s%s:\n", p.lineStr(typedVal.Position), indent, typedVal.Key) - p.print(typedVal.Value, indent+indentLvl, writer) - - case *Array: - // fmt.Fprintf(writer, "%s%sarray\n", indent, p.lineStr(typedVal.Position)) - - for i, item := range typedVal.Items { - valStr, isLeaf := p.leafValue(item.Value) - if !isLeaf || strings.Contains(valStr, "\n") { - fmt.Fprintf(writer, "%s%s[%d]\n", p.lineStr(item.Position), indent, i) - p.print(item.Value, indent+indentLvl, writer) - } else { - fmt.Fprintf(writer, "%s%s[%d] %s\n", p.lineStr(item.Position), indent, i, valStr) - } - } - - case *ArrayItem: - fmt.Fprintf(writer, "%s%s[?]\n", p.lineStr(typedVal.Position), indent) - p.print(typedVal.Value, indent+indentLvl, writer) - - default: - valStr, isLeaf := p.leafValue(val) - if !isLeaf { - panic(fmt.Sprintf("Expected leaf, but was %T", typedVal)) - } - fmt.Fprintf(writer, p.padLine("")+fmt.Sprintf("%s%s\n", indent, valStr)) - } -} - -func (p *FilePositionPrinter) leafValue(val interface{}) (string, bool) { - switch typedVal := val.(type) { - case *DocumentSet, *Document, *Map, *MapItem, *Array, *ArrayItem: - return "", false - - default: - typedValBs, err := yaml.Marshal(typedVal) - if err != nil { - panic(fmt.Sprintf("Failed to serialize %T", typedVal)) - } - return string(typedValBs[:len(typedValBs)-1]), true // strip newline at the end - } -} - -func (p *FilePositionPrinter) lineStr(pos *filepos.Position) string { - if pos.IsKnown() { - return p.padLine(pos.AsCompactString()) - } - return "" -} - -func (p *FilePositionPrinter) padLine(str string) string { - width := len(str) - if width > p.locWidth { - p.locWidth = width + 10 - } - return 
fmt.Sprintf(fmt.Sprintf("%%%ds | ", p.locWidth), str) -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/.travis.yml b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/.travis.yml deleted file mode 100644 index 9f556934d..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - tip - -go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/LICENSE b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/LICENSE.libyaml b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fbf6..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/NOTICE b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/NOTICE deleted file mode 100644 index 866d74a7a..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/README.md b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/README.md deleted file mode 100644 index 94f52272d..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/README.md +++ /dev/null @@ -1,133 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! 
-b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/apic.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/apic.go deleted file mode 100644 index cbb273967..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/apic.go +++ /dev/null @@ -1,743 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "io" -) - -func yamlInsertToken(parser *yamlParserT, pos int, token *yamlTokenT) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokensHead > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokensHead != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokensHead:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokensHead] - parser.tokensHead = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokensHead+pos+1:], parser.tokens[parser.tokensHead+pos:]) - parser.tokens[parser.tokensHead+pos] = *token -} - -// Create a new parser object. -func yamlParserInitialize(parser *yamlParserT) bool { - *parser = yamlParserT{ - rawBuffer: make([]byte, 0, inputRawBufferSize), - buffer: make([]byte, 0, inputBufferSize), - } - return true -} - -// Destroy a parser object. -func yamlParserDelete(parser *yamlParserT) { - *parser = yamlParserT{} -} - -// String read handler. -func yamlStringReadHandler(parser *yamlParserT, buffer []byte) (n int, err error) { - if parser.inputPos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.inputPos:]) - parser.inputPos += n - return n, nil -} - -// Reader read handler. -func yamlReaderReadHandler(parser *yamlParserT, buffer []byte) (n int, err error) { - return parser.inputReader.Read(buffer) -} - -// Set a string input. -func yamlParserSetInputString(parser *yamlParserT, input []byte) { - if parser.readHandler != nil { - panic("must set the input source only once") - } - parser.readHandler = yamlStringReadHandler - parser.input = input - parser.inputPos = 0 -} - -// Set a file input. -func yamlParserSetInputReader(parser *yamlParserT, r io.Reader) { - if parser.readHandler != nil { - panic("must set the input source only once") - } - parser.readHandler = yamlReaderReadHandler - parser.inputReader = r -} - -// Set the source encoding. -func yamlParserSetEncoding(parser *yamlParserT, encoding yamlEncodingT) { - if parser.encoding != yamlAnyEncoding { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yamlEmitterInitialize(emitter *yamlEmitterT) { - *emitter = yamlEmitterT{ - buffer: make([]byte, outputBufferSize), - rawBuffer: make([]byte, 0, outputRawBufferSize), - states: make([]yamlEmitterStateT, 0, initialStackSize), - events: make([]yamlEventT, 0, initialQueueSize), - bestWidth: -1, - } -} - -// Destroy an emitter object. -func yamlEmitterDelete(emitter *yamlEmitterT) { - *emitter = yamlEmitterT{} -} - -// String write handler. -func yamlStringWriteHandler(emitter *yamlEmitterT, buffer []byte) error { - *emitter.outputBuffer = append(*emitter.outputBuffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. 
-func yamlWriterWriteHandler(emitter *yamlEmitterT, buffer []byte) error { - _, err := emitter.outputWriter.Write(buffer) - return err -} - -// Set a string output. -func yamlEmitterSetOutputString(emitter *yamlEmitterT, outputBuffer *[]byte) { - if emitter.writeHandler != nil { - panic("must set the output target only once") - } - emitter.writeHandler = yamlStringWriteHandler - emitter.outputBuffer = outputBuffer -} - -// Set a file output. -func yamlEmitterSetOutputWriter(emitter *yamlEmitterT, w io.Writer) { - if emitter.writeHandler != nil { - panic("must set the output target only once") - } - emitter.writeHandler = yamlWriterWriteHandler - emitter.outputWriter = w -} - -// Set the output encoding. -func yamlEmitterSetEncoding(emitter *yamlEmitterT, encoding yamlEncodingT) { - if emitter.encoding != yamlAnyEncoding { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yamlEmitterSetCanonical(emitter *yamlEmitterT, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yamlEmitterSetIndent(emitter *yamlEmitterT, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.bestIndent = indent -} - -// Set the preferred line width. -func yamlEmitterSetWidth(emitter *yamlEmitterT, width int) { - if width < 0 { - width = -1 - } - emitter.bestWidth = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yamlEmitterSetUnicode(emitter *yamlEmitterT, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yamlEmitterSetBreak(emitter *yamlEmitterT, lineBreak yamlBreakT) { - emitter.lineBreak = lineBreak -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yamlStreamStartEventInitialize(event *yamlEventT, encoding yamlEncodingT) { - *event = yamlEventT{ - typ: yamlStreamStartEvent, - encoding: encoding, - } -} - -// Create STREAM-END. -func yamlStreamEndEventInitialize(event *yamlEventT) { - *event = yamlEventT{ - typ: yamlStreamEndEvent, - } -} - -// Create DOCUMENT-START. -func yamlDocumentStartEventInitialize( - event *yamlEventT, - versionDirective *yamlVersionDirectiveT, - tagDirectives []yamlTagDirectiveT, - implicit bool, -) { - *event = yamlEventT{ - typ: yamlDocumentStartEvent, - versionDirective: versionDirective, - tagDirectives: tagDirectives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yamlDocumentEndEventInitialize(event *yamlEventT, implicit bool) { - *event = yamlEventT{ - typ: yamlDocumentEndEvent, - implicit: implicit, - } -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yamlScalarEventInitialize(event *yamlEventT, anchor, tag, value []byte, plainImplicit, quotedImplicit bool, style yamlScalarStyleT) bool { - *event = yamlEventT{ - typ: yamlScalarEvent, - anchor: anchor, - tag: tag, - value: value, - implicit: plainImplicit, - quotedImplicit: quotedImplicit, - style: yamlStyleT(style), - } - return true -} - -// Create SEQUENCE-START. -func yamlSequenceStartEventInitialize(event *yamlEventT, anchor, tag []byte, implicit bool, style yamlSequenceStyleT) bool { - *event = yamlEventT{ - typ: yamlSequenceStartEvent, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yamlStyleT(style), - } - return true -} - -// Create SEQUENCE-END. -func yamlSequenceEndEventInitialize(event *yamlEventT) bool { - *event = yamlEventT{ - typ: yamlSequenceEndEvent, - } - return true -} - -// Create MAPPING-START. -func yamlMappingStartEventInitialize(event *yamlEventT, anchor, tag []byte, implicit bool, style yamlMappingStyleT) { - *event = yamlEventT{ - typ: yamlMappingStartEvent, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yamlStyleT(style), - } -} - -// Create MAPPING-END. -func yamlMappingEndEventInitialize(event *yamlEventT) { - *event = yamlEventT{ - typ: yamlMappingEndEvent, - } -} - -// Destroy an event object. -func yamlEventDelete(event *yamlEventT) { - *event = yamlEventT{} -} - -///* -// * Create a document object. 
-// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. 
-// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. 
-// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/decode.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/decode.go deleted file mode 100644 index 932ac61d8..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/decode.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - sequenceItemNode - scalarNode - aliasNode - commentNode -) - -type node struct { - kind int - line, column int - tag string - // For an alias node, alias holds the resolved alias. - alias *node - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. - -type parser struct { - parser yamlParserT - event yamlEventT - doc *node - doneInit bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yamlParserInitialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yamlParserSetInputString(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yamlParserInitialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yamlParserSetInputReader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.expect(yamlStreamStartEvent) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yamlNoEvent { - yamlEventDelete(&p.event) - } - yamlParserDelete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. -func (p *parser) expect(e yamlEventTypeT) { - if p.event.typ == yamlNoEvent { - if !yamlParserParse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yamlStreamEndEvent { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yamlEventDelete(&p.event) - p.event.typ = yamlNoEvent -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. 
-func (p *parser) peek() yamlEventTypeT { - if p.event.typ != yamlNoEvent { - return p.event.typ - } - if !yamlParserParse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problemMark.line != 0 { - line = p.parser.problemMark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yamlScannerError { - line++ - } - } else if p.parser.contextMark.line != 0 { - line = p.parser.contextMark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - p.init() - switch p.peek() { - case yamlScalarEvent: - return p.scalar() - case yamlAliasEvent: - return p.alias() - case yamlMappingStartEvent: - return p.mapping() - case yamlSequenceStartEvent: - return p.sequence() - case yamlDocumentStartEvent: - return p.document() - case yamlStreamEndEvent: - // Happens when attempting to decode an empty buffer. - return nil - default: - panic("attempted to parse unknown event: " + p.event.typ.String()) - } -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.startMark.line, - column: p.event.startMark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.expect(yamlDocumentStartEvent) - n.children = append(n.children, p.parse()) - p.expect(yamlDocumentEndEvent) - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - n.alias = p.doc.anchors[n.value] - if n.alias == nil { - failf("unknown anchor '%s' referenced", n.value) - } - p.expect(yamlAliasEvent) - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.expect(yamlScalarEvent) - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.expect(yamlSequenceStartEvent) - for p.peek() != yamlSequenceEndEvent { - if p.parser.pendingSeqItemEvent != nil { - n.children = append(n.children, &node{ - kind: sequenceItemNode, - line: p.parser.pendingSeqItemEvent.startMark.line, - column: p.parser.pendingSeqItemEvent.startMark.column, - }) - p.parser.pendingSeqItemEvent = nil - } - n.children = append(n.children, p.parse()) - } - p.expect(yamlSequenceEndEvent) - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.expect(yamlMappingStartEvent) - for p.peek() != yamlMappingEndEvent { - n.children = append(n.children, p.parse()) - } - p.expect(yamlMappingEndEvent) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. 
- -type decoder struct { - doc *node - aliases map[*node]bool - mapType reflect.Type - terrors []string - strict bool - resolveFunc func(tag string, in string) (rtag string, out interface{}) -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder(strict bool) *decoder { - d := &decoder{mapType: defaultMapType, strict: strict, resolveFunc: resolve} - d.aliases = make(map[*node]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yamlSeqTag && tag != yamlMapTag { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. -func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yamlNullTag || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n] = true - good = d.unmarshal(n.alias, out) - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) scalar(n *node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yamlStrTag - resolved = n.value - } else { - tag, resolved = d.resolveFunc(n.tag, n.value) - if tag == yamlBinaryTag { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == yamlBinaryTag { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. - text = []byte(n.value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == yamlBinaryTag { - out.SetString(resolved.(string)) - return true - } - if resolved != nil { - out.SetString(n.value) - return true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else if tag == yamlTimestampTag { - // It looks like a timestamp but for backward compatibility - // reasons we set it as a string, so that code that unmarshals - // timestamp-like values into interface{} will continue to - // see a string and not a time.Time. - // TODO(v3) Drop this. 
- out.Set(reflect.ValueOf(n.value)) - } else { - out.Set(reflect.ValueOf(resolved)) - } - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? - elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - return true - } - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - // If aliased content contains sequence this function will be called multiple times - childrenWithoutPosNodes := []*node{} - lineNums := []int{} - for _, child := range n.children { - if child.kind == sequenceItemNode { - lineNums = append(lineNums, child.line) - continue - } - childrenWithoutPosNodes = append(childrenWithoutPosNodes, child) - } - if len(childrenWithoutPosNodes) != len(lineNums) { - panic(fmt.Sprintf("expected len of sequence children to match len of children line nums: %d != %d", len(childrenWithoutPosNodes), len(lineNums))) - } - - l := len(childrenWithoutPosNodes) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yamlSeqTag, out) - return false - } - et := out.Type().Elem() - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(childrenWithoutPosNodes[i], e); ok { - eItem := reflect.ValueOf(ArrayItem{Value: e.Interface(), Line: lineNums[i]}) - out.Index(j).Set(eItem) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yamlMapTag, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - d.setMapIndex(n.children[i+1], out, k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { - if d.strict && out.MapIndex(k) != zeroValue { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) - return - } - out.SetMapIndex(k, v) -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yamlMapTag, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - // Taken from https://github.com/go-yaml/yaml/pull/364 - s := MapSlice{} - d.merge(n.children[i+1], reflect.ValueOf(&s)) - slice = append(slice, s...) 
- continue - } - item := MapItem{Line: n.children[i].line} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - var doneFields []bool - if d.strict { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.strict { - if doneFields[info.ID] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) - continue - } - doneFields[info.ID] = true - } - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - d.setMapIndex(n.children[i+1], inlineMap, name, value) - } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yamlMergeTag) -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/emitterc.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/emitterc.go deleted file mode 100644 index 587b77eef..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/emitterc.go +++ /dev/null @@ -1,1688 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yamlEmitterT) bool { - if emitter.bufferPos+5 >= len(emitter.buffer) { - return yamlEmitterFlush(emitter) - } - return true -} - -// Put a character to the output buffer. 
-func put(emitter *yamlEmitterT, value byte) bool { - if emitter.bufferPos+5 >= len(emitter.buffer) && !yamlEmitterFlush(emitter) { - return false - } - emitter.buffer[emitter.bufferPos] = value - emitter.bufferPos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func putBreak(emitter *yamlEmitterT) bool { - if emitter.bufferPos+5 >= len(emitter.buffer) && !yamlEmitterFlush(emitter) { - return false - } - switch emitter.lineBreak { - case yamlCrBreak: - emitter.buffer[emitter.bufferPos] = '\r' - emitter.bufferPos++ - case yamlLnBreak: - emitter.buffer[emitter.bufferPos] = '\n' - emitter.bufferPos++ - case yamlCrlnBreak: - emitter.buffer[emitter.bufferPos+0] = '\r' - emitter.buffer[emitter.bufferPos+1] = '\n' - emitter.bufferPos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yamlEmitterT, s []byte, i *int) bool { - if emitter.bufferPos+5 >= len(emitter.buffer) && !yamlEmitterFlush(emitter) { - return false - } - p := emitter.bufferPos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.bufferPos += w - *i += w - return true -} - -// Write a whole string into buffer. -func writeAll(emitter *yamlEmitterT, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func writeBreak(emitter *yamlEmitterT, s []byte, i *int) bool { - if s[*i] == '\n' { - if !putBreak(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yamlEmitterSetEmitterError(emitter *yamlEmitterT, problem string) bool { - emitter.error = yamlEmitterError - emitter.problem = problem - return false -} - -// Emit an event. -func yamlEmitterEmit(emitter *yamlEmitterT, event *yamlEventT) bool { - emitter.events = append(emitter.events, *event) - for !yamlEmitterNeedMoreEvents(emitter) { - event := &emitter.events[emitter.eventsHead] - if !yamlEmitterAnalyzeEvent(emitter, event) { - return false - } - if !yamlEmitterStateMachine(emitter, event) { - return false - } - yamlEventDelete(event) - emitter.eventsHead++ - } - return true -} - -// Check if we need to accumulate more events before emitting. 
-// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yamlEmitterNeedMoreEvents(emitter *yamlEmitterT) bool { - if emitter.eventsHead == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.eventsHead].typ { - case yamlDocumentStartEvent: - accumulate = 1 - break - case yamlSequenceStartEvent: - accumulate = 2 - break - case yamlMappingStartEvent: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.eventsHead > accumulate { - return false - } - var level int - for i := emitter.eventsHead; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yamlStreamStartEvent, yamlDocumentStartEvent, yamlSequenceStartEvent, yamlMappingStartEvent: - level++ - case yamlStreamEndEvent, yamlDocumentEndEvent, yamlSequenceEndEvent, yamlMappingEndEvent: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yamlEmitterAppendTagDirective(emitter *yamlEmitterT, value *yamlTagDirectiveT, allowDuplicates bool) bool { - for i := 0; i < len(emitter.tagDirectives); i++ { - if bytes.Equal(value.handle, emitter.tagDirectives[i].handle) { - if allowDuplicates { - return true - } - return yamlEmitterSetEmitterError(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tagCopy := yamlTagDirectiveT{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tagCopy.handle, value.handle) - copy(tagCopy.prefix, value.prefix) - emitter.tagDirectives = append(emitter.tagDirectives, tagCopy) - return true -} - -// Increase the indentation level. -func yamlEmitterIncreaseIndent(emitter *yamlEmitterT, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.bestIndent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.bestIndent - } - return true -} - -// State dispatcher. 
-func yamlEmitterStateMachine(emitter *yamlEmitterT, event *yamlEventT) bool { - switch emitter.state { - default: - case yamlEmitStreamStartState: - return yamlEmitterEmitStreamStart(emitter, event) - - case yamlEmitFirstDocumentStartState: - return yamlEmitterEmitDocumentStart(emitter, event, true) - - case yamlEmitDocumentStartState: - return yamlEmitterEmitDocumentStart(emitter, event, false) - - case yamlEmitDocumentContentState: - return yamlEmitterEmitDocumentContent(emitter, event) - - case yamlEmitDocumentEndState: - return yamlEmitterEmitDocumentEnd(emitter, event) - - case yamlEmitFlowSequenceFirstItemState: - return yamlEmitterEmitFlowSequenceItem(emitter, event, true) - - case yamlEmitFlowSequenceItemState: - return yamlEmitterEmitFlowSequenceItem(emitter, event, false) - - case yamlEmitFlowMappingFirstKeyState: - return yamlEmitterEmitFlowMappingKey(emitter, event, true) - - case yamlEmitFlowMappingKeyState: - return yamlEmitterEmitFlowMappingKey(emitter, event, false) - - case yamlEmitFlowMappingSimpleValueState: - return yamlEmitterEmitFlowMappingValue(emitter, event, true) - - case yamlEmitFlowMappingValueState: - return yamlEmitterEmitFlowMappingValue(emitter, event, false) - - case yamlEmitBlockSequenceFirstItemState: - return yamlEmitterEmitBlockSequenceItem(emitter, event, true) - - case yamlEmitBlockSequenceItemState: - return yamlEmitterEmitBlockSequenceItem(emitter, event, false) - - case yamlEmitBlockMappingFirstKeyState: - return yamlEmitterEmitBlockMappingKey(emitter, event, true) - - case yamlEmitBlockMappingKeyState: - return yamlEmitterEmitBlockMappingKey(emitter, event, false) - - case yamlEmitBlockMappingSimpleValueState: - return yamlEmitterEmitBlockMappingValue(emitter, event, true) - - case yamlEmitBlockMappingValueState: - return yamlEmitterEmitBlockMappingValue(emitter, event, false) - - case yamlEmitEndState: - return yamlEmitterSetEmitterError(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yamlEmitterEmitStreamStart(emitter *yamlEmitterT, event *yamlEventT) bool { - if event.typ != yamlStreamStartEvent { - return yamlEmitterSetEmitterError(emitter, "expected STREAM-START") - } - if emitter.encoding == yamlAnyEncoding { - emitter.encoding = event.encoding - if emitter.encoding == yamlAnyEncoding { - emitter.encoding = yamlUtf8Encoding - } - } - if emitter.bestIndent < 2 || emitter.bestIndent > 9 { - emitter.bestIndent = 2 - } - if emitter.bestWidth >= 0 && emitter.bestWidth <= emitter.bestIndent*2 { - emitter.bestWidth = 80 - } - if emitter.bestWidth < 0 { - emitter.bestWidth = 1<<31 - 1 - } - if emitter.lineBreak == yamlAnyBreak { - emitter.lineBreak = yamlLnBreak - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yamlUtf8Encoding { - if !yamlEmitterWriteBom(emitter) { - return false - } - } - emitter.state = yamlEmitFirstDocumentStartState - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
-func yamlEmitterEmitDocumentStart(emitter *yamlEmitterT, event *yamlEventT, first bool) bool { - - if event.typ == yamlDocumentStartEvent { - - if event.versionDirective != nil { - if !yamlEmitterAnalyzeVersionDirective(emitter, event.versionDirective) { - return false - } - } - - for i := 0; i < len(event.tagDirectives); i++ { - tagDirective := &event.tagDirectives[i] - if !yamlEmitterAnalyzeTagDirective(emitter, tagDirective) { - return false - } - if !yamlEmitterAppendTagDirective(emitter, tagDirective, false) { - return false - } - } - - for i := 0; i < len(defaultTagDirectives); i++ { - tagDirective := &defaultTagDirectives[i] - if !yamlEmitterAppendTagDirective(emitter, tagDirective, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.openEnded && (event.versionDirective != nil || len(event.tagDirectives) > 0) { - if !yamlEmitterWriteIndicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - - if event.versionDirective != nil { - implicit = false - if !yamlEmitterWriteIndicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yamlEmitterWriteIndicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - - if len(event.tagDirectives) > 0 { - implicit = false - for i := 0; i < len(event.tagDirectives); i++ { - tagDirective := &event.tagDirectives[i] - if !yamlEmitterWriteIndicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yamlEmitterWriteTagHandle(emitter, tagDirective.handle) { - return false - } - if !yamlEmitterWriteTagContent(emitter, tagDirective.prefix, true) { - return false - } - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - } - - if yamlEmitterCheckEmptyDocument(emitter) { - implicit = false - } - if !implicit { - if !yamlEmitterWriteIndent(emitter) { - return false - } - if !yamlEmitterWriteIndicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - } - - emitter.state = yamlEmitDocumentContentState - return true - } - - if event.typ == yamlStreamEndEvent { - if emitter.openEnded { - if !yamlEmitterWriteIndicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - if !yamlEmitterFlush(emitter) { - return false - } - emitter.state = yamlEmitEndState - return true - } - - return yamlEmitterSetEmitterError(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yamlEmitterEmitDocumentContent(emitter *yamlEmitterT, event *yamlEventT) bool { - emitter.states = append(emitter.states, yamlEmitDocumentEndState) - return yamlEmitterEmitNode(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yamlEmitterEmitDocumentEnd(emitter *yamlEmitterT, event *yamlEventT) bool { - if event.typ != yamlDocumentEndEvent { - return yamlEmitterSetEmitterError(emitter, "expected DOCUMENT-END") - } - if !yamlEmitterWriteIndent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. 
- if !yamlEmitterWriteIndicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - if !yamlEmitterFlush(emitter) { - return false - } - emitter.state = yamlEmitDocumentStartState - emitter.tagDirectives = emitter.tagDirectives[:0] - return true -} - -// Expect a flow item node. -func yamlEmitterEmitFlowSequenceItem(emitter *yamlEmitterT, event *yamlEventT, first bool) bool { - if first { - if !yamlEmitterWriteIndicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yamlEmitterIncreaseIndent(emitter, true, false) { - return false - } - emitter.flowLevel++ - } - - if event.typ == yamlSequenceEndEvent { - emitter.flowLevel-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yamlEmitterWriteIndicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - if !yamlEmitterWriteIndicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yamlEmitterWriteIndicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.bestWidth { - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yamlEmitFlowSequenceItemState) - return yamlEmitterEmitNode(emitter, event, false, true, false, false) -} - -// Expect a flow key node. -func yamlEmitterEmitFlowMappingKey(emitter *yamlEmitterT, event *yamlEventT, first bool) bool { - if first { - if !yamlEmitterWriteIndicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yamlEmitterIncreaseIndent(emitter, true, false) { - return false - } - emitter.flowLevel++ - } - - if event.typ == yamlMappingEndEvent { - emitter.flowLevel-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yamlEmitterWriteIndicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - if !yamlEmitterWriteIndicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yamlEmitterWriteIndicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.bestWidth { - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - - if !emitter.canonical && yamlEmitterCheckSimpleKey(emitter) { - emitter.states = append(emitter.states, yamlEmitFlowMappingSimpleValueState) - return yamlEmitterEmitNode(emitter, event, false, false, true, true) - } - if !yamlEmitterWriteIndicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yamlEmitFlowMappingValueState) - return yamlEmitterEmitNode(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
-func yamlEmitterEmitFlowMappingValue(emitter *yamlEmitterT, event *yamlEventT, simple bool) bool { - if simple { - if !yamlEmitterWriteIndicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.bestWidth { - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - if !yamlEmitterWriteIndicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yamlEmitFlowMappingKeyState) - return yamlEmitterEmitNode(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yamlEmitterEmitBlockSequenceItem(emitter *yamlEmitterT, event *yamlEventT, first bool) bool { - if first { - if !yamlEmitterIncreaseIndent(emitter, false, emitter.mappingContext && !emitter.indention) { - return false - } - } - if event.typ == yamlSequenceEndEvent { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yamlEmitterWriteIndent(emitter) { - return false - } - if !yamlEmitterWriteIndicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yamlEmitBlockSequenceItemState) - return yamlEmitterEmitNode(emitter, event, false, true, false, false) -} - -// Expect a block key node. -func yamlEmitterEmitBlockMappingKey(emitter *yamlEmitterT, event *yamlEventT, first bool) bool { - if first { - if !yamlEmitterIncreaseIndent(emitter, false, false) { - return false - } - } - if event.typ == yamlMappingEndEvent { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yamlEmitterWriteIndent(emitter) { - return false - } - if yamlEmitterCheckSimpleKey(emitter) { - emitter.states = append(emitter.states, yamlEmitBlockMappingSimpleValueState) - return yamlEmitterEmitNode(emitter, event, false, false, true, true) - } - if !yamlEmitterWriteIndicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yamlEmitBlockMappingValueState) - return yamlEmitterEmitNode(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yamlEmitterEmitBlockMappingValue(emitter *yamlEmitterT, event *yamlEventT, simple bool) bool { - if simple { - if !yamlEmitterWriteIndicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yamlEmitterWriteIndent(emitter) { - return false - } - if !yamlEmitterWriteIndicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yamlEmitBlockMappingKeyState) - return yamlEmitterEmitNode(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yamlEmitterEmitNode(emitter *yamlEmitterT, event *yamlEventT, - root bool, sequence bool, mapping bool, simpleKey bool) bool { - - emitter.rootContext = root - emitter.sequenceContext = sequence - emitter.mappingContext = mapping - emitter.simpleKeyContext = simpleKey - - switch event.typ { - case yamlAliasEvent: - return yamlEmitterEmitAlias(emitter, event) - case yamlScalarEvent: - return yamlEmitterEmitScalar(emitter, event) - case yamlSequenceStartEvent: - return yamlEmitterEmitSequenceStart(emitter, event) - case yamlMappingStartEvent: - return yamlEmitterEmitMappingStart(emitter, event) - default: - return yamlEmitterSetEmitterError(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yamlEmitterEmitAlias(emitter *yamlEmitterT, event *yamlEventT) bool { - if !yamlEmitterProcessAnchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yamlEmitterEmitScalar(emitter *yamlEmitterT, event *yamlEventT) bool { - if !yamlEmitterSelectScalarStyle(emitter, event) { - return false - } - if !yamlEmitterProcessAnchor(emitter) { - return false - } - if !yamlEmitterProcessTag(emitter) { - return false - } - if !yamlEmitterIncreaseIndent(emitter, true, false) { - return false - } - if !yamlEmitterProcessScalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yamlEmitterEmitSequenceStart(emitter *yamlEmitterT, event *yamlEventT) bool { - if !yamlEmitterProcessAnchor(emitter) { - return false - } - if !yamlEmitterProcessTag(emitter) { - return false - } - if emitter.flowLevel > 0 || emitter.canonical || event.sequenceStyle() == yamlFlowSequenceStyle || - yamlEmitterCheckEmptySequence(emitter) { - emitter.state = yamlEmitFlowSequenceFirstItemState - } else { - emitter.state = yamlEmitBlockSequenceFirstItemState - } - return true -} - -// Expect MAPPING-START. -func yamlEmitterEmitMappingStart(emitter *yamlEmitterT, event *yamlEventT) bool { - if !yamlEmitterProcessAnchor(emitter) { - return false - } - if !yamlEmitterProcessTag(emitter) { - return false - } - if emitter.flowLevel > 0 || emitter.canonical || event.mappingStyle() == yamlFlowMappingStyle || - yamlEmitterCheckEmptyMapping(emitter) { - emitter.state = yamlEmitFlowMappingFirstKeyState - } else { - emitter.state = yamlEmitBlockMappingFirstKeyState - } - return true -} - -// Check if the document content is an empty scalar. -func yamlEmitterCheckEmptyDocument(emitter *yamlEmitterT) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yamlEmitterCheckEmptySequence(emitter *yamlEmitterT) bool { - if len(emitter.events)-emitter.eventsHead < 2 { - return false - } - return emitter.events[emitter.eventsHead].typ == yamlSequenceStartEvent && - emitter.events[emitter.eventsHead+1].typ == yamlSequenceEndEvent -} - -// Check if the next events represent an empty mapping. 
-func yamlEmitterCheckEmptyMapping(emitter *yamlEmitterT) bool { - if len(emitter.events)-emitter.eventsHead < 2 { - return false - } - return emitter.events[emitter.eventsHead].typ == yamlMappingStartEvent && - emitter.events[emitter.eventsHead+1].typ == yamlMappingEndEvent -} - -// Check if the next node can be expressed as a simple key. -func yamlEmitterCheckSimpleKey(emitter *yamlEmitterT) bool { - length := 0 - switch emitter.events[emitter.eventsHead].typ { - case yamlAliasEvent: - length += len(emitter.anchorData.anchor) - case yamlScalarEvent: - if emitter.scalarData.multiline { - return false - } - length += len(emitter.anchorData.anchor) + - len(emitter.tagData.handle) + - len(emitter.tagData.suffix) + - len(emitter.scalarData.value) - case yamlSequenceStartEvent: - if !yamlEmitterCheckEmptySequence(emitter) { - return false - } - length += len(emitter.anchorData.anchor) + - len(emitter.tagData.handle) + - len(emitter.tagData.suffix) - case yamlMappingStartEvent: - if !yamlEmitterCheckEmptyMapping(emitter) { - return false - } - length += len(emitter.anchorData.anchor) + - len(emitter.tagData.handle) + - len(emitter.tagData.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yamlEmitterSelectScalarStyle(emitter *yamlEmitterT, event *yamlEventT) bool { - - noTag := len(emitter.tagData.handle) == 0 && len(emitter.tagData.suffix) == 0 - if noTag && !event.implicit && !event.quotedImplicit { - return yamlEmitterSetEmitterError(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalarStyle() - if style == yamlAnyScalarStyle { - style = yamlPlainScalarStyle - } - if emitter.canonical { - style = yamlDoubleQuotedScalarStyle - } - if emitter.simpleKeyContext && emitter.scalarData.multiline { - style = yamlDoubleQuotedScalarStyle - } - - if style == yamlPlainScalarStyle { - if emitter.flowLevel > 0 && !emitter.scalarData.flowPlainAllowed || - emitter.flowLevel == 0 && !emitter.scalarData.blockPlainAllowed { - style = yamlSingleQuotedScalarStyle - } - if len(emitter.scalarData.value) == 0 && (emitter.flowLevel > 0 || emitter.simpleKeyContext) { - style = yamlSingleQuotedScalarStyle - } - if noTag && !event.implicit { - style = yamlSingleQuotedScalarStyle - } - } - if style == yamlSingleQuotedScalarStyle { - if !emitter.scalarData.singleQuotedAllowed { - style = yamlDoubleQuotedScalarStyle - } - } - if style == yamlLiteralScalarStyle || style == yamlFoldedScalarStyle { - if !emitter.scalarData.blockAllowed || emitter.flowLevel > 0 || emitter.simpleKeyContext { - style = yamlDoubleQuotedScalarStyle - } - } - - if noTag && !event.quotedImplicit && style != yamlPlainScalarStyle { - emitter.tagData.handle = []byte{'!'} - } - emitter.scalarData.style = style - return true -} - -// Write an anchor. -func yamlEmitterProcessAnchor(emitter *yamlEmitterT) bool { - if emitter.anchorData.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchorData.alias { - c[0] = '*' - } - if !yamlEmitterWriteIndicator(emitter, c, true, false, false) { - return false - } - return yamlEmitterWriteAnchor(emitter, emitter.anchorData.anchor) -} - -// Write a tag. 
-func yamlEmitterProcessTag(emitter *yamlEmitterT) bool { - if len(emitter.tagData.handle) == 0 && len(emitter.tagData.suffix) == 0 { - return true - } - if len(emitter.tagData.handle) > 0 { - if !yamlEmitterWriteTagHandle(emitter, emitter.tagData.handle) { - return false - } - if len(emitter.tagData.suffix) > 0 { - if !yamlEmitterWriteTagContent(emitter, emitter.tagData.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yamlEmitterWriteIndicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yamlEmitterWriteTagContent(emitter, emitter.tagData.suffix, false) { - return false - } - if !yamlEmitterWriteIndicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yamlEmitterProcessScalar(emitter *yamlEmitterT) bool { - switch emitter.scalarData.style { - case yamlPlainScalarStyle: - return yamlEmitterWritePlainScalar(emitter, emitter.scalarData.value, !emitter.simpleKeyContext) - - case yamlSingleQuotedScalarStyle: - return yamlEmitterWriteSingleQuotedScalar(emitter, emitter.scalarData.value, !emitter.simpleKeyContext) - - case yamlDoubleQuotedScalarStyle: - return yamlEmitterWriteDoubleQuotedScalar(emitter, emitter.scalarData.value, !emitter.simpleKeyContext) - - case yamlLiteralScalarStyle: - return yamlEmitterWriteLiteralScalar(emitter, emitter.scalarData.value) - - case yamlFoldedScalarStyle: - return yamlEmitterWriteFoldedScalar(emitter, emitter.scalarData.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yamlEmitterAnalyzeVersionDirective(emitter *yamlEmitterT, versionDirective *yamlVersionDirectiveT) bool { - if versionDirective.major != 1 || versionDirective.minor != 1 { - return yamlEmitterSetEmitterError(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yamlEmitterAnalyzeTagDirective(emitter *yamlEmitterT, tagDirective *yamlTagDirectiveT) bool { - handle := tagDirective.handle - prefix := tagDirective.prefix - if len(handle) == 0 { - return yamlEmitterSetEmitterError(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yamlEmitterSetEmitterError(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yamlEmitterSetEmitterError(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !isAlpha(handle, i) { - return yamlEmitterSetEmitterError(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yamlEmitterSetEmitterError(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. -func yamlEmitterAnalyzeAnchor(emitter *yamlEmitterT, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yamlEmitterSetEmitterError(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !isAlpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yamlEmitterSetEmitterError(emitter, problem) - } - } - emitter.anchorData.anchor = anchor - emitter.anchorData.alias = alias - return true -} - -// Check if a tag is valid. 
-func yamlEmitterAnalyzeTag(emitter *yamlEmitterT, tag []byte) bool { - if len(tag) == 0 { - return yamlEmitterSetEmitterError(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tagDirectives); i++ { - tagDirective := &emitter.tagDirectives[i] - if bytes.HasPrefix(tag, tagDirective.prefix) { - emitter.tagData.handle = tagDirective.handle - emitter.tagData.suffix = tag[len(tagDirective.prefix):] - return true - } - } - emitter.tagData.suffix = tag - return true -} - -// Check if a scalar is valid. -func yamlEmitterAnalyzeScalar(emitter *yamlEmitterT, value []byte) bool { - var ( - blockIndicators = false - flowIndicators = false - lineBreaks = false - specialCharacters = false - - leadingSpace = false - leadingBreak = false - trailingSpace = false - trailingBreak = false - breakSpace = false - spaceBreak = false - - precededByWhitespace = false - followedByWhitespace = false - previousSpace = false - previousBreak = false - ) - - emitter.scalarData.value = value - - if len(value) == 0 { - emitter.scalarData.multiline = false - emitter.scalarData.flowPlainAllowed = false - emitter.scalarData.blockPlainAllowed = true - emitter.scalarData.singleQuotedAllowed = true - emitter.scalarData.blockAllowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { - blockIndicators = true - flowIndicators = true - } - - precededByWhitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followedByWhitespace = i+w >= len(value) || isBlank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flowIndicators = true - blockIndicators = true - case '?', ':': - flowIndicators = true - if followedByWhitespace { - blockIndicators = true - } - case '-': - if followedByWhitespace { - flowIndicators = true - blockIndicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flowIndicators = true - case ':': - flowIndicators = true - if followedByWhitespace { - blockIndicators = true - } - case '#': - if precededByWhitespace { - flowIndicators = true - blockIndicators = true - } - } - } - - if !isPrintable(value, i) || !isASCII(value, i) && !emitter.unicode { - specialCharacters = true - } - if isSpace(value, i) { - if i == 0 { - leadingSpace = true - } - if i+width(value[i]) == len(value) { - trailingSpace = true - } - if previousBreak { - breakSpace = true - } - previousSpace = true - previousBreak = false - } else if isBreak(value, i) { - lineBreaks = true - if i == 0 { - leadingBreak = true - } - if i+width(value[i]) == len(value) { - trailingBreak = true - } - if previousSpace { - spaceBreak = true - } - previousSpace = false - previousBreak = true - } else { - previousSpace = false - previousBreak = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
- precededByWhitespace = isBlankz(value, i) - } - - emitter.scalarData.multiline = lineBreaks - emitter.scalarData.flowPlainAllowed = true - emitter.scalarData.blockPlainAllowed = true - emitter.scalarData.singleQuotedAllowed = true - emitter.scalarData.blockAllowed = true - - if leadingSpace || leadingBreak || trailingSpace || trailingBreak { - emitter.scalarData.flowPlainAllowed = false - emitter.scalarData.blockPlainAllowed = false - } - if trailingSpace { - emitter.scalarData.blockAllowed = false - } - if breakSpace { - emitter.scalarData.flowPlainAllowed = false - emitter.scalarData.blockPlainAllowed = false - emitter.scalarData.singleQuotedAllowed = false - } - if spaceBreak || specialCharacters { - emitter.scalarData.flowPlainAllowed = false - emitter.scalarData.blockPlainAllowed = false - emitter.scalarData.singleQuotedAllowed = false - emitter.scalarData.blockAllowed = false - } - if lineBreaks { - emitter.scalarData.flowPlainAllowed = false - emitter.scalarData.blockPlainAllowed = false - } - if flowIndicators { - emitter.scalarData.flowPlainAllowed = false - } - if blockIndicators { - emitter.scalarData.blockPlainAllowed = false - } - return true -} - -// Check if the event data is valid. -func yamlEmitterAnalyzeEvent(emitter *yamlEmitterT, event *yamlEventT) bool { - - emitter.anchorData.anchor = nil - emitter.tagData.handle = nil - emitter.tagData.suffix = nil - emitter.scalarData.value = nil - - switch event.typ { - case yamlAliasEvent: - if !yamlEmitterAnalyzeAnchor(emitter, event.anchor, true) { - return false - } - - case yamlScalarEvent: - if len(event.anchor) > 0 { - if !yamlEmitterAnalyzeAnchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quotedImplicit)) { - if !yamlEmitterAnalyzeTag(emitter, event.tag) { - return false - } - } - if !yamlEmitterAnalyzeScalar(emitter, event.value) { - return false - } - - case yamlSequenceStartEvent: - if len(event.anchor) > 0 { - if !yamlEmitterAnalyzeAnchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yamlEmitterAnalyzeTag(emitter, event.tag) { - return false - } - } - - case yamlMappingStartEvent: - if len(event.anchor) > 0 { - if !yamlEmitterAnalyzeAnchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yamlEmitterAnalyzeTag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. 
-func yamlEmitterWriteBom(emitter *yamlEmitterT) bool { - if !flush(emitter) { - return false - } - pos := emitter.bufferPos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.bufferPos += 3 - return true -} - -func yamlEmitterWriteIndent(emitter *yamlEmitterT) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !putBreak(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yamlEmitterWriteIndicator(emitter *yamlEmitterT, indicator []byte, needWhitespace, isWhitespace, isIndention bool) bool { - if needWhitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !writeAll(emitter, indicator) { - return false - } - emitter.whitespace = isWhitespace - emitter.indention = (emitter.indention && isIndention) - emitter.openEnded = false - return true -} - -func yamlEmitterWriteAnchor(emitter *yamlEmitterT, value []byte) bool { - if !writeAll(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yamlEmitterWriteTagHandle(emitter *yamlEmitterT, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !writeAll(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yamlEmitterWriteTagContent(emitter *yamlEmitterT, value []byte, needWhitespace bool) bool { - if needWhitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var mustWrite bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - mustWrite = true - default: - mustWrite = isAlpha(value, i) - } - if mustWrite { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yamlEmitterWritePlainScalar(emitter *yamlEmitterT, value []byte, allowBreaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if isSpace(value, i) { - if allowBreaks && !spaces && emitter.column > emitter.bestWidth && !isSpace(value, i+1) { - if !yamlEmitterWriteIndent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if isBreak(value, i) { - if !breaks && value[i] == '\n' { - if !putBreak(emitter) { - return false - } - } - if !writeBreak(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - 
emitter.indention = false - if emitter.rootContext { - emitter.openEnded = true - } - - return true -} - -func yamlEmitterWriteSingleQuotedScalar(emitter *yamlEmitterT, value []byte, allowBreaks bool) bool { - - if !yamlEmitterWriteIndicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if isSpace(value, i) { - if allowBreaks && !spaces && emitter.column > emitter.bestWidth && i > 0 && i < len(value)-1 && !isSpace(value, i+1) { - if !yamlEmitterWriteIndent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if isBreak(value, i) { - if !breaks && value[i] == '\n' { - if !putBreak(emitter) { - return false - } - } - if !writeBreak(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yamlEmitterWriteIndicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yamlEmitterWriteDoubleQuotedScalar(emitter *yamlEmitterT, value []byte, allowBreaks bool) bool { - spaces := false - if !yamlEmitterWriteIndicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !isPrintable(value, i) || (!emitter.unicode && !isASCII(value, i)) || - isBom(value, i) || isBreak(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if isSpace(value, i) { - if allowBreaks && !spaces && emitter.column > emitter.bestWidth && i > 0 && i < len(value)-1 { - if !yamlEmitterWriteIndent(emitter) { - return false - } - if isSpace(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return 
false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yamlEmitterWriteIndicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yamlEmitterWriteBlockScalarHints(emitter *yamlEmitterT, value []byte) bool { - if isSpace(value, 0) || isBreak(value, 0) { - indentHint := []byte{'0' + byte(emitter.bestIndent)} - if !yamlEmitterWriteIndicator(emitter, indentHint, false, false, false) { - return false - } - } - - emitter.openEnded = false - - var chompHint [1]byte - if len(value) == 0 { - chompHint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !isBreak(value, i) { - chompHint[0] = '-' - } else if i == 0 { - chompHint[0] = '+' - emitter.openEnded = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if isBreak(value, i) { - chompHint[0] = '+' - emitter.openEnded = true - } - } - } - if chompHint[0] != 0 { - if !yamlEmitterWriteIndicator(emitter, chompHint[:], false, false, false) { - return false - } - } - return true -} - -func yamlEmitterWriteLiteralScalar(emitter *yamlEmitterT, value []byte) bool { - if !yamlEmitterWriteIndicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yamlEmitterWriteBlockScalarHints(emitter, value) { - return false - } - if !putBreak(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if isBreak(value, i) { - if !writeBreak(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yamlEmitterWriteIndent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yamlEmitterWriteFoldedScalar(emitter *yamlEmitterT, value []byte) bool { - if !yamlEmitterWriteIndicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yamlEmitterWriteBlockScalarHints(emitter, value) { - return false - } - - if !putBreak(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leadingSpaces := true - for i := 0; i < len(value); { - if isBreak(value, i) { - if !breaks && !leadingSpaces && value[i] == '\n' { - k := 0 - for isBreak(value, k) { - k += width(value[k]) - } - if !isBlankz(value, k) { - if !putBreak(emitter) { - return false - } - } - } - if !writeBreak(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yamlEmitterWriteIndent(emitter) { - return false - } - leadingSpaces = isBlank(value, i) - } - if !breaks && isSpace(value, i) && !isSpace(value, i+1) && emitter.column > emitter.bestWidth { - if !yamlEmitterWriteIndent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/encode.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/encode.go deleted file mode 100644 index a01a0e9c9..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/encode.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// jsonNumber is the interface of the encoding/json.Number datatype. -// Repeating the interface here avoids a dependency on encoding/json, and also -// supports other libraries like jsoniter, which use a similar datatype with -// the same interface. Detecting this interface is useful when dealing with -// structures containing json.Number, which is a string under the hood. The -// encoder should prefer the use of Int64(), Float64() and string(), in that -// order, when encoding this type. -type jsonNumber interface { - Float64() (float64, error) - Int64() (int64, error) - String() string -} - -type encoder struct { - emitter yamlEmitterT - event yamlEventT - out []byte - flow bool - // doneInit holds whether the initial stream_start_event has been - // emitted. - doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yamlEmitterInitialize(&e.emitter) - yamlEmitterSetOutputString(&e.emitter, &e.out) - yamlEmitterSetUnicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yamlEmitterInitialize(&e.emitter) - yamlEmitterSetOutputWriter(&e.emitter, w) - yamlEmitterSetUnicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - yamlStreamStartEventInitialize(&e.event, yamlUtf8Encoding) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.openEnded = false - yamlStreamEndEventInitialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yamlEmitterDelete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yamlEmitterEmit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - yamlDocumentStartEventInitialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yamlDocumentEndEventInitialize(&e.event, true) - e.emit() -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch m := iface.(type) { - case jsonNumber: - integer, err := m.Int64() - if err == nil { - // In this case the json.Number is a valid int64 - in = reflect.ValueOf(integer) - break - } - float, err := m.Float64() - if err == nil { - // In this case the json.Number is a valid float64 - in = reflect.ValueOf(float) - break - } - // fallback case - no number could be obtained - in = reflect.ValueOf(m.String()) - case time.Time, *time.Time: - // Although time.Time implements TextMarshaler, - // we don't want to treat it as a string for YAML - // purposes because YAML has special support for - // timestamps. 
- case Marshaler: - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - case encoding.TextMarshaler: - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.Type() == ptrTimeType { - e.timev(tag, in.Elem()) - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - if in.Type() == timeType { - e.timev(tag, in) - } else { - e.structv(tag, in) - } - case reflect.Slice, reflect.Array: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yamlBlockMappingStyle - if e.flow { - e.flow = false - style = yamlFlowMappingStyle - } - yamlMappingStartEventInitialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yamlMappingEndEventInitialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yamlBlockSequenceStyle - if e.flow { - e.flow = false - style = yamlFlowSequenceStyle - } - e.must(yamlSequenceStartEventInitialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yamlSequenceEndEventInitialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 
60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yamlScalarStyleT - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == yamlBinaryTag { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = yamlBinaryTag - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == yamlStrTag && !isBase60Float(s) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. - switch { - case strings.Contains(s, "\n"): - style = yamlLiteralScalarStyle - case canUsePlain: - style = yamlPlainScalarStyle - default: - style = yamlDoubleQuotedScalarStyle - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yamlPlainScalarStyle) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yamlPlainScalarStyle) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yamlPlainScalarStyle) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yamlPlainScalarStyle) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yamlPlainScalarStyle) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yamlPlainScalarStyle) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yamlScalarStyleT) { - implicit := tag == "" - e.must(yamlScalarEventInitialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/parserc.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/parserc.go deleted file mode 100644 index 684880a05..000000000 --- 
a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/parserc.go +++ /dev/null @@ -1,1111 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peekToken(parser *yamlParserT) *yamlTokenT { - if parser.tokenAvailable || yamlParserFetchMoreTokens(parser) { - return &parser.tokens[parser.tokensHead] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skipToken(parser *yamlParserT) { - parser.tokenAvailable = false - parser.tokensParsed++ - parser.streamEndProduced = parser.tokens[parser.tokensHead].typ == yamlStreamEndToken - parser.tokensHead++ -} - -// Get the next event. -func yamlParserParse(parser *yamlParserT, event *yamlEventT) bool { - // Erase the event object. - *event = yamlEventT{} - - // No events after the end of the stream or error. - if parser.streamEndProduced || parser.error != yamlNoError || parser.state == yamlParseEndState { - return true - } - - // Generate the next event. - return yamlParserStateMachine(parser, event) -} - -// Set parser error. -func yamlParserSetParserError(parser *yamlParserT, problem string, problemMark yamlMarkT) bool { - parser.error = yamlParserError - parser.problem = problem - parser.problemMark = problemMark - return false -} - -func yamlParserSetParserErrorContext(parser *yamlParserT, context string, contextMark yamlMarkT, problem string, problemMark yamlMarkT) bool { - parser.error = yamlParserError - parser.context = context - parser.contextMark = contextMark - parser.problem = problem - parser.problemMark = problemMark - return false -} - -// State dispatcher. 
-func yamlParserStateMachine(parser *yamlParserT, event *yamlEventT) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yamlParseStreamStartState: - return yamlParserParseStreamStart(parser, event) - - case yamlParseImplicitDocumentStartState: - return yamlParserParseDocumentStart(parser, event, true) - - case yamlParseDocumentStartState: - return yamlParserParseDocumentStart(parser, event, false) - - case yamlParseDocumentContentState: - return yamlParserParseDocumentContent(parser, event) - - case yamlParseDocumentEndState: - return yamlParserParseDocumentEnd(parser, event) - - case yamlParseBlockNodeState: - return yamlParserParseNode(parser, event, true, false) - - case yamlParseBlockNodeOrIndentlessSequenceState: - return yamlParserParseNode(parser, event, true, true) - - case yamlParseFlowNodeState: - return yamlParserParseNode(parser, event, false, false) - - case yamlParseBlockSequenceFirstEntryState: - return yamlParserParseBlockSequenceEntry(parser, event, true) - - case yamlParseBlockSequenceEntryState: - return yamlParserParseBlockSequenceEntry(parser, event, false) - - case yamlParseIndentlessSequenceEntryState: - return yamlParserParseIndentlessSequenceEntry(parser, event) - - case yamlParseBlockMappingFirstKeyState: - return yamlParserParseBlockMappingKey(parser, event, true) - - case yamlParseBlockMappingKeyState: - return yamlParserParseBlockMappingKey(parser, event, false) - - case yamlParseBlockMappingValueState: - return yamlParserParseBlockMappingValue(parser, event) - - case yamlParseFlowSequenceFirstEntryState: - return yamlParserParseFlowSequenceEntry(parser, event, true) - - case yamlParseFlowSequenceEntryState: - return yamlParserParseFlowSequenceEntry(parser, event, false) - - case yamlParseFlowSequenceEntryMappingKeyState: - return yamlParserParseFlowSequenceEntryMappingKey(parser, event) - - case yamlParseFlowSequenceEntryMappingValueState: - return yamlParserParseFlowSequenceEntryMappingValue(parser, event) - - case yamlParseFlowSequenceEntryMappingEndState: - return yamlParserParseFlowSequenceEntryMappingEnd(parser, event) - - case yamlParseFlowMappingFirstKeyState: - return yamlParserParseFlowMappingKey(parser, event, true) - - case yamlParseFlowMappingKeyState: - return yamlParserParseFlowMappingKey(parser, event, false) - - case yamlParseFlowMappingValueState: - return yamlParserParseFlowMappingValue(parser, event, false) - - case yamlParseFlowMappingEmptyValueState: - return yamlParserParseFlowMappingValue(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ -func yamlParserParseStreamStart(parser *yamlParserT, event *yamlEventT) bool { - token := peekToken(parser) - if token == nil { - return false - } - if token.typ != yamlStreamStartToken { - return yamlParserSetParserError(parser, "did not find expected ", token.startMark) - } - parser.state = yamlParseImplicitDocumentStartState - *event = yamlEventT{ - typ: yamlStreamStartEvent, - startMark: token.startMark, - endMark: token.endMark, - encoding: token.encoding, - } - skipToken(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// ************************* -func yamlParserParseDocumentStart(parser *yamlParserT, event *yamlEventT, implicit bool) bool { - - token := peekToken(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yamlDocumentEndToken { - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yamlVersionDirectiveToken && - token.typ != yamlTagDirectiveToken && - token.typ != yamlDocumentStartToken && - token.typ != yamlStreamEndToken { - // Parse an implicit document. - if !yamlParserProcessDirectives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yamlParseDocumentEndState) - parser.state = yamlParseBlockNodeState - - *event = yamlEventT{ - typ: yamlDocumentStartEvent, - startMark: token.startMark, - endMark: token.endMark, - } - - } else if token.typ != yamlStreamEndToken { - // Parse an explicit document. - var versionDirective *yamlVersionDirectiveT - var tagDirectives []yamlTagDirectiveT - startMark := token.startMark - if !yamlParserProcessDirectives(parser, &versionDirective, &tagDirectives) { - return false - } - token = peekToken(parser) - if token == nil { - return false - } - if token.typ != yamlDocumentStartToken { - yamlParserSetParserError(parser, - "did not find expected ", token.startMark) - return false - } - parser.states = append(parser.states, yamlParseDocumentEndState) - parser.state = yamlParseDocumentContentState - endMark := token.endMark - - *event = yamlEventT{ - typ: yamlDocumentStartEvent, - startMark: startMark, - endMark: endMark, - versionDirective: versionDirective, - tagDirectives: tagDirectives, - implicit: false, - } - skipToken(parser) - - } else { - // Parse the stream end. - parser.state = yamlParseEndState - *event = yamlEventT{ - typ: yamlStreamEndEvent, - startMark: token.startMark, - endMark: token.endMark, - } - skipToken(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// *********** -// -func yamlParserParseDocumentContent(parser *yamlParserT, event *yamlEventT) bool { - token := peekToken(parser) - if token == nil { - return false - } - if token.typ == yamlVersionDirectiveToken || - token.typ == yamlTagDirectiveToken || - token.typ == yamlDocumentStartToken || - token.typ == yamlDocumentEndToken || - token.typ == yamlStreamEndToken { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yamlParserProcessEmptyScalar(parser, event, - token.startMark) - } - return yamlParserParseNode(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// -func yamlParserParseDocumentEnd(parser *yamlParserT, event *yamlEventT) bool { - token := peekToken(parser) - if token == nil { - return false - } - - startMark := token.startMark - endMark := token.startMark - - implicit := true - if token.typ == yamlDocumentEndToken { - endMark = token.endMark - skipToken(parser) - implicit = false - } - - parser.tagDirectives = parser.tagDirectives[:0] - - parser.state = yamlParseDocumentStartState - *event = yamlEventT{ - typ: yamlDocumentEndEvent, - startMark: startMark, - endMark: endMark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yamlParserParseNode(parser *yamlParserT, event *yamlEventT, block, indentlessSequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peekToken(parser) - if token == nil { - return false - } - - if token.typ == yamlAliasToken { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yamlEventT{ - typ: yamlAliasEvent, - startMark: token.startMark, - endMark: token.endMark, - anchor: token.value, - } - skipToken(parser) - return true - } - - startMark := token.startMark - endMark := token.startMark - - var tagToken bool - var tagHandle, tagSuffix, anchor []byte - var tagMark yamlMarkT - if token.typ == yamlAnchorToken { - anchor = token.value - startMark = token.startMark - endMark = token.endMark - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - if token.typ == yamlTagToken { - tagToken = true - tagHandle = token.value - tagSuffix = token.suffix - tagMark = token.startMark - endMark = token.endMark - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - } - } else if token.typ == yamlTagToken { - tagToken = true - tagHandle = token.value - tagSuffix = token.suffix - startMark = token.startMark - tagMark = token.startMark - endMark = token.endMark - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - if token.typ == yamlAnchorToken { - anchor = token.value - endMark = token.endMark - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tagToken { - if len(tagHandle) == 0 { - tag = tagSuffix - tagSuffix = nil - } else { - for i := range parser.tagDirectives { - if bytes.Equal(parser.tagDirectives[i].handle, tagHandle) { - tag = append([]byte(nil), parser.tagDirectives[i].prefix...) - tag = append(tag, tagSuffix...) 
- break - } - } - if len(tag) == 0 { - yamlParserSetParserErrorContext(parser, - "while parsing a node", startMark, - "found undefined tag handle", tagMark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentlessSequence && token.typ == yamlBlockEntryToken { - endMark = token.endMark - parser.state = yamlParseIndentlessSequenceEntryState - *event = yamlEventT{ - typ: yamlSequenceStartEvent, - startMark: startMark, - endMark: endMark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yamlStyleT(yamlBlockSequenceStyle), - } - return true - } - if token.typ == yamlScalarToken { - var plainImplicit, quotedImplicit bool - endMark = token.endMark - if (len(tag) == 0 && token.style == yamlPlainScalarStyle) || (len(tag) == 1 && tag[0] == '!') { - plainImplicit = true - } else if len(tag) == 0 { - quotedImplicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yamlEventT{ - typ: yamlScalarEvent, - startMark: startMark, - endMark: endMark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plainImplicit, - quotedImplicit: quotedImplicit, - style: yamlStyleT(token.style), - } - skipToken(parser) - return true - } - if token.typ == yamlFlowSequenceStartToken { - // [Go] Some of the events below can be merged as they differ only on style. - endMark = token.endMark - parser.state = yamlParseFlowSequenceFirstEntryState - *event = yamlEventT{ - typ: yamlSequenceStartEvent, - startMark: startMark, - endMark: endMark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yamlStyleT(yamlFlowSequenceStyle), - } - return true - } - if token.typ == yamlFlowMappingStartToken { - endMark = token.endMark - parser.state = yamlParseFlowMappingFirstKeyState - *event = yamlEventT{ - typ: yamlMappingStartEvent, - startMark: startMark, - endMark: endMark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yamlStyleT(yamlFlowMappingStyle), - } - return true - } - if block && token.typ == yamlBlockSequenceStartToken { - endMark = token.endMark - parser.state = yamlParseBlockSequenceFirstEntryState - *event = yamlEventT{ - typ: yamlSequenceStartEvent, - startMark: startMark, - endMark: endMark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yamlStyleT(yamlBlockSequenceStyle), - } - return true - } - if block && token.typ == yamlBlockMappingStartToken { - endMark = token.endMark - parser.state = yamlParseBlockMappingFirstKeyState - *event = yamlEventT{ - typ: yamlMappingStartEvent, - startMark: startMark, - endMark: endMark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yamlStyleT(yamlBlockMappingStyle), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yamlEventT{ - typ: yamlScalarEvent, - startMark: startMark, - endMark: endMark, - anchor: anchor, - tag: tag, - implicit: implicit, - quotedImplicit: false, - style: yamlStyleT(yamlPlainScalarStyle), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yamlParserSetParserErrorContext(parser, context, startMark, - "did not find expected node content", token.startMark) - return false -} - -// Parse the productions: -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yamlParserParseBlockSequenceEntry(parser *yamlParserT, 
event *yamlEventT, first bool) bool { - if first { - token := peekToken(parser) - parser.marks = append(parser.marks, token.startMark) - skipToken(parser) - } - - token := peekToken(parser) - if token == nil { - return false - } - - if token.typ == yamlBlockEntryToken { - parser.pendingSeqItemEvent = &yamlEventT{ - startMark: token.startMark, - endMark: token.endMark, - } - mark := token.endMark - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - if token.typ != yamlBlockEntryToken && token.typ != yamlBlockEndToken { - parser.states = append(parser.states, yamlParseBlockSequenceEntryState) - return yamlParserParseNode(parser, event, true, false) - } - parser.state = yamlParseBlockSequenceEntryState - return yamlParserProcessEmptyScalar(parser, event, mark) - } - if token.typ == yamlBlockEndToken { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yamlEventT{ - typ: yamlSequenceEndEvent, - startMark: token.startMark, - endMark: token.endMark, - } - - skipToken(parser) - return true - } - - contextMark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yamlParserSetParserErrorContext(parser, - "while parsing a block collection", contextMark, - "did not find expected '-' indicator", token.startMark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yamlParserParseIndentlessSequenceEntry(parser *yamlParserT, event *yamlEventT) bool { - token := peekToken(parser) - if token == nil { - return false - } - - if token.typ == yamlBlockEntryToken { - parser.pendingSeqItemEvent = &yamlEventT{ - startMark: token.startMark, - endMark: token.endMark, - } - mark := token.endMark - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - if token.typ != yamlBlockEntryToken && - token.typ != yamlKeyToken && - token.typ != yamlValueToken && - token.typ != yamlBlockEndToken { - parser.states = append(parser.states, yamlParseIndentlessSequenceEntryState) - return yamlParserParseNode(parser, event, true, false) - } - parser.state = yamlParseIndentlessSequenceEntryState - return yamlParserProcessEmptyScalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yamlEventT{ - typ: yamlSequenceEndEvent, - startMark: token.startMark, - endMark: token.startMark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yamlParserParseBlockMappingKey(parser *yamlParserT, event *yamlEventT, first bool) bool { - if first { - token := peekToken(parser) - parser.marks = append(parser.marks, token.startMark) - skipToken(parser) - } - - token := peekToken(parser) - if token == nil { - return false - } - - if token.typ == yamlKeyToken { - mark := token.endMark - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - if token.typ != yamlKeyToken && - token.typ != yamlValueToken && - token.typ != yamlBlockEndToken { - parser.states = append(parser.states, yamlParseBlockMappingValueState) - return yamlParserParseNode(parser, event, true, true) - } - parser.state = yamlParseBlockMappingValueState - return yamlParserProcessEmptyScalar(parser, event, mark) - } else if token.typ == yamlBlockEndToken { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yamlEventT{ - typ: yamlMappingEndEvent, - startMark: token.startMark, - endMark: token.endMark, - } - skipToken(parser) - return true - } - - contextMark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yamlParserSetParserErrorContext(parser, - "while parsing a block mapping", contextMark, - "did not find expected key", token.startMark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yamlParserParseBlockMappingValue(parser *yamlParserT, event *yamlEventT) bool { - token := peekToken(parser) - if token == nil { - return false - } - if token.typ == yamlValueToken { - mark := token.endMark - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - if token.typ != yamlKeyToken && - token.typ != yamlValueToken && - token.typ != yamlBlockEndToken { - parser.states = append(parser.states, yamlParseBlockMappingKeyState) - return yamlParserParseNode(parser, event, true, true) - } - parser.state = yamlParseBlockMappingKeyState - return yamlParserProcessEmptyScalar(parser, event, mark) - } - parser.state = yamlParseBlockMappingKeyState - return yamlParserProcessEmptyScalar(parser, event, token.startMark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yamlParserParseFlowSequenceEntry(parser *yamlParserT, event *yamlEventT, first bool) bool { - if first { - token := peekToken(parser) - parser.pendingSeqItemEvent = &yamlEventT{ - startMark: token.startMark, - endMark: token.endMark, - } - parser.marks = append(parser.marks, token.startMark) - skipToken(parser) - } - token := peekToken(parser) - if token == nil { - return false - } - if token.typ != yamlFlowSequenceEndToken { - if !first { - if token.typ == yamlFlowEntryToken { - parser.pendingSeqItemEvent = &yamlEventT{ - startMark: token.startMark, - endMark: token.endMark, - } - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - } else { - contextMark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yamlParserSetParserErrorContext(parser, - "while parsing a flow sequence", contextMark, - "did not find expected ',' or ']'", token.startMark) - } - } - - if token.typ == yamlKeyToken { - parser.state = yamlParseFlowSequenceEntryMappingKeyState - *event = yamlEventT{ - typ: yamlMappingStartEvent, - startMark: token.startMark, - endMark: token.endMark, - implicit: true, - style: yamlStyleT(yamlFlowMappingStyle), - } - skipToken(parser) - return true - } else if token.typ != yamlFlowSequenceEndToken { - parser.states = append(parser.states, yamlParseFlowSequenceEntryState) - return yamlParserParseNode(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yamlEventT{ - typ: yamlSequenceEndEvent, - startMark: token.startMark, - endMark: token.endMark, - } - - skipToken(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yamlParserParseFlowSequenceEntryMappingKey(parser *yamlParserT, event *yamlEventT) bool { - token := peekToken(parser) - if token == nil { - return false - } - if token.typ != yamlValueToken && - token.typ != yamlFlowEntryToken && - token.typ != yamlFlowSequenceEndToken { - parser.states = append(parser.states, yamlParseFlowSequenceEntryMappingValueState) - return yamlParserParseNode(parser, event, false, false) - } - mark := token.endMark - skipToken(parser) - parser.state = yamlParseFlowSequenceEntryMappingValueState - return yamlParserProcessEmptyScalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yamlParserParseFlowSequenceEntryMappingValue(parser *yamlParserT, event *yamlEventT) bool { - token := peekToken(parser) - if token == nil { - return false - } - if token.typ == yamlValueToken { - skipToken(parser) - token := peekToken(parser) - if token == nil { - return false - } - if token.typ != yamlFlowEntryToken && token.typ != yamlFlowSequenceEndToken { - parser.states = append(parser.states, yamlParseFlowSequenceEntryMappingEndState) - return yamlParserParseNode(parser, event, false, false) - } - } - parser.state = yamlParseFlowSequenceEntryMappingEndState - return yamlParserProcessEmptyScalar(parser, event, token.startMark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yamlParserParseFlowSequenceEntryMappingEnd(parser *yamlParserT, event *yamlEventT) bool { - token := peekToken(parser) - if token == nil { - return false - } - parser.state = yamlParseFlowSequenceEntryState - *event = yamlEventT{ - typ: yamlMappingEndEvent, - startMark: token.startMark, - endMark: token.startMark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yamlParserParseFlowMappingKey(parser *yamlParserT, event *yamlEventT, first bool) bool { - if first { - token := peekToken(parser) - parser.marks = append(parser.marks, token.startMark) - skipToken(parser) - } - - token := peekToken(parser) - if token == nil { - return false - } - - if token.typ != yamlFlowMappingEndToken { - if !first { - if token.typ == yamlFlowEntryToken { - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - } else { - contextMark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yamlParserSetParserErrorContext(parser, - "while parsing a flow mapping", contextMark, - "did not find expected ',' or '}'", token.startMark) - } - } - - if token.typ == yamlKeyToken { - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - if token.typ != yamlValueToken && - token.typ != yamlFlowEntryToken && - token.typ != yamlFlowMappingEndToken { - parser.states = append(parser.states, yamlParseFlowMappingValueState) - return yamlParserParseNode(parser, event, false, false) - } - parser.state = yamlParseFlowMappingValueState - return yamlParserProcessEmptyScalar(parser, event, token.startMark) - } else if token.typ != yamlFlowMappingEndToken { - parser.states = append(parser.states, yamlParseFlowMappingEmptyValueState) - return yamlParserParseNode(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yamlEventT{ - typ: yamlMappingEndEvent, - startMark: token.startMark, - endMark: token.endMark, - } - skipToken(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yamlParserParseFlowMappingValue(parser *yamlParserT, event *yamlEventT, empty bool) bool { - token := peekToken(parser) - if token == nil { - return false - } - if empty { - parser.state = yamlParseFlowMappingKeyState - return yamlParserProcessEmptyScalar(parser, event, token.startMark) - } - if token.typ == yamlValueToken { - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - if token.typ != yamlFlowEntryToken && token.typ != yamlFlowMappingEndToken { - parser.states = append(parser.states, yamlParseFlowMappingKeyState) - return yamlParserParseNode(parser, event, false, false) - } - } - parser.state = yamlParseFlowMappingKeyState - return yamlParserProcessEmptyScalar(parser, event, token.startMark) -} - -// Generate an empty scalar event. 
-func yamlParserProcessEmptyScalar(parser *yamlParserT, event *yamlEventT, mark yamlMarkT) bool { - *event = yamlEventT{ - typ: yamlScalarEvent, - startMark: mark, - endMark: mark, - value: nil, // Empty - implicit: true, - style: yamlStyleT(yamlPlainScalarStyle), - } - return true -} - -var defaultTagDirectives = []yamlTagDirectiveT{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yamlParserProcessDirectives(parser *yamlParserT, - versionDirectiveRef **yamlVersionDirectiveT, - tagDirectivesRef *[]yamlTagDirectiveT) bool { - - var versionDirective *yamlVersionDirectiveT - var tagDirectives []yamlTagDirectiveT - - token := peekToken(parser) - if token == nil { - return false - } - - for token.typ == yamlVersionDirectiveToken || token.typ == yamlTagDirectiveToken { - if token.typ == yamlVersionDirectiveToken { - if versionDirective != nil { - yamlParserSetParserError(parser, - "found duplicate %YAML directive", token.startMark) - return false - } - if token.major != 1 || token.minor != 1 { - yamlParserSetParserError(parser, - "found incompatible YAML document", token.startMark) - return false - } - versionDirective = &yamlVersionDirectiveT{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yamlTagDirectiveToken { - value := yamlTagDirectiveT{ - handle: token.value, - prefix: token.prefix, - } - if !yamlParserAppendTagDirective(parser, value, false, token.startMark) { - return false - } - tagDirectives = append(tagDirectives, value) - } - - skipToken(parser) - token = peekToken(parser) - if token == nil { - return false - } - } - - for i := range defaultTagDirectives { - if !yamlParserAppendTagDirective(parser, defaultTagDirectives[i], true, token.startMark) { - return false - } - } - - if versionDirectiveRef != nil { - *versionDirectiveRef = versionDirective - } - if tagDirectivesRef != nil { - *tagDirectivesRef = tagDirectives - } - return true -} - -// Append a tag directive to the directives stack. -func yamlParserAppendTagDirective(parser *yamlParserT, value yamlTagDirectiveT, allowDuplicates bool, mark yamlMarkT) bool { - for i := range parser.tagDirectives { - if bytes.Equal(value.handle, parser.tagDirectives[i].handle) { - if allowDuplicates { - return true - } - return yamlParserSetParserError(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - valueCopy := yamlTagDirectiveT{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(valueCopy.handle, value.handle) - copy(valueCopy.prefix, value.prefix) - parser.tagDirectives = append(parser.tagDirectives, valueCopy) - return true -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/readerc.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/readerc.go deleted file mode 100644 index eb3e1d0c0..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/readerc.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yamlParserSetReaderError(parser *yamlParserT, problem string, offset int, value int) bool { - parser.error = yamlReaderError - parser.problem = problem - parser.problemOffset = offset - parser.problemValue = value - return false -} - -// Byte order marks. 
-const ( - bomUtf8 = "\xef\xbb\xbf" - bomUtf16le = "\xff\xfe" - bomUtf16be = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yamlParserDetermineEncoding(parser *yamlParserT) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.rawBuffer)-parser.rawBufferPos < 3 { - if !yamlParserUpdateRawBuffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.rawBuffer - pos := parser.rawBufferPos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bomUtf16le[0] && buf[pos+1] == bomUtf16le[1] { - parser.encoding = yamlUtf16leEncoding - parser.rawBufferPos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bomUtf16be[0] && buf[pos+1] == bomUtf16be[1] { - parser.encoding = yamlUtf16beEncoding - parser.rawBufferPos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bomUtf8[0] && buf[pos+1] == bomUtf8[1] && buf[pos+2] == bomUtf8[2] { - parser.encoding = yamlUtf8Encoding - parser.rawBufferPos += 3 - parser.offset += 3 - } else { - parser.encoding = yamlUtf8Encoding - } - return true -} - -// Update the raw buffer. -func yamlParserUpdateRawBuffer(parser *yamlParserT) bool { - sizeRead := 0 - - // Return if the raw buffer is full. - if parser.rawBufferPos == 0 && len(parser.rawBuffer) == cap(parser.rawBuffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.rawBufferPos > 0 && parser.rawBufferPos < len(parser.rawBuffer) { - copy(parser.rawBuffer, parser.rawBuffer[parser.rawBufferPos:]) - } - parser.rawBuffer = parser.rawBuffer[:len(parser.rawBuffer)-parser.rawBufferPos] - parser.rawBufferPos = 0 - - // Call the read handler to fill the buffer. - sizeRead, err := parser.readHandler(parser, parser.rawBuffer[len(parser.rawBuffer):cap(parser.rawBuffer)]) - parser.rawBuffer = parser.rawBuffer[:len(parser.rawBuffer)+sizeRead] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yamlParserSetReaderError(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yamlParserUpdateBuffer(parser *yamlParserT, length int) bool { - if parser.readHandler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.rawBufferPos == len(parser.rawBuffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. 
- if parser.encoding == yamlAnyEncoding { - if !yamlParserDetermineEncoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - bufferLen := len(parser.buffer) - if parser.bufferPos > 0 && parser.bufferPos < bufferLen { - copy(parser.buffer, parser.buffer[parser.bufferPos:]) - bufferLen -= parser.bufferPos - parser.bufferPos = 0 - } else if parser.bufferPos == bufferLen { - bufferLen = 0 - parser.bufferPos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.rawBufferPos == len(parser.rawBuffer) { - if !yamlParserUpdateRawBuffer(parser) { - parser.buffer = parser.buffer[:bufferLen] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.rawBufferPos != len(parser.rawBuffer) { - var value rune - var width int - - rawUnread := len(parser.rawBuffer) - parser.rawBufferPos - - // Decode the next character. - switch parser.encoding { - case yamlUtf8Encoding: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.rawBuffer[parser.rawBufferPos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yamlParserSetReaderError(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > rawUnread { - if parser.eof { - return yamlParserSetReaderError(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.rawBuffer[parser.rawBufferPos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yamlParserSetReaderError(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yamlParserSetReaderError(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. 
- if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yamlParserSetReaderError(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yamlUtf16leEncoding, yamlUtf16beEncoding: - var low, high int - if parser.encoding == yamlUtf16leEncoding { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if rawUnread < 2 { - if parser.eof { - return yamlParserSetReaderError(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.rawBuffer[parser.rawBufferPos+low]) + - (rune(parser.rawBuffer[parser.rawBufferPos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yamlParserSetReaderError(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if rawUnread < 4 { - if parser.eof { - return yamlParserSetReaderError(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.rawBuffer[parser.rawBufferPos+low+2]) + - (rune(parser.rawBuffer[parser.rawBufferPos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yamlParserSetReaderError(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yamlParserSetReaderError(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. - parser.rawBufferPos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[bufferLen+0] = byte(value) - bufferLen++ - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 
110xxxxx 10xxxxxx - parser.buffer[bufferLen+0] = byte(0xC0 + (value >> 6)) - parser.buffer[bufferLen+1] = byte(0x80 + (value & 0x3F)) - bufferLen += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[bufferLen+0] = byte(0xE0 + (value >> 12)) - parser.buffer[bufferLen+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[bufferLen+2] = byte(0x80 + (value & 0x3F)) - bufferLen += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[bufferLen+0] = byte(0xF0 + (value >> 18)) - parser.buffer[bufferLen+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[bufferLen+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[bufferLen+3] = byte(0x80 + (value & 0x3F)) - bufferLen += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[bufferLen] = 0 - bufferLen++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. - for bufferLen < length { - parser.buffer[bufferLen] = 0 - bufferLen++ - } - parser.buffer = parser.buffer[:bufferLen] - return true -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/resolve.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/resolve.go deleted file mode 100644 index 91ba4c888..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/resolve.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yamlBoolTag, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yamlBoolTag, []string{"true", "True", "TRUE"}}, - {true, yamlBoolTag, []string{"on", "On", "ON"}}, - {false, yamlBoolTag, []string{"n", "N", "no", "No", "NO"}}, - {false, yamlBoolTag, []string{"false", "False", "FALSE"}}, - {false, yamlBoolTag, []string{"off", "Off", "OFF"}}, - {nil, yamlNullTag, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yamlFloatTag, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yamlFloatTag, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yamlFloatTag, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yamlFloatTag, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yamlMergeTag, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" 
+ tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yamlStrTag, yamlBoolTag, yamlIntTag, yamlFloatTag, yamlNullTag, yamlTimestampTag: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yamlStrTag, yamlBinaryTag: - return - case yamlFloatTag: - if rtag == yamlIntTag { - switch v := out.(type) { - case int64: - rtag = yamlFloatTag - out = float64(v) - return - case int: - rtag = yamlFloatTag - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yamlStrTag && tag != yamlBinaryTag { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yamlFloatTag, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == yamlTimestampTag { - t, ok := parseTimestamp(in) - if ok { - return yamlTimestampTag, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yamlIntTag, int(intv) - } - return yamlIntTag, intv - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yamlIntTag, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yamlFloatTag, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yamlIntTag, int(intv) - } - return yamlIntTag, intv - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yamlIntTag, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return yamlIntTag, int(intv) - } - return yamlIntTag, intv - } - } - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - return yamlStrTag, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. 
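resolve above tries the YAML 1.1 word table first (booleans, nulls, NaN/Inf), then integers, then floats, then timestamps, and only at the end falls back to a plain string. A simplified sketch of that ordering; the word table here is a small case-folded stand-in for the real case-sensitive map, and timestamp and base-60 handling are omitted:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// words is a reduced, lower-cased stand-in for the resolver's word table.
var words = map[string]interface{}{
	"": nil, "~": nil, "null": nil,
	"true": true, "yes": true, "on": true,
	"false": false, "no": false, "off": false,
}

// resolvePlain mimics the resolution order for untagged plain scalars:
// word table, then integer, then float, then plain string.
func resolvePlain(s string) interface{} {
	if v, ok := words[strings.ToLower(s)]; ok {
		return v
	}
	plain := strings.ReplaceAll(s, "_", "")
	if i, err := strconv.ParseInt(plain, 0, 64); err == nil {
		return i
	}
	if f, err := strconv.ParseFloat(plain, 64); err == nil {
		return f
	}
	return s
}

func main() {
	fmt.Println(resolvePlain("on"), resolvePlain("0x1F"), resolvePlain("1_000.5"), resolvePlain("hello"))
	// Output: true 31 1000.5 hello
}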
-func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. - i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/scannerc.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/scannerc.go deleted file mode 100644 index 8662b99d3..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/scannerc.go +++ /dev/null @@ -1,2717 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. 
-// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. 
-// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. 
Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yamlParserT, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yamlParserUpdateBuffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yamlParserT) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.bufferPos += width(parser.buffer[parser.bufferPos]) -} - -func skipLine(parser *yamlParserT) { - if isCrlf(parser.buffer, parser.bufferPos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.bufferPos += 2 - } else if isBreak(parser.buffer, parser.bufferPos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.bufferPos += width(parser.buffer[parser.bufferPos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yamlParserT, s []byte) []byte { - w := width(parser.buffer[parser.bufferPos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.bufferPos] - parser.bufferPos++ - } else { - s = append(s, parser.buffer[parser.bufferPos:parser.bufferPos+w]...) - parser.bufferPos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func readLine(parser *yamlParserT, s []byte) []byte { - buf := parser.buffer - pos := parser.bufferPos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.bufferPos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.bufferPos++ - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.bufferPos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.bufferPos:pos+3]...) - parser.bufferPos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yamlParserScan(parser *yamlParserT, token *yamlTokenT) bool { - // Erase the token object. - *token = yamlTokenT{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.streamEndProduced || parser.error != yamlNoError { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.tokenAvailable { - if !yamlParserFetchMoreTokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokensHead] - parser.tokensHead++ - parser.tokensParsed++ - parser.tokenAvailable = false - - if token.typ == yamlStreamEndToken { - parser.streamEndProduced = true - } - return true -} - -// Set the scanner error and return false. 
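skip and read above advance the buffer one character at a time, where a character may span several bytes; the scanner tracks the byte position separately from the index/column mark. A rough equivalent using unicode/utf8 in place of the scanner's width table (cursor and skipRune are illustrative names, not the vendored API):

package main

import (
	"fmt"
	"unicode/utf8"
)

type cursor struct {
	buf    []byte
	pos    int // byte offset into buf
	index  int // character index, as in the scanner's mark
	column int // column on the current line
}

// skipRune advances over one UTF-8 rune, updating the mark the way skip()
// does for a non-break character.
func (c *cursor) skipRune() rune {
	r, w := utf8.DecodeRune(c.buf[c.pos:])
	c.pos += w
	c.index++
	c.column++
	return r
}

func main() {
	c := &cursor{buf: []byte("é: 1")}
	fmt.Println(string(c.skipRune()), c.pos, c.column) // é 2 1
}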
-func yamlParserSetScannerError(parser *yamlParserT, context string, contextMark yamlMarkT, problem string) bool { - parser.error = yamlScannerError - parser.context = context - parser.contextMark = contextMark - parser.problem = problem - parser.problemMark = parser.mark - return false -} - -func yamlParserSetScannerTagError(parser *yamlParserT, directive bool, contextMark yamlMarkT, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yamlParserSetScannerError(parser, context, contextMark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yamlParserFetchMoreTokens(parser *yamlParserT) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - needMoreTokens := false - - if parser.tokensHead == len(parser.tokens) { - // Queue is empty. - needMoreTokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yamlParserStaleSimpleKeys(parser) { - return false - } - - for i := range parser.simpleKeys { - simpleKey := &parser.simpleKeys[i] - if simpleKey.possible && simpleKey.tokenNumber == parser.tokensParsed { - needMoreTokens = true - break - } - } - } - - // We are finished. - if !needMoreTokens { - break - } - // Fetch the next token. - if !yamlParserFetchNextToken(parser) { - return false - } - } - - parser.tokenAvailable = true - return true -} - -// The dispatcher for token fetchers. -func yamlParserFetchNextToken(parser *yamlParserT) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.streamStartProduced { - return yamlParserFetchStreamStart(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yamlParserScanToNextToken(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yamlParserStaleSimpleKeys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yamlParserUnrollIndent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yamlParserUpdateBuffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if isZ(parser.buffer, parser.bufferPos) { - return yamlParserFetchStreamEnd(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.bufferPos] == '%' { - return yamlParserFetchDirective(parser) - } - - buf := parser.buffer - pos := parser.bufferPos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && isBlankz(buf, pos+3) { - return yamlParserFetchDocumentIndicator(parser, yamlDocumentStartToken) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && isBlankz(buf, pos+3) { - return yamlParserFetchDocumentIndicator(parser, yamlDocumentEndToken) - } - - // Is it the flow sequence start indicator? 
- if buf[pos] == '[' { - return yamlParserFetchFlowCollectionStart(parser, yamlFlowSequenceStartToken) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.bufferPos] == '{' { - return yamlParserFetchFlowCollectionStart(parser, yamlFlowMappingStartToken) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.bufferPos] == ']' { - return yamlParserFetchFlowCollectionEnd(parser, - yamlFlowSequenceEndToken) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.bufferPos] == '}' { - return yamlParserFetchFlowCollectionEnd(parser, - yamlFlowMappingEndToken) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.bufferPos] == ',' { - return yamlParserFetchFlowEntry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.bufferPos] == '-' && isBlankz(parser.buffer, parser.bufferPos+1) { - return yamlParserFetchBlockEntry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.bufferPos] == '?' && (parser.flowLevel > 0 || isBlankz(parser.buffer, parser.bufferPos+1)) { - return yamlParserFetchKey(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.bufferPos] == ':' && (parser.flowLevel > 0 || isBlankz(parser.buffer, parser.bufferPos+1)) { - return yamlParserFetchValue(parser) - } - - // Is it an alias? - if parser.buffer[parser.bufferPos] == '*' { - return yamlParserFetchAnchor(parser, yamlAliasToken) - } - - // Is it an anchor? - if parser.buffer[parser.bufferPos] == '&' { - return yamlParserFetchAnchor(parser, yamlAnchorToken) - } - - // Is it a tag? - if parser.buffer[parser.bufferPos] == '!' { - return yamlParserFetchTag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.bufferPos] == '|' && parser.flowLevel == 0 { - return yamlParserFetchBlockScalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.bufferPos] == '>' && parser.flowLevel == 0 { - return yamlParserFetchBlockScalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.bufferPos] == '\'' { - return yamlParserFetchFlowScalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.bufferPos] == '"' { - return yamlParserFetchFlowScalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(isBlankz(parser.buffer, parser.bufferPos) || parser.buffer[parser.bufferPos] == '-' || - parser.buffer[parser.bufferPos] == '?' 
|| parser.buffer[parser.bufferPos] == ':' || - parser.buffer[parser.bufferPos] == ',' || parser.buffer[parser.bufferPos] == '[' || - parser.buffer[parser.bufferPos] == ']' || parser.buffer[parser.bufferPos] == '{' || - parser.buffer[parser.bufferPos] == '}' || parser.buffer[parser.bufferPos] == '#' || - parser.buffer[parser.bufferPos] == '&' || parser.buffer[parser.bufferPos] == '*' || - parser.buffer[parser.bufferPos] == '!' || parser.buffer[parser.bufferPos] == '|' || - parser.buffer[parser.bufferPos] == '>' || parser.buffer[parser.bufferPos] == '\'' || - parser.buffer[parser.bufferPos] == '"' || parser.buffer[parser.bufferPos] == '%' || - parser.buffer[parser.bufferPos] == '@' || parser.buffer[parser.bufferPos] == '`') || - (parser.buffer[parser.bufferPos] == '-' && !isBlank(parser.buffer, parser.bufferPos+1)) || - (parser.flowLevel == 0 && - (parser.buffer[parser.bufferPos] == '?' || parser.buffer[parser.bufferPos] == ':') && - !isBlankz(parser.buffer, parser.bufferPos+1)) { - return yamlParserFetchPlainScalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yamlParserSetScannerError(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yamlParserStaleSimpleKeys(parser *yamlParserT) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simpleKeys { - simpleKey := &parser.simpleKeys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simpleKey.possible && (simpleKey.mark.line < parser.mark.line || simpleKey.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simpleKey.required { - return yamlParserSetScannerError(parser, - "while scanning a simple key", simpleKey.mark, - "could not find expected ':'") - } - simpleKey.possible = false - } - } - return true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yamlParserSaveSimpleKey(parser *yamlParserT) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flowLevel == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simpleKeyAllowed { - simpleKey := yamlSimpleKeyT{ - possible: true, - required: required, - tokenNumber: parser.tokensParsed + (len(parser.tokens) - parser.tokensHead), - } - simpleKey.mark = parser.mark - - if !yamlParserRemoveSimpleKey(parser) { - return false - } - parser.simpleKeys[len(parser.simpleKeys)-1] = simpleKey - } - return true -} - -// Remove a potential simple key at the current flow level. -func yamlParserRemoveSimpleKey(parser *yamlParserT) bool { - i := len(parser.simpleKeys) - 1 - if parser.simpleKeys[i].possible { - // If the key is required, it is an error. - if parser.simpleKeys[i].required { - return yamlParserSetScannerError(parser, - "while scanning a simple key", parser.simpleKeys[i].mark, - "could not find expected ':'") - } - } - // Remove the key from the stack. - parser.simpleKeys[i].possible = false - return true -} - -// Increase the flow level and resize the simple key list if needed. 
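The simple-key bookkeeping above enforces the specification's limits: a potential simple key is abandoned once the scanner has moved to another line or more than 1024 characters past the key's mark. The rule in isolation, with a hypothetical mark type standing in for yamlMarkT:

package main

import "fmt"

type mark struct{ index, line int }

// stale reports whether a potential simple key recorded at keyMark can no
// longer be completed at the current position, per the single-line and
// 1024-character limits.
func stale(keyMark, cur mark) bool {
	return keyMark.line < cur.line || keyMark.index+1024 < cur.index
}

func main() {
	fmt.Println(stale(mark{0, 1}, mark{10, 1}))   // false: same line, close by
	fmt.Println(stale(mark{0, 1}, mark{10, 2}))   // true: crossed a line break
	fmt.Println(stale(mark{0, 1}, mark{2000, 1})) // true: more than 1024 chars away
}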
-func yamlParserIncreaseFlowLevel(parser *yamlParserT) bool { - // Reset the simple key on the next level. - parser.simpleKeys = append(parser.simpleKeys, yamlSimpleKeyT{}) - - // Increase the flow level. - parser.flowLevel++ - return true -} - -// Decrease the flow level. -func yamlParserDecreaseFlowLevel(parser *yamlParserT) bool { - if parser.flowLevel > 0 { - parser.flowLevel-- - parser.simpleKeys = parser.simpleKeys[:len(parser.simpleKeys)-1] - } - return true -} - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yamlParserRollIndent(parser *yamlParserT, column, number int, typ yamlTokenTypeT, mark yamlMarkT) bool { - // In the flow context, do nothing. - if parser.flowLevel > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - // Create a token and insert it into the queue. - token := yamlTokenT{ - typ: typ, - startMark: mark, - endMark: mark, - } - if number > -1 { - number -= parser.tokensParsed - } - yamlInsertToken(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yamlParserUnrollIndent(parser *yamlParserT, column int) bool { - // In the flow context, do nothing. - if parser.flowLevel > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yamlTokenT{ - typ: yamlBlockEndToken, - startMark: parser.mark, - endMark: parser.mark, - } - yamlInsertToken(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yamlParserFetchStreamStart(parser *yamlParserT) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simpleKeys = append(parser.simpleKeys, yamlSimpleKeyT{}) - - // A simple key is allowed at the beginning of the stream. - parser.simpleKeyAllowed = true - - // We have started. - parser.streamStartProduced = true - - // Create the STREAM-START token and append it to the queue. - token := yamlTokenT{ - typ: yamlStreamStartToken, - startMark: parser.mark, - endMark: parser.mark, - encoding: parser.encoding, - } - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yamlParserFetchStreamEnd(parser *yamlParserT) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yamlParserUnrollIndent(parser, -1) { - return false - } - - // Reset simple keys. - if !yamlParserRemoveSimpleKey(parser) { - return false - } - - parser.simpleKeyAllowed = false - - // Create the STREAM-END token and append it to the queue. - token := yamlTokenT{ - typ: yamlStreamEndToken, - startMark: parser.mark, - endMark: parser.mark, - } - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. 
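yamlParserRollIndent and yamlParserUnrollIndent above maintain a stack of indentation columns: a deeper column pushes a level (and a BLOCK-*-START token), and returning to a shallower column pops levels, one BLOCK-END per pop. A stripped-down sketch of that stack, ignoring token queue insertion (indenter is an illustrative name):

package main

import "fmt"

type indenter struct {
	indent  int
	indents []int
}

// roll pushes the current level when the new column is deeper and reports
// whether a BLOCK-*-START token would be emitted.
func (in *indenter) roll(column int) bool {
	if in.indent < column {
		in.indents = append(in.indents, in.indent)
		in.indent = column
		return true
	}
	return false
}

// unroll pops levels until indent <= column and returns how many BLOCK-END
// tokens that produces.
func (in *indenter) unroll(column int) int {
	ends := 0
	for in.indent > column {
		in.indent = in.indents[len(in.indents)-1]
		in.indents = in.indents[:len(in.indents)-1]
		ends++
	}
	return ends
}

func main() {
	in := &indenter{indent: -1}
	fmt.Println(in.roll(0), in.roll(2), in.unroll(-1)) // true true 2
}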
-func yamlParserFetchDirective(parser *yamlParserT) bool { - // Reset the indentation level. - if !yamlParserUnrollIndent(parser, -1) { - return false - } - - // Reset simple keys. - if !yamlParserRemoveSimpleKey(parser) { - return false - } - - parser.simpleKeyAllowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yamlTokenT{} - if !yamlParserScanDirective(parser, &token) { - return false - } - // Append the token to the queue. - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yamlParserFetchDocumentIndicator(parser *yamlParserT, typ yamlTokenTypeT) bool { - // Reset the indentation level. - if !yamlParserUnrollIndent(parser, -1) { - return false - } - - // Reset simple keys. - if !yamlParserRemoveSimpleKey(parser) { - return false - } - - parser.simpleKeyAllowed = false - - // Consume the token. - startMark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - endMark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yamlTokenT{ - typ: typ, - startMark: startMark, - endMark: endMark, - } - // Append the token to the queue. - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yamlParserFetchFlowCollectionStart(parser *yamlParserT, typ yamlTokenTypeT) bool { - // The indicators '[' and '{' may start a simple key. - if !yamlParserSaveSimpleKey(parser) { - return false - } - - // Increase the flow level. - if !yamlParserIncreaseFlowLevel(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simpleKeyAllowed = true - - // Consume the token. - startMark := parser.mark - skip(parser) - endMark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yamlTokenT{ - typ: typ, - startMark: startMark, - endMark: endMark, - } - // Append the token to the queue. - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yamlParserFetchFlowCollectionEnd(parser *yamlParserT, typ yamlTokenTypeT) bool { - // Reset any potential simple key on the current flow level. - if !yamlParserRemoveSimpleKey(parser) { - return false - } - - // Decrease the flow level. - if !yamlParserDecreaseFlowLevel(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simpleKeyAllowed = false - - // Consume the token. - - startMark := parser.mark - skip(parser) - endMark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yamlTokenT{ - typ: typ, - startMark: startMark, - endMark: endMark, - } - // Append the token to the queue. - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yamlParserFetchFlowEntry(parser *yamlParserT) bool { - // Reset any potential simple keys on the current flow level. - if !yamlParserRemoveSimpleKey(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simpleKeyAllowed = true - - // Consume the token. - startMark := parser.mark - skip(parser) - endMark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yamlTokenT{ - typ: yamlFlowEntryToken, - startMark: startMark, - endMark: endMark, - } - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. 
-func yamlParserFetchBlockEntry(parser *yamlParserT) bool { - // Check if the scanner is in the block context. - if parser.flowLevel == 0 { - // Check if we are allowed to start a new entry. - if !parser.simpleKeyAllowed { - return yamlParserSetScannerError(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yamlParserRollIndent(parser, parser.mark.column, -1, yamlBlockSequenceStartToken, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yamlParserRemoveSimpleKey(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simpleKeyAllowed = true - - // Consume the token. - startMark := parser.mark - skip(parser) - endMark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yamlTokenT{ - typ: yamlBlockEntryToken, - startMark: startMark, - endMark: endMark, - } - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yamlParserFetchKey(parser *yamlParserT) bool { - - // In the block context, additional checks are required. - if parser.flowLevel == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simpleKeyAllowed { - return yamlParserSetScannerError(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yamlParserRollIndent(parser, parser.mark.column, -1, yamlBlockMappingStartToken, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yamlParserRemoveSimpleKey(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simpleKeyAllowed = parser.flowLevel == 0 - - // Consume the token. - startMark := parser.mark - skip(parser) - endMark := parser.mark - - // Create the KEY token and append it to the queue. - token := yamlTokenT{ - typ: yamlKeyToken, - startMark: startMark, - endMark: endMark, - } - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yamlParserFetchValue(parser *yamlParserT) bool { - - simpleKey := &parser.simpleKeys[len(parser.simpleKeys)-1] - - // Have we found a simple key? - if simpleKey.possible { - // Create the KEY token and insert it into the queue. - token := yamlTokenT{ - typ: yamlKeyToken, - startMark: simpleKey.mark, - endMark: simpleKey.mark, - } - yamlInsertToken(parser, simpleKey.tokenNumber-parser.tokensParsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yamlParserRollIndent(parser, simpleKey.mark.column, - simpleKey.tokenNumber, - yamlBlockMappingStartToken, simpleKey.mark) { - return false - } - - // Remove the simple key. - simpleKey.possible = false - - // A simple key cannot follow another simple key. - parser.simpleKeyAllowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flowLevel == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simpleKeyAllowed { - return yamlParserSetScannerError(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. 
- if !yamlParserRollIndent(parser, parser.mark.column, -1, yamlBlockMappingStartToken, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simpleKeyAllowed = parser.flowLevel == 0 - } - - // Consume the token. - startMark := parser.mark - skip(parser) - endMark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yamlTokenT{ - typ: yamlValueToken, - startMark: startMark, - endMark: endMark, - } - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yamlParserFetchAnchor(parser *yamlParserT, typ yamlTokenTypeT) bool { - // An anchor or an alias could be a simple key. - if !yamlParserSaveSimpleKey(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simpleKeyAllowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yamlTokenT - if !yamlParserScanAnchor(parser, &token, typ) { - return false - } - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yamlParserFetchTag(parser *yamlParserT) bool { - // A tag could be a simple key. - if !yamlParserSaveSimpleKey(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simpleKeyAllowed = false - - // Create the TAG token and append it to the queue. - var token yamlTokenT - if !yamlParserScanTag(parser, &token) { - return false - } - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yamlParserFetchBlockScalar(parser *yamlParserT, literal bool) bool { - // Remove any potential simple keys. - if !yamlParserRemoveSimpleKey(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simpleKeyAllowed = true - - // Create the SCALAR token and append it to the queue. - var token yamlTokenT - if !yamlParserScanBlockScalar(parser, &token, literal) { - return false - } - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yamlParserFetchFlowScalar(parser *yamlParserT, single bool) bool { - // A plain scalar could be a simple key. - if !yamlParserSaveSimpleKey(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simpleKeyAllowed = false - - // Create the SCALAR token and append it to the queue. - var token yamlTokenT - if !yamlParserScanFlowScalar(parser, &token, single) { - return false - } - yamlInsertToken(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yamlParserFetchPlainScalar(parser *yamlParserT) bool { - // A plain scalar could be a simple key. - if !yamlParserSaveSimpleKey(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simpleKeyAllowed = false - - // Create the SCALAR token and append it to the queue. 
- var token yamlTokenT - if !yamlParserScanPlainScalar(parser, &token) { - return false - } - yamlInsertToken(parser, -1, &token) - return true -} - -func yamlParserFetchComment(parser *yamlParserT) bool { - startMark := parser.mark - var comment []byte - - for !isBreakz(parser.buffer, parser.bufferPos) { - comment = read(parser, comment) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - break - } - } - - parser.comments = append(parser.comments, yamlTokenT{ - typ: yamlCommentToken, - startMark: startMark, - endMark: parser.mark, - value: comment[1:], // skip # - style: yamlPlainScalarStyle, - }) - - return true -} - -// Eat whitespaces and comments until the next token is found. -func yamlParserScanToNextToken(parser *yamlParserT) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - if parser.mark.column == 0 && isBom(parser.buffer, parser.bufferPos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - - for parser.buffer[parser.bufferPos] == ' ' || ((parser.flowLevel > 0 || !parser.simpleKeyAllowed) && parser.buffer[parser.bufferPos] == '\t') { - skip(parser) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.bufferPos] == '#' { - if !yamlParserFetchComment(parser) { - return false - } - } - - // If it is a line break, eat it. - if isBreak(parser.buffer, parser.bufferPos) { - if parser.unread < 2 && !yamlParserUpdateBuffer(parser, 2) { - return false - } - skipLine(parser) - - // In the block context, a new line may start a simple key. - if parser.flowLevel == 0 { - parser.simpleKeyAllowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yamlParserScanDirective(parser *yamlParserT, token *yamlTokenT) bool { - // Eat '%'. - startMark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yamlParserScanDirectiveName(parser, startMark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yamlParserScanVersionDirectiveValue(parser, startMark, &major, &minor) { - return false - } - endMark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yamlTokenT{ - typ: yamlVersionDirectiveToken, - startMark: startMark, - endMark: endMark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yamlParserScanTagDirectiveValue(parser, startMark, &handle, &prefix) { - return false - } - endMark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yamlTokenT{ - typ: yamlTagDirectiveToken, - startMark: startMark, - endMark: endMark, - value: handle, - prefix: prefix, - } - - // Unknown directive. 
- } else { - yamlParserSetScannerError(parser, "while scanning a directive", - startMark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - - for isBlank(parser.buffer, parser.bufferPos) { - skip(parser) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.bufferPos] == '#' { - for !isBreakz(parser.buffer, parser.bufferPos) { - skip(parser) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !isBreakz(parser.buffer, parser.bufferPos) { - yamlParserSetScannerError(parser, "while scanning a directive", - startMark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if isBreak(parser.buffer, parser.bufferPos) { - if parser.unread < 2 && !yamlParserUpdateBuffer(parser, 2) { - return false - } - skipLine(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yamlParserScanDirectiveName(parser *yamlParserT, startMark yamlMarkT, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - - var s []byte - for isAlpha(parser.buffer, parser.bufferPos) { - s = read(parser, s) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yamlParserSetScannerError(parser, "while scanning a directive", - startMark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. - if !isBlankz(parser.buffer, parser.bufferPos) { - yamlParserSetScannerError(parser, "while scanning a directive", - startMark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yamlParserScanVersionDirectiveValue(parser *yamlParserT, startMark yamlMarkT, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - for isBlank(parser.buffer, parser.bufferPos) { - skip(parser) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yamlParserScanVersionDirectiveNumber(parser, startMark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.bufferPos] != '.' { - return yamlParserSetScannerError(parser, "while scanning a %YAML directive", - startMark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yamlParserScanVersionDirectiveNumber(parser, startMark, minor) { - return false - } - return true -} - -const maxNumberLength = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yamlParserScanVersionDirectiveNumber(parser *yamlParserT, startMark yamlMarkT, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - var value, length int8 - for isDigit(parser.buffer, parser.bufferPos) { - // Check if the number is too long. 
- length++ - if length > maxNumberLength { - return yamlParserSetScannerError(parser, "while scanning a %YAML directive", - startMark, "found extremely long version number") - } - value = value*10 + int8(asDigit(parser.buffer, parser.bufferPos)) - skip(parser) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yamlParserSetScannerError(parser, "while scanning a %YAML directive", - startMark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yamlParserScanTagDirectiveValue(parser *yamlParserT, startMark yamlMarkT, handle, prefix *[]byte) bool { - var handleValue, prefixValue []byte - - // Eat whitespaces. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - - for isBlank(parser.buffer, parser.bufferPos) { - skip(parser) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yamlParserScanTagHandle(parser, true, startMark, &handleValue) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - if !isBlank(parser.buffer, parser.bufferPos) { - yamlParserSetScannerError(parser, "while scanning a %TAG directive", - startMark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for isBlank(parser.buffer, parser.bufferPos) { - skip(parser) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yamlParserScanTagURI(parser, true, nil, startMark, &prefixValue) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - if !isBlankz(parser.buffer, parser.bufferPos) { - yamlParserSetScannerError(parser, "while scanning a %TAG directive", - startMark, "did not find expected whitespace or line break") - return false - } - - *handle = handleValue - *prefix = prefixValue - return true -} - -func yamlParserScanAnchor(parser *yamlParserT, token *yamlTokenT, typ yamlTokenTypeT) bool { - var s []byte - - // Eat the indicator character. - startMark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - - for isAlpha(parser.buffer, parser.bufferPos) { - s = read(parser, s) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - endMark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(isBlankz(parser.buffer, parser.bufferPos) || parser.buffer[parser.bufferPos] == '?' 
|| - parser.buffer[parser.bufferPos] == ':' || parser.buffer[parser.bufferPos] == ',' || - parser.buffer[parser.bufferPos] == ']' || parser.buffer[parser.bufferPos] == '}' || - parser.buffer[parser.bufferPos] == '%' || parser.buffer[parser.bufferPos] == '@' || - parser.buffer[parser.bufferPos] == '`') { - context := "while scanning an alias" - if typ == yamlAnchorToken { - context = "while scanning an anchor" - } - yamlParserSetScannerError(parser, context, startMark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yamlTokenT{ - typ: typ, - startMark: startMark, - endMark: endMark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yamlParserScanTag(parser *yamlParserT, token *yamlTokenT) bool { - var handle, suffix []byte - - startMark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yamlParserUpdateBuffer(parser, 2) { - return false - } - - if parser.buffer[parser.bufferPos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yamlParserScanTagURI(parser, false, nil, startMark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.bufferPos] != '>' { - yamlParserSetScannerError(parser, "while scanning a tag", - startMark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yamlParserScanTagHandle(parser, false, startMark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yamlParserScanTagURI(parser, false, nil, startMark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yamlParserScanTagURI(parser, false, handle, startMark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - if !isBlankz(parser.buffer, parser.bufferPos) { - yamlParserSetScannerError(parser, "while scanning a tag", - startMark, "did not find expected whitespace or line break") - return false - } - - endMark := parser.mark - - // Create a token. - *token = yamlTokenT{ - typ: yamlTagToken, - startMark: startMark, - endMark: endMark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yamlParserScanTagHandle(parser *yamlParserT, directive bool, startMark yamlMarkT, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - if parser.buffer[parser.bufferPos] != '!' { - yamlParserSetScannerTagError(parser, directive, - startMark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. 
- if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - for isAlpha(parser.buffer, parser.bufferPos) { - s = read(parser, s) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.bufferPos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yamlParserSetScannerTagError(parser, directive, - startMark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yamlParserScanTagURI(parser *yamlParserT, directive bool, head []byte, startMark yamlMarkT, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for isAlpha(parser.buffer, parser.bufferPos) || parser.buffer[parser.bufferPos] == ';' || - parser.buffer[parser.bufferPos] == '/' || parser.buffer[parser.bufferPos] == '?' || - parser.buffer[parser.bufferPos] == ':' || parser.buffer[parser.bufferPos] == '@' || - parser.buffer[parser.bufferPos] == '&' || parser.buffer[parser.bufferPos] == '=' || - parser.buffer[parser.bufferPos] == '+' || parser.buffer[parser.bufferPos] == '$' || - parser.buffer[parser.bufferPos] == ',' || parser.buffer[parser.bufferPos] == '.' || - parser.buffer[parser.bufferPos] == '!' || parser.buffer[parser.bufferPos] == '~' || - parser.buffer[parser.bufferPos] == '*' || parser.buffer[parser.bufferPos] == '\'' || - parser.buffer[parser.bufferPos] == '(' || parser.buffer[parser.bufferPos] == ')' || - parser.buffer[parser.bufferPos] == '[' || parser.buffer[parser.bufferPos] == ']' || - parser.buffer[parser.bufferPos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.bufferPos] == '%' { - if !yamlParserScanURIEscapes(parser, directive, startMark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yamlParserSetScannerTagError(parser, directive, - startMark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yamlParserScanURIEscapes(parser *yamlParserT, directive bool, startMark yamlMarkT, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yamlParserUpdateBuffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.bufferPos] == '%' && - isHex(parser.buffer, parser.bufferPos+1) && - isHex(parser.buffer, parser.bufferPos+2)) { - return yamlParserSetScannerTagError(parser, directive, - startMark, "did not find URI escaped octet") - } - - // Get the octet. 
- octet := byte((asHex(parser.buffer, parser.bufferPos+1) << 4) + asHex(parser.buffer, parser.bufferPos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yamlParserSetScannerTagError(parser, directive, - startMark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yamlParserSetScannerTagError(parser, directive, - startMark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yamlParserScanBlockScalar(parser *yamlParserT, token *yamlTokenT, literal bool) bool { - // Eat the indicator '|' or '>'. - startMark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.bufferPos] == '+' || parser.buffer[parser.bufferPos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.bufferPos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - if isDigit(parser.buffer, parser.bufferPos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.bufferPos] == '0' { - yamlParserSetScannerError(parser, "while scanning a block scalar", - startMark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. - increment = asDigit(parser.buffer, parser.bufferPos) - skip(parser) - } - - } else if isDigit(parser.buffer, parser.bufferPos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.bufferPos] == '0' { - yamlParserSetScannerError(parser, "while scanning a block scalar", - startMark, "found an indentation indicator equal to 0") - return false - } - increment = asDigit(parser.buffer, parser.bufferPos) - skip(parser) - - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - if parser.buffer[parser.bufferPos] == '+' || parser.buffer[parser.bufferPos] == '-' { - if parser.buffer[parser.bufferPos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - for isBlank(parser.buffer, parser.bufferPos) { - skip(parser) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - if parser.buffer[parser.bufferPos] == '#' { - for !isBreakz(parser.buffer, parser.bufferPos) { - skip(parser) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !isBreakz(parser.buffer, parser.bufferPos) { - yamlParserSetScannerError(parser, "while scanning a block scalar", - startMark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if isBreak(parser.buffer, parser.bufferPos) { - if parser.unread < 2 && !yamlParserUpdateBuffer(parser, 2) { - return false - } - skipLine(parser) - } - - endMark := parser.mark - - // Set the indentation level if it was specified. 
- var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leadingBreak, trailingBreaks []byte - if !yamlParserScanBlockScalarBreaks(parser, &indent, &trailingBreaks, startMark, &endMark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - var leadingBlank, trailingBlank bool - for parser.mark.column == indent && !isZ(parser.buffer, parser.bufferPos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailingBlank = isBlank(parser.buffer, parser.bufferPos) - - // Check if we need to fold the leading line break. - if !literal && !leadingBlank && !trailingBlank && len(leadingBreak) > 0 && leadingBreak[0] == '\n' { - // Do we need to join the lines by space? - if len(trailingBreaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leadingBreak...) - } - leadingBreak = leadingBreak[:0] - - // Append the remaining line breaks. - s = append(s, trailingBreaks...) - trailingBreaks = trailingBreaks[:0] - - // Is it a leading whitespace? - leadingBlank = isBlank(parser.buffer, parser.bufferPos) - - // Consume the current line. - for !isBreakz(parser.buffer, parser.bufferPos) { - s = read(parser, s) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yamlParserUpdateBuffer(parser, 2) { - return false - } - - leadingBreak = readLine(parser, leadingBreak) - - // Eat the following indentation spaces and line breaks. - if !yamlParserScanBlockScalarBreaks(parser, &indent, &trailingBreaks, startMark, &endMark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leadingBreak...) - } - if chomping == 1 { - s = append(s, trailingBreaks...) - } - - // Create a token. - *token = yamlTokenT{ - typ: yamlScalarToken, - startMark: startMark, - endMark: endMark, - value: s, - style: yamlLiteralScalarStyle, - } - if !literal { - token.style = yamlFoldedScalarStyle - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yamlParserScanBlockScalarBreaks(parser *yamlParserT, indent *int, breaks *[]byte, startMark yamlMarkT, endMark *yamlMarkT) bool { - *endMark = parser.mark - - // Eat the indentation spaces and line breaks. - maxIndent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && isSpace(parser.buffer, parser.bufferPos) { - skip(parser) - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - if parser.mark.column > maxIndent { - maxIndent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && isTab(parser.buffer, parser.bufferPos) { - return yamlParserSetScannerError(parser, "while scanning a block scalar", - startMark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !isBreak(parser.buffer, parser.bufferPos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yamlParserUpdateBuffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. 
- *breaks = readLine(parser, *breaks) - *endMark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = maxIndent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yamlParserScanFlowScalar(parser *yamlParserT, token *yamlTokenT, single bool) bool { - // Eat the left quote. - startMark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leadingBreak, trailingBreaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yamlParserUpdateBuffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.bufferPos+0] == '-' && - parser.buffer[parser.bufferPos+1] == '-' && - parser.buffer[parser.bufferPos+2] == '-') || - (parser.buffer[parser.bufferPos+0] == '.' && - parser.buffer[parser.bufferPos+1] == '.' && - parser.buffer[parser.bufferPos+2] == '.')) && - isBlankz(parser.buffer, parser.bufferPos+3) { - yamlParserSetScannerError(parser, "while scanning a quoted scalar", - startMark, "found unexpected document indicator") - return false - } - - // Check for EOF. - if isZ(parser.buffer, parser.bufferPos) { - yamlParserSetScannerError(parser, "while scanning a quoted scalar", - startMark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leadingBlanks := false - for !isBlankz(parser.buffer, parser.bufferPos) { - if single && parser.buffer[parser.bufferPos] == '\'' && parser.buffer[parser.bufferPos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.bufferPos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.bufferPos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.bufferPos] == '\\' && isBreak(parser.buffer, parser.bufferPos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yamlParserUpdateBuffer(parser, 3) { - return false - } - skip(parser) - skipLine(parser) - leadingBlanks = true - break - - } else if !single && parser.buffer[parser.bufferPos] == '\\' { - // It is an escape sequence. - codeLength := 0 - - // Check the escape character. 
- switch parser.buffer[parser.bufferPos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - codeLength = 2 - case 'u': - codeLength = 4 - case 'U': - codeLength = 8 - default: - yamlParserSetScannerError(parser, "while parsing a quoted scalar", - startMark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if codeLength > 0 { - var value int - - // Scan the character value. - if parser.unread < codeLength && !yamlParserUpdateBuffer(parser, codeLength) { - return false - } - for k := 0; k < codeLength; k++ { - if !isHex(parser.buffer, parser.bufferPos+k) { - yamlParserSetScannerError(parser, "while parsing a quoted scalar", - startMark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + asHex(parser.buffer, parser.bufferPos+k) - } - - // Check the value and write the character. - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yamlParserSetScannerError(parser, "while parsing a quoted scalar", - startMark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < codeLength; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yamlParserUpdateBuffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.bufferPos] == '\'' { - break - } - } else { - if parser.buffer[parser.bufferPos] == '"' { - break - } - } - - // Consume blank characters. - for isBlank(parser.buffer, parser.bufferPos) || isBreak(parser.buffer, parser.bufferPos) { - if isBlank(parser.buffer, parser.bufferPos) { - // Consume a space or a tab character. - if !leadingBlanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yamlParserUpdateBuffer(parser, 2) { - return false - } - - // Check if it is a first line break. 
- if !leadingBlanks { - whitespaces = whitespaces[:0] - leadingBreak = readLine(parser, leadingBreak) - leadingBlanks = true - } else { - trailingBreaks = readLine(parser, trailingBreaks) - } - } - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leadingBlanks { - // Do we need to fold line breaks? - if len(leadingBreak) > 0 && leadingBreak[0] == '\n' { - if len(trailingBreaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailingBreaks...) - } - } else { - s = append(s, leadingBreak...) - s = append(s, trailingBreaks...) - } - trailingBreaks = trailingBreaks[:0] - leadingBreak = leadingBreak[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - endMark := parser.mark - - // Create a token. - *token = yamlTokenT{ - typ: yamlScalarToken, - startMark: startMark, - endMark: endMark, - value: s, - style: yamlSingleQuotedScalarStyle, - } - if !single { - token.style = yamlDoubleQuotedScalarStyle - } - return true -} - -// Scan a plain scalar. -func yamlParserScanPlainScalar(parser *yamlParserT, token *yamlTokenT) bool { - - var s, leadingBreak, trailingBreaks, whitespaces []byte - var leadingBlanks bool - var indent = parser.indent + 1 - - startMark := parser.mark - endMark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yamlParserUpdateBuffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.bufferPos+0] == '-' && - parser.buffer[parser.bufferPos+1] == '-' && - parser.buffer[parser.bufferPos+2] == '-') || - (parser.buffer[parser.bufferPos+0] == '.' && - parser.buffer[parser.bufferPos+1] == '.' && - parser.buffer[parser.bufferPos+2] == '.')) && - isBlankz(parser.buffer, parser.bufferPos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.bufferPos] == '#' { - break - } - - // Consume non-blank characters. - for !isBlankz(parser.buffer, parser.bufferPos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.bufferPos] == ':' && isBlankz(parser.buffer, parser.bufferPos+1)) || - (parser.flowLevel > 0 && - (parser.buffer[parser.bufferPos] == ',' || - parser.buffer[parser.bufferPos] == '?' || parser.buffer[parser.bufferPos] == '[' || - parser.buffer[parser.bufferPos] == ']' || parser.buffer[parser.bufferPos] == '{' || - parser.buffer[parser.bufferPos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leadingBlanks || len(whitespaces) > 0 { - if leadingBlanks { - // Do we need to fold line breaks? - if leadingBreak[0] == '\n' { - if len(trailingBreaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailingBreaks...) - } - } else { - s = append(s, leadingBreak...) - s = append(s, trailingBreaks...) - } - trailingBreaks = trailingBreaks[:0] - leadingBreak = leadingBreak[:0] - leadingBlanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - endMark = parser.mark - if parser.unread < 2 && !yamlParserUpdateBuffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(isBlank(parser.buffer, parser.bufferPos) || isBreak(parser.buffer, parser.bufferPos)) { - break - } - - // Consume blank characters. 
- if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - - for isBlank(parser.buffer, parser.bufferPos) || isBreak(parser.buffer, parser.bufferPos) { - if isBlank(parser.buffer, parser.bufferPos) { - - // Check for tab characters that abuse indentation. - if leadingBlanks && parser.mark.column < indent && isTab(parser.buffer, parser.bufferPos) { - yamlParserSetScannerError(parser, "while scanning a plain scalar", - startMark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leadingBlanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yamlParserUpdateBuffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leadingBlanks { - whitespaces = whitespaces[:0] - leadingBreak = readLine(parser, leadingBreak) - leadingBlanks = true - } else { - trailingBreaks = readLine(parser, trailingBreaks) - } - } - if parser.unread < 1 && !yamlParserUpdateBuffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flowLevel == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yamlTokenT{ - typ: yamlScalarToken, - startMark: startMark, - endMark: endMark, - value: s, - style: yamlPlainScalarStyle, - } - - // Note that we change the 'simple_key_allowed' flag. - if leadingBlanks { - parser.simpleKeyAllowed = true - } - return true -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/sorter.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/sorter.go deleted file mode 100644 index 25a1c25d8..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/sorter.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - if ar[i] == '0' || br[i] == '0' { - for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { - if ar[j] != '0' { - an = 1 - bn = 1 - break - } - } - } - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. 
-func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/strict_scalar_resolve.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/strict_scalar_resolve.go deleted file mode 100644 index 30e820293..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/strict_scalar_resolve.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - "unicode" -) - -var ( - _ = fmt.Printf // debug - strictStyleInt = regexp.MustCompile(`^\-?(0|[1-9][0-9]*)$`) - strictStyleFloat = regexp.MustCompile(`^\-?(0|[1-9][0-9]*)(\.[0-9]+)?([eE][-+]?[0-9]+)?$`) -) - -func strictScalarResolve(tag, in string) (string, interface{}) { - // fmt.Printf("resolve: '%s' '%s'\n", tag, in) - - nativeTag, nativeVal := resolve(tag, in) - if len(tag) > 0 { - return nativeTag, nativeVal - } - - conTag, conVal := strictScalarResolveConservative(in) - - if conTag != nativeTag { - failf("Strict parsing: Found '%s' ambigious (could be %s or %s)", - in, shortTag(conTag), shortTag(nativeTag)) - panic("Unreachable") - } - - if !reflect.DeepEqual(conVal, nativeVal) { - failf("Strict parsing: Found '%s' ambigious (could be '%s' or '%s')", - in, conVal, nativeVal) - panic("Unreachable") - } - - return conTag, conVal -} - -func strictScalarResolveConservative(in string) (string, interface{}) { - switch in { - case "": - return yamlNullTag, nil - case "true": - return yamlBoolTag, true - case "false": - return yamlBoolTag, false - - default: - switch { - case strictStyleInt.MatchString(in): - intv, err := strconv.ParseInt(in, 0, 64) - if err != nil { - uintv, err := strconv.ParseUint(in, 0, 64) - if err == nil { - return yamlIntTag, uintv - } - - failf("Strict parsing: Parsing int '%s': %s", in, err) - panic("Unreachable") - } - if intv == int64(int(intv)) { - return yamlIntTag, int(intv) - } - return yamlIntTag, intv - - case strictStyleFloat.MatchString(in): - floatv, err := strconv.ParseFloat(in, 64) - if err != nil { - failf("Strict parsing: Parsing float '%s': %s", in, err) - panic("Unreachable") - } - return yamlFloatTag, floatv - - case strings.IndexFunc(in, unicode.IsSpace) != -1: - failf("Strict parsing: Strings with whitespace must be explicitly quoted: '%s'", in) - panic("Unreachable") - - case strings.Contains(in, ":"): - failf("Strict parsing: Strings with colon must be explicitly quoted: '%s'", in) - panic("Unreachable") - - // Catch missing new line before 
document start - case strings.Contains(in, "---"): - failf("Strict parsing: Strings with triple-dash must be explicitly quoted: '%s'", in) - panic("Unreachable") - - default: - return yamlStrTag, in - } - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/writerc.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/writerc.go deleted file mode 100644 index 1decf0f0f..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/writerc.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -// Set the writer error and return false. -func yamlEmitterSetWriterError(emitter *yamlEmitterT, problem string) bool { - emitter.error = yamlWriterError - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yamlEmitterFlush(emitter *yamlEmitterT) bool { - if emitter.writeHandler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.bufferPos == 0 { - return true - } - - if err := emitter.writeHandler(emitter, emitter.buffer[:emitter.bufferPos]); err != nil { - return yamlEmitterSetWriterError(emitter, "write error: "+err.Error()) - } - emitter.bufferPos = 0 - return true -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/yaml.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/yaml.go deleted file mode 100644 index 15202a6c2..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/yaml.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} - Line int -} - -type ArrayItem struct { - Value interface{} - Line int -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. -type Unmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. -type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. 
-// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. -// -func Unmarshal(in []byte, out interface{}) (err error) { - _, err = unmarshal(in, out, false) - return err -} - -// UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members, or mapping -// keys that are duplicates, will result in -// an error. -func UnmarshalStrict(in []byte, out interface{}) (err error) { - _, err = unmarshal(in, out, true) - return err -} - -func UnmarshalWithComments(in []byte, out interface{}) ([]Comment, error) { - return unmarshal(in, out, true) -} - -// A Decorder reads and decodes YAML values from an input stream. -type Decoder struct { - strict bool - useMapSlice bool - parser *parser - lastDocumentStartLine *int - resolveFunc func(tag string, in string) (string, interface{}) -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read -// data from r beyond the YAML values requested. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - parser: newParserFromReader(r), - } -} - -// SetStrict sets whether strict decoding behaviour is enabled when -// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. -func (dec *Decoder) SetStrict(strict bool) { - dec.strict = strict -} - -func (dec *Decoder) SetForceMapSlice(useMapSlice bool) { - dec.useMapSlice = useMapSlice -} - -func (dec *Decoder) SetStrictScalarResolve() { - dec.resolveFunc = strictScalarResolve -} - -// Decode reads the next YAML-encoded value from its input -// and stores it in the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. 
-func (dec *Decoder) Decode(v interface{}) (err error) { - dec.lastDocumentStartLine = nil - dec.parser.parser.comments = nil - d := newDecoder(dec.strict) - if dec.useMapSlice { - d.mapType = reflect.TypeOf(MapSlice{}) - } - if dec.resolveFunc != nil { - d.resolveFunc = dec.resolveFunc - } - defer handleErr(&err) - node := dec.parser.parse() - if node == nil { - return io.EOF - } - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(node, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - docLine := d.doc.line - dec.lastDocumentStartLine = &docLine - return nil -} - -func (dec *Decoder) DocumentStartLine() int { - if dec.lastDocumentStartLine != nil { - return *dec.lastDocumentStartLine - } - panic("document start line is not set") -} - -func (dec *Decoder) Comments() []Comment { - var comments []Comment - for _, c := range dec.parser.parser.comments { - comments = append(comments, Comment{Data: string(c.value), Line: c.startMark.line}) - } - return comments -} - -type Comment struct { - Data string - Line int -} - -func unmarshal(in []byte, out interface{}, strict bool) (comments []Comment, err error) { - defer handleErr(&err) - d := newDecoder(strict) - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return nil, &TypeError{d.terrors} - } - for _, c := range p.parser.comments { - comments = append(comments, Comment{Data: string(c.value), Line: c.startMark.line}) - } - return comments, nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key. Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. -// -// In addition, if the key is "-", the field is ignored. 
-// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -// An Encoder writes YAML values to an output stream. -type Encoder struct { - encoder *encoder -} - -// NewEncoder returns a new encoder that writes to w. -// The Encoder should be closed after use to flush all data -// to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - encoder: newEncoderWithWriter(w), - } -} - -// Encode writes the YAML encoding of v to the stream. -// If multiple items are encoded to the stream, the -// second and subsequent document will be preceded -// with a "---" document separator, but the first will not. -// -// See the documentation for Marshal for details about the conversion of Go -// values to YAML. -func (e *Encoder) Encode(v interface{}) (err error) { - defer handleErr(&err) - e.encoder.marshalDoc("", reflect.ValueOf(v)) - return nil -} - -// Close closes the encoder by writing any remaining data. -// It does not write a stream terminating string "...". -func (e *Encoder) Close() (err error) { - defer handleErr(&err) - e.encoder.finish() - return nil -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - ID int - - // Inline holds the field index if the field is part of an inlined struct. 
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.ID = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.ID = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -// IsZeroer is used to check whether an object is zero to -// determine whether it should be omitted when marshaling -// with the omitempty flag. One notable implementation -// is time.Time. 
-type IsZeroer interface { - IsZero() bool -} - -func isZero(v reflect.Value) bool { - kind := v.Kind() - if z, ok := v.Interface().(IsZeroer); ok { - if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { - return true - } - return z.IsZero() - } - switch kind { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/yamlh.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/yamlh.go deleted file mode 100644 index 055cdee9f..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/yamlh.go +++ /dev/null @@ -1,751 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yamlVersionDirectiveT struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yamlTagDirectiveT struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yamlEncodingT int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yamlAnyEncoding yamlEncodingT = iota - - yamlUtf8Encoding // The default UTF-8 encoding. - yamlUtf16leEncoding // The UTF-16-LE encoding with BOM. - yamlUtf16beEncoding // The UTF-16-BE encoding with BOM. -) - -type yamlBreakT int - -// Line break types. -const ( - // Let the parser choose the break type. - yamlAnyBreak yamlBreakT = iota - - yamlCrBreak // Use CR for line breaks (Mac style). - yamlLnBreak // Use LN for line breaks (Unix style). - yamlCrlnBreak // Use CR LN for line breaks (DOS style). -) - -type yamlErrorTypeT int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yamlNoError yamlErrorTypeT = iota - - yamlMemoryError // Cannot allocate or reallocate a block of memory. - yamlReaderError // Cannot read or decode the input stream. - yamlScannerError // Cannot scan the input stream. - yamlParserError // Cannot parse the input stream. - yamlComposerError // Cannot compose a YAML document. - yamlWriterError // Cannot write to the output stream. - yamlEmitterError // Cannot emit a YAML stream. -) - -// The pointer position. -type yamlMarkT struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yamlStyleT int8 - -type yamlScalarStyleT yamlStyleT - -// Scalar styles. -const ( - // Let the emitter choose the style. - yamlAnyScalarStyle yamlScalarStyleT = iota - - yamlPlainScalarStyle // The plain scalar style. - yamlSingleQuotedScalarStyle // The single-quoted scalar style. - yamlDoubleQuotedScalarStyle // The double-quoted scalar style. - yamlLiteralScalarStyle // The literal scalar style. 
- yamlFoldedScalarStyle // The folded scalar style. -) - -type yamlSequenceStyleT yamlStyleT - -// Sequence styles. -const ( - // Let the emitter choose the style. - yamlAnySequenceStyle yamlSequenceStyleT = iota - - yamlBlockSequenceStyle // The block sequence style. - yamlFlowSequenceStyle // The flow sequence style. -) - -type yamlMappingStyleT yamlStyleT - -// Mapping styles. -const ( - // Let the emitter choose the style. - yamlAnyMappingStyle yamlMappingStyleT = iota - - yamlBlockMappingStyle // The block mapping style. - yamlFlowMappingStyle // The flow mapping style. -) - -// Tokens - -type yamlTokenTypeT int - -// Token types. -const ( - // An empty token. - yamlNoToken yamlTokenTypeT = iota - - yamlStreamStartToken // A STREAM-START token. - yamlStreamEndToken // A STREAM-END token. - - yamlVersionDirectiveToken // A VERSION-DIRECTIVE token. - yamlTagDirectiveToken // A TAG-DIRECTIVE token. - yamlDocumentStartToken // A DOCUMENT-START token. - yamlDocumentEndToken // A DOCUMENT-END token. - - yamlBlockSequenceStartToken // A BLOCK-SEQUENCE-START token. - yamlBlockMappingStartToken // A BLOCK-SEQUENCE-END token. - yamlBlockEndToken // A BLOCK-END token. - - yamlFlowSequenceStartToken // A FLOW-SEQUENCE-START token. - yamlFlowSequenceEndToken // A FLOW-SEQUENCE-END token. - yamlFlowMappingStartToken // A FLOW-MAPPING-START token. - yamlFlowMappingEndToken // A FLOW-MAPPING-END token. - - yamlBlockEntryToken // A BLOCK-ENTRY token. - yamlFlowEntryToken // A FLOW-ENTRY token. - yamlKeyToken // A KEY token. - yamlValueToken // A VALUE token. - - yamlAliasToken // An ALIAS token. - yamlAnchorToken // An ANCHOR token. - yamlTagToken // A TAG token. - yamlScalarToken // A SCALAR token. - yamlCommentToken -) - -func (tt yamlTokenTypeT) String() string { - switch tt { - case yamlNoToken: - return "yaml_NO_TOKEN" - case yamlStreamStartToken: - return "yaml_STREAM_START_TOKEN" - case yamlStreamEndToken: - return "yaml_STREAM_END_TOKEN" - case yamlVersionDirectiveToken: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yamlTagDirectiveToken: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yamlDocumentStartToken: - return "yaml_DOCUMENT_START_TOKEN" - case yamlDocumentEndToken: - return "yaml_DOCUMENT_END_TOKEN" - case yamlBlockSequenceStartToken: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yamlBlockMappingStartToken: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yamlBlockEndToken: - return "yaml_BLOCK_END_TOKEN" - case yamlFlowSequenceStartToken: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yamlFlowSequenceEndToken: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yamlFlowMappingStartToken: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yamlFlowMappingEndToken: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yamlBlockEntryToken: - return "yaml_BLOCK_ENTRY_TOKEN" - case yamlFlowEntryToken: - return "yaml_FLOW_ENTRY_TOKEN" - case yamlKeyToken: - return "yaml_KEY_TOKEN" - case yamlValueToken: - return "yaml_VALUE_TOKEN" - case yamlAliasToken: - return "yaml_ALIAS_TOKEN" - case yamlAnchorToken: - return "yaml_ANCHOR_TOKEN" - case yamlTagToken: - return "yaml_TAG_TOKEN" - case yamlScalarToken: - return "yaml_SCALAR_TOKEN" - case yamlCommentToken: - return "yaml_COMMENT_TOKEN" - } - return "" -} - -// The token structure. -type yamlTokenT struct { - // The token type. - typ yamlTokenTypeT - - // The start/end of the token. - startMark, endMark yamlMarkT - - // The stream encoding (for yaml_STREAM_START_TOKEN). 
- encoding yamlEncodingT - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yamlScalarStyleT - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -func (t *yamlTokenT) String() string { - return fmt.Sprintf("Token(typ=%s, value=%s)", t.typ.String(), string(t.value)) -} - -// Events - -type yamlEventTypeT int8 - -// Event types. -const ( - // An empty event. - yamlNoEvent yamlEventTypeT = iota - - yamlStreamStartEvent // A STREAM-START event. - yamlStreamEndEvent // A STREAM-END event. - yamlDocumentStartEvent // A DOCUMENT-START event. - yamlDocumentEndEvent // A DOCUMENT-END event. - yamlAliasEvent // An ALIAS event. - yamlScalarEvent // A SCALAR event. - yamlSequenceStartEvent // A SEQUENCE-START event. - yamlSequenceEndEvent // A SEQUENCE-END event. - yamlMappingStartEvent // A MAPPING-START event. - yamlMappingEndEvent // A MAPPING-END event. -) - -var eventStrings = []string{ - yamlNoEvent: "none", - yamlStreamStartEvent: "stream start", - yamlStreamEndEvent: "stream end", - yamlDocumentStartEvent: "document start", - yamlDocumentEndEvent: "document end", - yamlAliasEvent: "alias", - yamlScalarEvent: "scalar", - yamlSequenceStartEvent: "sequence start", - yamlSequenceEndEvent: "sequence end", - yamlMappingStartEvent: "mapping start", - yamlMappingEndEvent: "mapping end", -} - -func (e yamlEventTypeT) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yamlEventT struct { - - // The event type. - typ yamlEventTypeT - - // The start and end of the event. - startMark, endMark yamlMarkT - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yamlEncodingT - - // The version directive (for yaml_DOCUMENT_START_EVENT). - versionDirective *yamlVersionDirectiveT - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tagDirectives []yamlTagDirectiveT - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quotedImplicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yamlStyleT -} - -func (e *yamlEventT) scalarStyle() yamlScalarStyleT { return yamlScalarStyleT(e.style) } -func (e *yamlEventT) sequenceStyle() yamlSequenceStyleT { return yamlSequenceStyleT(e.style) } -func (e *yamlEventT) mappingStyle() yamlMappingStyleT { return yamlMappingStyleT(e.style) } - -// Nodes - -const ( - yamlNullTag = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yamlBoolTag = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. 
- yamlStrTag = "tag:yaml.org,2002:str" // The tag !!str for string values. - yamlIntTag = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yamlFloatTag = "tag:yaml.org,2002:float" // The tag !!float for float values. - yamlTimestampTag = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yamlSeqTag = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yamlMapTag = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yamlBinaryTag = "tag:yaml.org,2002:binary" - yamlMergeTag = "tag:yaml.org,2002:merge" - - yamlDefaultScalarTag = yamlStrTag // The default scalar tag is !!str. - yamlDefaultSequenceTag = yamlSeqTag // The default sequence tag is !!seq. - yamlDefaultMappingTag = yamlMapTag // The default mapping tag is !!map. -) - -type yamlNodeTypeT int - -// Node types. -const ( - // An empty node. - yamlNoNode yamlNodeTypeT = iota - - yamlScalarNode // A scalar node. - yamlSequenceNode // A sequence node. - yamlMappingNode // A mapping node. -) - -// An element of a sequence node. -type yamlNodeItemT int - -// An element of a mapping node. -type yamlNodePairT struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yamlNodeT struct { - typ yamlNodeTypeT // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yamlScalarStyleT // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - itemsData []yamlNodeItemT // The stack of sequence items. - style yamlSequenceStyleT // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairsData []yamlNodePairT // The stack of mapping pairs (key, value). - pairsStart *yamlNodePairT // The beginning of the stack. - pairsEnd *yamlNodePairT // The end of the stack. - pairsTop *yamlNodePairT // The top of the stack. - style yamlMappingStyleT // The mapping style. - } - - startMark yamlMarkT // The beginning of the node. - endMark yamlMarkT // The end of the node. - -} - -// The document structure. -type yamlDocumentT struct { - - // The document nodes. - nodes []yamlNodeT - - // The version directive. - versionDirective *yamlVersionDirectiveT - - // The list of tag directives. - tagDirectivesData []yamlTagDirectiveT - tagDirectivesStart int // The beginning of the tag directives list. - tagDirectivesEnd int // The end of the tag directives list. - - startImplicit int // Is the document start indicator implicit? - endImplicit int // Is the document end indicator implicit? - - // The start/end of the document. - startMark, endMark yamlMarkT -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. 
On EOF, the handler should set the -// size_read to 0 and return 1. -type yamlReadHandlerT func(parser *yamlParserT, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yamlSimpleKeyT struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - tokenNumber int // The number of the token. - mark yamlMarkT // The position mark. -} - -// The states of the parser. -type yamlParserStateT int - -const ( - yamlParseStreamStartState yamlParserStateT = iota - - yamlParseImplicitDocumentStartState // Expect the beginning of an implicit document. - yamlParseDocumentStartState // Expect DOCUMENT-START. - yamlParseDocumentContentState // Expect the content of a document. - yamlParseDocumentEndState // Expect DOCUMENT-END. - yamlParseBlockNodeState // Expect a block node. - yamlParseBlockNodeOrIndentlessSequenceState // Expect a block node or indentless sequence. - yamlParseFlowNodeState // Expect a flow node. - yamlParseBlockSequenceFirstEntryState // Expect the first entry of a block sequence. - yamlParseBlockSequenceEntryState // Expect an entry of a block sequence. - yamlParseIndentlessSequenceEntryState // Expect an entry of an indentless sequence. - yamlParseBlockMappingFirstKeyState // Expect the first key of a block mapping. - yamlParseBlockMappingKeyState // Expect a block mapping key. - yamlParseBlockMappingValueState // Expect a block mapping value. - yamlParseFlowSequenceFirstEntryState // Expect the first entry of a flow sequence. - yamlParseFlowSequenceEntryState // Expect an entry of a flow sequence. - yamlParseFlowSequenceEntryMappingKeyState // Expect a key of an ordered mapping. - yamlParseFlowSequenceEntryMappingValueState // Expect a value of an ordered mapping. - yamlParseFlowSequenceEntryMappingEndState // Expect the and of an ordered mapping entry. - yamlParseFlowMappingFirstKeyState // Expect the first key of a flow mapping. - yamlParseFlowMappingKeyState // Expect a key of a flow mapping. - yamlParseFlowMappingValueState // Expect a value of a flow mapping. - yamlParseFlowMappingEmptyValueState // Expect an empty value of a flow mapping. - yamlParseEndState // Expect nothing. 
-) - -func (ps yamlParserStateT) String() string { - switch ps { - case yamlParseStreamStartState: - return "yaml_PARSE_STREAM_START_STATE" - case yamlParseImplicitDocumentStartState: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yamlParseDocumentStartState: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yamlParseDocumentContentState: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yamlParseDocumentEndState: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yamlParseBlockNodeState: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yamlParseBlockNodeOrIndentlessSequenceState: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yamlParseFlowNodeState: - return "yaml_PARSE_FLOW_NODE_STATE" - case yamlParseBlockSequenceFirstEntryState: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yamlParseBlockSequenceEntryState: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yamlParseIndentlessSequenceEntryState: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yamlParseBlockMappingFirstKeyState: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yamlParseBlockMappingKeyState: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yamlParseBlockMappingValueState: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yamlParseFlowSequenceFirstEntryState: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yamlParseFlowSequenceEntryState: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yamlParseFlowSequenceEntryMappingKeyState: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yamlParseFlowSequenceEntryMappingValueState: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yamlParseFlowSequenceEntryMappingEndState: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yamlParseFlowMappingFirstKeyState: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yamlParseFlowMappingKeyState: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yamlParseFlowMappingValueState: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yamlParseFlowMappingEmptyValueState: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yamlParseEndState: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yamlAliasDataT struct { - anchor []byte // The anchor. - index int // The node id. - mark yamlMarkT // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yamlParserT struct { - - // Error handling - - error yamlErrorTypeT // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problemOffset int - problemValue int - problemMark yamlMarkT - - // The error context. - context string - contextMark yamlMarkT - - // Reader stuff - - readHandler yamlReadHandlerT // Read handler. - - inputReader io.Reader // File input data. - input []byte // String input data. - inputPos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - bufferPos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - rawBuffer []byte // The raw buffer. - rawBufferPos int // The current position of the buffer. - - encoding yamlEncodingT // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yamlMarkT // The mark of the current position. 
- - // Scanner stuff - - streamStartProduced bool // Have we started to scan the input stream? - streamEndProduced bool // Have we reached the end of the input stream? - - flowLevel int // The number of unclosed '[' and '{' indicators. - - tokens []yamlTokenT // The tokens queue. - tokensHead int // The head of the tokens queue. - tokensParsed int // The number of tokens fetched from the queue. - tokenAvailable bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simpleKeyAllowed bool // May a simple key occur at the current position? - simpleKeys []yamlSimpleKeyT // The stack of simple keys. - - // Parser stuff - - state yamlParserStateT // The current parser state. - states []yamlParserStateT // The parser states stack. - marks []yamlMarkT // The stack of marks. - tagDirectives []yamlTagDirectiveT // The list of TAG directives. - - // Dumper stuff - - aliases []yamlAliasDataT // The alias data. - - document *yamlDocumentT // The currently parsed document. - - comments []yamlTokenT - pendingSeqItemEvent *yamlEventT -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yamlWriteHandlerT func(emitter *yamlEmitterT, buffer []byte) error - -type yamlEmitterStateT int - -// The emitter states. -const ( - // Expect STREAM-START. - yamlEmitStreamStartState yamlEmitterStateT = iota - - yamlEmitFirstDocumentStartState // Expect the first DOCUMENT-START or STREAM-END. - yamlEmitDocumentStartState // Expect DOCUMENT-START or STREAM-END. - yamlEmitDocumentContentState // Expect the content of a document. - yamlEmitDocumentEndState // Expect DOCUMENT-END. - yamlEmitFlowSequenceFirstItemState // Expect the first item of a flow sequence. - yamlEmitFlowSequenceItemState // Expect an item of a flow sequence. - yamlEmitFlowMappingFirstKeyState // Expect the first key of a flow mapping. - yamlEmitFlowMappingKeyState // Expect a key of a flow mapping. - yamlEmitFlowMappingSimpleValueState // Expect a value for a simple key of a flow mapping. - yamlEmitFlowMappingValueState // Expect a value of a flow mapping. - yamlEmitBlockSequenceFirstItemState // Expect the first item of a block sequence. - yamlEmitBlockSequenceItemState // Expect an item of a block sequence. - yamlEmitBlockMappingFirstKeyState // Expect the first key of a block mapping. - yamlEmitBlockMappingKeyState // Expect the key of a block mapping. - yamlEmitBlockMappingSimpleValueState // Expect a value for a simple key of a block mapping. - yamlEmitBlockMappingValueState // Expect a value of a block mapping. - yamlEmitEndState // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yamlEmitterT struct { - - // Error handling - - error yamlErrorTypeT // Error type. - problem string // Error description. - - // Writer stuff - - writeHandler yamlWriteHandlerT // Write handler. 
- - outputBuffer *[]byte // String output data. - outputWriter io.Writer // File output data. - - buffer []byte // The working buffer. - bufferPos int // The current position of the buffer. - - rawBuffer []byte // The raw buffer. - rawBufferPos int // The current position of the buffer. - - encoding yamlEncodingT // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - bestIndent int // The number of indentation spaces. - bestWidth int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - lineBreak yamlBreakT // The preferred line break. - - state yamlEmitterStateT // The current emitter state. - states []yamlEmitterStateT // The stack of states. - - events []yamlEventT // The event queue. - eventsHead int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tagDirectives []yamlTagDirectiveT // The list of tag directives. - - indent int // The current indentation level. - - flowLevel int // The current flow level. - - rootContext bool // Is it the document root context? - sequenceContext bool // Is it a sequence context? - mappingContext bool // Is it a mapping context? - simpleKeyContext bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - openEnded bool // If an explicit document end is required? - - // Anchor analysis. - anchorData struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tagData struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalarData struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flowPlainAllowed bool // Can the scalar be expessed in the flow plain style? - blockPlainAllowed bool // Can the scalar be expressed in the block plain style? - singleQuotedAllowed bool // Can the scalar be expressed in the single quoted style? - blockAllowed bool // Can the scalar be expressed in the literal or folded styles? - style yamlScalarStyleT // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - lastAnchorID int // The last assigned anchor id. - - document *yamlDocumentT // The currently emitted document. -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/yamlprivateh.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/yamlprivateh.go deleted file mode 100644 index c14d63baa..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2/yamlprivateh.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -const ( - // The size of the input raw buffer. - inputRawBufferSize = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - inputBufferSize = inputRawBufferSize * 3 - - // The size of the output buffer. - outputBufferSize = 128 - - // The size of the output raw buffer. 
- // It should be possible to encode the whole output buffer. - outputRawBufferSize = (outputBufferSize*2 + 2) - - // The size of other stacks and queues. - initialStackSize = 16 - initialQueueSize = 16 - initialStringSize = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func isAlpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func isDigit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func asDigit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func isHex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func asHex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func isASCII(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func isPrintable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func isZ(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func isBom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func isSpace(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func isTab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func isBlank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func isBreak(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func isCrlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func isBreakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. 
-func isSpacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func isBlankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/node.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/node.go deleted file mode 100644 index 2c8a3cbc9..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/node.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -import ( - "encoding/json" - "fmt" - - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/orderedmap" - "github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2" -) - -func (ds *DocumentSet) GetPosition() *filepos.Position { return ds.Position } -func (d *Document) GetPosition() *filepos.Position { return d.Position } -func (m *Map) GetPosition() *filepos.Position { return m.Position } -func (mi *MapItem) GetPosition() *filepos.Position { return mi.Position } -func (a *Array) GetPosition() *filepos.Position { return a.Position } -func (ai *ArrayItem) GetPosition() *filepos.Position { return ai.Position } -func (s *Scalar) GetPosition() *filepos.Position { return s.Position } - -func (ds *DocumentSet) SetPosition(position *filepos.Position) { ds.Position = position } -func (d *Document) SetPosition(position *filepos.Position) { d.Position = position } -func (m *Map) SetPosition(position *filepos.Position) { m.Position = position } -func (mi *MapItem) SetPosition(position *filepos.Position) { mi.Position = position } -func (a *Array) SetPosition(position *filepos.Position) { a.Position = position } -func (ai *ArrayItem) SetPosition(position *filepos.Position) { ai.Position = position } - -func (ds *DocumentSet) ValueTypeAsString() string { return "documentSet" } -func (d *Document) ValueTypeAsString() string { return typeToString(d.Value) } -func (m *Map) ValueTypeAsString() string { return "map" } -func (mi *MapItem) ValueTypeAsString() string { return typeToString(mi.Value) } -func (a *Array) ValueTypeAsString() string { return "array" } -func (ai *ArrayItem) ValueTypeAsString() string { return typeToString(ai.Value) } -func (s *Scalar) ValueTypeAsString() string { return typeToString(s.Value) } - -func typeToString(value interface{}) string { - switch value.(type) { - case float64: - return "float" - case int, int64, uint64: - return "integer" - case bool: - return "boolean" - case nil: - return "null" - default: - if t, ok := value.(TypeWithValues); 
ok { - return t.ValueTypeAsString() - } - return fmt.Sprintf("%T", value) - } -} - -func (ds *DocumentSet) SetValue(val interface{}) error { - return fmt.Errorf("cannot set value on a documentset") -} - -func (d *Document) SetValue(val interface{}) error { - d.ResetValue() - return d.AddValue(val) -} - -func (m *Map) SetValue(val interface{}) error { - return fmt.Errorf("cannot set value on a map") -} - -func (mi *MapItem) SetValue(val interface{}) error { - mi.ResetValue() - return mi.AddValue(val) -} - -func (a *Array) SetValue(val interface{}) error { - return fmt.Errorf("cannot set value on an array") -} - -func (ai *ArrayItem) SetValue(val interface{}) error { - ai.ResetValue() - return ai.AddValue(val) -} - -func isValidValue(val interface{}) bool { - switch val.(type) { - case *Map, *orderedmap.Map, - *Array, []interface{}, - int, int8, int16, int32, int64, - uint, uint8, uint16, uint32, uint64, - float32, float64, - bool, - string, - nil: - return true - default: - return false - } -} - -func (ds *DocumentSet) ResetValue() { ds.Items = nil } -func (d *Document) ResetValue() { d.Value = nil } -func (m *Map) ResetValue() { m.Items = nil } -func (mi *MapItem) ResetValue() { mi.Value = nil } -func (a *Array) ResetValue() { a.Items = nil } -func (ai *ArrayItem) ResetValue() { ai.Value = nil } - -func (ds *DocumentSet) AddValue(val interface{}) error { - if item, ok := val.(*Document); ok { - ds.Items = append(ds.Items, item) - return nil - } - return fmt.Errorf("cannot add non-document value (%T) into documentset", val) -} - -func (d *Document) AddValue(val interface{}) error { - if !isValidValue(val) { - return fmt.Errorf("documents can only contain arrays, maps, or scalars; this is a %T", val) - } - d.Value = val - return nil -} - -func (m *Map) AddValue(val interface{}) error { - if item, ok := val.(*MapItem); ok { - m.Items = append(m.Items, item) - return nil - } - return fmt.Errorf("cannot add non-map-item value (%T) into map", val) -} - -func (mi *MapItem) AddValue(val interface{}) error { - if !isValidValue(val) { - return fmt.Errorf("mapitems can only contain arrays, maps, or scalars; this is a %T", val) - } - mi.Value = val - return nil -} - -func (a *Array) AddValue(val interface{}) error { - if item, ok := val.(*ArrayItem); ok { - a.Items = append(a.Items, item) - return nil - } - return fmt.Errorf("cannot add non-array-item value (%T) into array", val) -} - -func (ai *ArrayItem) AddValue(val interface{}) error { - if !isValidValue(val) { - return fmt.Errorf("arrayitems can only contain maps, arrays, or scalars; this is a %T", val) - } - ai.Value = val - return nil -} - -func (ds *DocumentSet) GetValues() []interface{} { - var result []interface{} - for _, item := range ds.Items { - result = append(result, item) - } - return result -} - -func (d *Document) GetValues() []interface{} { return []interface{}{d.Value} } - -func (m *Map) GetValues() []interface{} { - var result []interface{} - for _, item := range m.Items { - result = append(result, item) - } - return result -} - -func (mi *MapItem) GetValues() []interface{} { return []interface{}{mi.Value} } - -func (a *Array) GetValues() []interface{} { - var result []interface{} - for _, item := range a.Items { - result = append(result, item) - } - return result -} - -func (ai *ArrayItem) GetValues() []interface{} { return []interface{}{ai.Value} } -func (s *Scalar) GetValues() []interface{} { return []interface{}{s.Value} } - -func (ds *DocumentSet) GetComments() []*Comment { return ds.Comments } -func (d *Document) 
GetComments() []*Comment { return d.Comments } -func (m *Map) GetComments() []*Comment { return m.Comments } -func (mi *MapItem) GetComments() []*Comment { return mi.Comments } -func (a *Array) GetComments() []*Comment { return a.Comments } -func (ai *ArrayItem) GetComments() []*Comment { return ai.Comments } - -func (ds *DocumentSet) addComments(comment *Comment) { ds.Comments = append(ds.Comments, comment) } -func (d *Document) addComments(comment *Comment) { d.Comments = append(d.Comments, comment) } -func (m *Map) addComments(comment *Comment) { - panic(fmt.Sprintf("Attempted to attach comment (%s) to Map (%v); maps cannot carry comments", comment.Data, m)) -} -func (mi *MapItem) addComments(comment *Comment) { mi.Comments = append(mi.Comments, comment) } -func (a *Array) addComments(comment *Comment) { - panic(fmt.Sprintf("Attempted to attach comment (%s) to Array (%v); arrays cannot carry comments", comment.Data, a)) -} -func (ai *ArrayItem) addComments(comment *Comment) { ai.Comments = append(ai.Comments, comment) } - -func (ds *DocumentSet) GetAnnotations() interface{} { return ds.annotations } -func (d *Document) GetAnnotations() interface{} { return d.annotations } -func (m *Map) GetAnnotations() interface{} { return m.annotations } -func (mi *MapItem) GetAnnotations() interface{} { return mi.annotations } -func (a *Array) GetAnnotations() interface{} { return a.annotations } -func (ai *ArrayItem) GetAnnotations() interface{} { return ai.annotations } - -func (ds *DocumentSet) SetAnnotations(anns interface{}) { ds.annotations = anns } -func (d *Document) SetAnnotations(anns interface{}) { d.annotations = anns } -func (m *Map) SetAnnotations(anns interface{}) { m.annotations = anns } -func (mi *MapItem) SetAnnotations(anns interface{}) { mi.annotations = anns } -func (a *Array) SetAnnotations(anns interface{}) { a.annotations = anns } -func (ai *ArrayItem) SetAnnotations(anns interface{}) { ai.annotations = anns } - -type TypeCheck struct { - Violations []error -} - -func (tc TypeCheck) Error() string { - if !tc.HasViolations() { - return "" - } - - msg := "" - for _, err := range tc.Violations { - msg += err.Error() + "\n" - } - return msg -} - -func (tc *TypeCheck) HasViolations() bool { - return len(tc.Violations) > 0 -} - -func (ds *DocumentSet) Check() TypeCheck { return TypeCheck{} } -func (d *Document) Check() (chk TypeCheck) { - switch typedContents := d.Value.(type) { - case Node: - chk = typedContents.Check() - } - - return chk -} -func (m *Map) Check() (chk TypeCheck) { - if m.Type == nil { - return - } - check := m.Type.CheckType(m) - if check.HasViolations() { - chk.Violations = append(chk.Violations, check.Violations...) - return - } - - for _, item := range m.Items { - check = item.Check() - if check.HasViolations() { - chk.Violations = append(chk.Violations, check.Violations...) - } - } - return -} -func (mi *MapItem) Check() (chk TypeCheck) { - check := mi.Type.CheckType(mi) - if check.HasViolations() { - chk.Violations = check.Violations - return - } - - check = checkCollectionItem(mi.Value, mi.Type.GetValueType(), mi.Position) - if check.HasViolations() { - chk.Violations = append(chk.Violations, check.Violations...) - } - return -} -func (a *Array) Check() (chk TypeCheck) { - for _, item := range a.Items { - check := item.Check() - if check.HasViolations() { - chk.Violations = append(chk.Violations, check.Violations...) 
- } - } - return -} -func (ai *ArrayItem) Check() (chk TypeCheck) { - if ai.Type == nil { - return - } - // TODO: This check only ensures that the ai is of ArrayItem type - // which we know because if it was not we would not assign - // the type to it. - // Given this maybe we can completely remove this check - // Lets not forget that the check of the type of the item - // is done by checkCollectionItem - chk = ai.Type.CheckType(ai) - if chk.HasViolations() { - return - } - - check := checkCollectionItem(ai.Value, ai.Type.GetValueType(), ai.Position) - if check.HasViolations() { - chk.Violations = append(chk.Violations, check.Violations...) - } - return chk -} - -// is it possible to enter this function with valueType=NullType or AnyType? -func checkCollectionItem(value interface{}, valueType Type, position *filepos.Position) (chk TypeCheck) { - switch typedValue := value.(type) { - case *Map: - check := typedValue.Check() - chk.Violations = append(chk.Violations, check.Violations...) - case *Array: - check := typedValue.Check() - chk.Violations = append(chk.Violations, check.Violations...) - default: - chk = valueType.CheckType(&Scalar{Value: value, Position: position}) - } - return chk -} - -// Below methods disallow marshaling of nodes directly -var _ []yaml.Marshaler = []yaml.Marshaler{&DocumentSet{}, &Document{}, &Map{}, &MapItem{}, &Array{}, &ArrayItem{}} - -func (ds *DocumentSet) MarshalYAML() (interface{}, error) { panic("Unexpected marshaling of docset") } -func (d *Document) MarshalYAML() (interface{}, error) { panic("Unexpected marshaling of doc") } -func (m *Map) MarshalYAML() (interface{}, error) { panic("Unexpected marshaling of map") } -func (mi *MapItem) MarshalYAML() (interface{}, error) { panic("Unexpected marshaling of mapitem") } -func (a *Array) MarshalYAML() (interface{}, error) { panic("Unexpected marshaling of array") } -func (ai *ArrayItem) MarshalYAML() (interface{}, error) { panic("Unexpected marshaling of arrayitem") } - -// Below methods disallow marshaling of nodes directly -var _ []json.Marshaler = []json.Marshaler{&DocumentSet{}, &Document{}, &Map{}, &MapItem{}, &Array{}, &ArrayItem{}} - -func (ds *DocumentSet) MarshalJSON() ([]byte, error) { panic("Unexpected marshaling of docset") } -func (d *Document) MarshalJSON() ([]byte, error) { panic("Unexpected marshaling of doc") } -func (m *Map) MarshalJSON() ([]byte, error) { panic("Unexpected marshaling of map") } -func (mi *MapItem) MarshalJSON() ([]byte, error) { panic("Unexpected marshaling of mapitem") } -func (a *Array) MarshalJSON() ([]byte, error) { panic("Unexpected marshaling of array") } -func (ai *ArrayItem) MarshalJSON() ([]byte, error) { panic("Unexpected marshaling of arrayitem") } - -func (ds *DocumentSet) sealed() {} -func (d *Document) sealed() {} -func (m *Map) sealed() {} -func (mi *MapItem) sealed() {} -func (a *Array) sealed() {} -func (ai *ArrayItem) sealed() {} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/parser.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/parser.go deleted file mode 100644 index 9bdaa7b14..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/parser.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -// TODO json repr inside yaml? 
- -import ( - "bytes" - "fmt" - "io" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2" -) - -var ( - docStartMarkerCheck = regexp.MustCompile(`\A\s*---\s+`) - - // eg "yaml: line 2: found character that cannot start any token" - lineErrRegexp = regexp.MustCompile(`^(?Pyaml: line )(?P\d+)(?P: .+)$`) -) - -type ParserOpts struct { - WithoutComments bool - Strict bool -} - -type Parser struct { - opts ParserOpts - associatedName string -} - -func NewParser(opts ParserOpts) *Parser { - return &Parser{opts, ""} -} - -func (p *Parser) ParseBytes(data []byte, associatedName string) (*DocumentSet, error) { - p.associatedName = associatedName - - // YAML library uses 0-based line numbers for nodes (but, first line in a text file is typically line 1) - nodeLineCorrection := 1 - // YAML library uses 1-based line numbers for errors - errLineCorrection := 0 - startsWithDocMarker := docStartMarkerCheck.Match(data) - - if !startsWithDocMarker { - data = append([]byte("---\n"), data...) - - // we just prepended a line to the original input, correct for that: - nodeLineCorrection-- - errLineCorrection-- - } - - docSet, err := p.parseBytes(data, nodeLineCorrection) - if err != nil { - return docSet, p.correctLineNumInErr(err, errLineCorrection) - } - - // Change first document's line number to be 1 - // since we always present line numbers as 1 based - // (note that first doc marker may be several lines down) - if !startsWithDocMarker && !docSet.Items[0].Position.IsKnown() { - docSet.Items[0].Position = filepos.NewPosition(1) - docSet.Items[0].Position.SetFile(associatedName) - } - setPositionOfCollections(docSet, nil) - - return docSet, nil -} - -func (p *Parser) parseBytes(data []byte, lineCorrection int) (*DocumentSet, error) { - docSet := &DocumentSet{Position: filepos.NewUnknownPosition()} - - var lastUnassignedComments []*Comment - - dec := yaml.NewDecoder(bytes.NewReader(data)) - dec.SetForceMapSlice(true) - - if p.opts.Strict { - dec.SetStrictScalarResolve() - } - - lines := strings.Split(string(data), "\n") - - for { - var rawVal interface{} - - err := dec.Decode(&rawVal) - if err != nil { - if err == io.EOF { - break - } - return nil, err - } - doc := &Document{ - Comments: lastUnassignedComments, - Value: p.parse(rawVal, lineCorrection, lines), - Position: p.newDocPosition(dec.DocumentStartLine(), lineCorrection, len(docSet.Items) == 0, lines), - } - - allComments, unassignedComments := p.assignComments(doc, dec.Comments(), lineCorrection) - docSet.AllComments = append(docSet.AllComments, allComments...) - lastUnassignedComments = unassignedComments - - docSet.Items = append(docSet.Items, doc) - } - - if len(lastUnassignedComments) > 0 { - endDoc := &Document{ - Comments: lastUnassignedComments, - Value: nil, - Position: filepos.NewUnknownPosition(), - injected: true, - } - docSet.Items = append(docSet.Items, endDoc) - } - - return docSet, nil -} - -// setPositionOfCollections assigns the Position of Maps and Arrays to their parent -// these kinds of nodes are not visible and therefore technically don't have a position. -// However, it is useful when communicating certain error cases to be able to reference -// a collection by line number. -// The position of the parent matches well with what the user sees. E.g. the MapItem that -// holds an Array is a great place to point at when referring to the entire array. 
-func setPositionOfCollections(node Node, parent Node) { - if !node.GetPosition().IsKnown() { - if parent != nil { - if mapNode, ok := node.(*Map); ok { - mapNode.Position = parent.GetPosition() - } - if arrayNode, ok := node.(*Array); ok { - arrayNode.Position = parent.GetPosition() - } - } - } - for _, val := range node.GetValues() { - child, isNode := val.(Node) - if isNode { - setPositionOfCollections(child, node) - } - } -} - -func (p *Parser) parse(val interface{}, lineCorrection int, lines []string) interface{} { - switch typedVal := val.(type) { - case yaml.MapSlice: - result := &Map{Position: p.newUnknownPosition()} - for _, item := range typedVal { - result.Items = append(result.Items, &MapItem{ - Key: item.Key, - Value: p.parse(item.Value, lineCorrection, lines), - Position: p.newPosition(item.Line, lineCorrection, lines[item.Line]), - }) - } - return result - - // As a precaution against yaml library returning non-ordered maps - case map[interface{}]interface{}: - panic("Unexpected map[interface{}]interface{} when parsing YAML, expected MapSlice") - - case []interface{}: - result := &Array{Position: filepos.NewUnknownPosition()} - for _, item := range typedVal { - if typedItem, ok := item.(yaml.ArrayItem); ok { - result.Items = append(result.Items, &ArrayItem{ - Value: p.parse(typedItem.Value, lineCorrection, lines), - Position: p.newPosition(typedItem.Line, lineCorrection, lines[typedItem.Line]), - }) - } else { - panic("unknown item") - } - } - return result - - default: - return val - } -} - -func (p *Parser) assignComments(val interface{}, comments []yaml.Comment, lineCorrection int) ([]*Comment, []*Comment) { - if p.opts.WithoutComments { - return nil, nil - } - - nodesAtLines := map[int][]Node{} - p.buildLineLocs(val, nodesAtLines) - - lineNums := p.buildLineNums(nodesAtLines) - allComments := []*Comment{} - unassignedComments := []*Comment{} - - for _, comment := range comments { - comment := &Comment{ - Data: comment.Data, - Position: p.newPosition(comment.Line, lineCorrection, comment.Data), - } - allComments = append(allComments, comment) - - var foundOwner bool - - for _, lineNum := range lineNums { - // Always looking at the same line or "above" (greater line number) - if comment.Position.LineNum() > lineNum { - continue - } - nodes, ok := nodesAtLines[lineNum] - if ok { - // Last node on the line is the one that owns inline comment - // otherwise it's the first one (outermost one) - // TODO any other better way to determine? 
- if comment.Position.LineNum() == lineNum { - nodes[len(nodes)-1].addComments(comment) - } else { - nodes[0].addComments(comment) - } - foundOwner = true - break - } - } - - if !foundOwner { - unassignedComments = append(unassignedComments, comment) - } - } - - return allComments, unassignedComments -} - -func (p *Parser) buildLineLocs(val interface{}, nodeAtLines map[int][]Node) { - if node, ok := val.(Node); ok { - if node.GetPosition().IsKnown() { - nodeAtLines[node.GetPosition().LineNum()] = append(nodeAtLines[node.GetPosition().LineNum()], node) - } - - for _, childVal := range node.GetValues() { - p.buildLineLocs(childVal, nodeAtLines) - } - } -} - -func (p *Parser) buildLineNums(nodeAtLines map[int][]Node) []int { - var result []int - for lineNum := range nodeAtLines { - result = append(result, lineNum) - } - sort.Ints(result) - return result -} - -func (p *Parser) correctLineNumInErr(err error, correction int) error { - submatches := lineErrRegexp.FindAllStringSubmatch(err.Error(), -1) - if len(submatches) != 1 || len(submatches[0]) != 4 { - return err - } - - actualLineNum, parseErr := strconv.Atoi(submatches[0][2]) - if parseErr != nil { - return err - } - - return fmt.Errorf("%s%d%s", submatches[0][1], p.newPosition(actualLineNum, correction, "").LineNum(), submatches[0][3]) -} - -func (p *Parser) newDocPosition(actualLineNum, correction int, firstDoc bool, lines []string) *filepos.Position { - if firstDoc && actualLineNum+correction == 0 { - return p.newUnknownPosition() - } - return p.newPosition(actualLineNum, correction, lines[actualLineNum]) -} - -func (p *Parser) newPosition(actualLineNum, correction int, line string) *filepos.Position { - pos := filepos.NewPosition(actualLineNum + correction) - pos.SetFile(p.associatedName) - pos.SetLine(line) - return pos -} - -func (p *Parser) newUnknownPosition() *filepos.Position { - pos := filepos.NewUnknownPosition() - pos.SetFile(p.associatedName) - return pos -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/plain.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/plain.go deleted file mode 100644 index d4c8246ec..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/plain.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -import ( - "fmt" - "reflect" - - "github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2" -) - -func PlainMarshal(in interface{}) ([]byte, error) { - return yaml.Marshal(in) -} - -func PlainUnmarshal(data []byte, out interface{}) error { - docSet, err := NewParser(ParserOpts{WithoutComments: true}).ParseBytes(data, "") - if err != nil { - return err - } - - if len(docSet.Items) != 1 { - return fmt.Errorf("Expected to find exactly one YAML document") - } - - newVal := docSet.Items[0].AsInterface() - - outVal := reflect.ValueOf(out) - if newVal == nil { - outVal.Elem().Set(reflect.Zero(outVal.Elem().Type())) - } else { - outVal.Elem().Set(reflect.ValueOf(newVal)) - } - - return nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/printer.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/printer.go deleted file mode 100644 index 607b957b4..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/printer.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -import ( - "bytes" - "fmt" - "io" - - "github.com/k14s/ytt/pkg/filepos" -) - -type Printer struct { - writer io.Writer - opts PrinterOpts -} - -type PrinterOpts struct { - ExcludeRefs bool -} - -func NewPrinter(writer io.Writer) Printer { - return Printer{writer, PrinterOpts{}} -} - -func NewPrinterWithOpts(writer io.Writer, opts PrinterOpts) Printer { - return Printer{writer, opts} -} - -func (p Printer) Print(val interface{}) { - fmt.Fprintf(p.writer, "%s", p.PrintStr(val)) -} - -func (p Printer) PrintStr(val interface{}) string { - buf := new(bytes.Buffer) - p.print(val, "", buf) - return buf.String() -} - -func (p Printer) print(val interface{}, indent string, writer io.Writer) { - const indentLvl = " " - - switch typedVal := val.(type) { - case *DocumentSet: - fmt.Fprintf(writer, "%s%s: docset%s\n", indent, p.lineStr(typedVal.Position), p.ptrStr(typedVal)) - p.printComments(typedVal.Comments, indent, writer) - - for _, item := range typedVal.Items { - p.print(item, indent+indentLvl, writer) - } - - case *Document: - fmt.Fprintf(writer, "%s%s: doc%s\n", indent, p.lineStr(typedVal.Position), p.ptrStr(typedVal)) - p.printComments(typedVal.Comments, indent, writer) - p.print(typedVal.Value, indent+indentLvl, writer) - - case *Map: - fmt.Fprintf(writer, "%s%s: map%s\n", indent, p.lineStr(typedVal.Position), p.ptrStr(typedVal)) - p.printComments(typedVal.Comments, indent, writer) - - for _, item := range typedVal.Items { - fmt.Fprintf(writer, "%s%s: key=%s%s\n", indent, p.lineStr(item.Position), item.Key, p.ptrStr(item)) - p.printComments(item.Comments, indent, writer) - p.print(item.Value, indent+indentLvl, writer) - } - - case *MapItem: - fmt.Fprintf(writer, "%s%s: key=%s%s\n", indent, p.lineStr(typedVal.Position), typedVal.Key, p.ptrStr(typedVal)) - p.printComments(typedVal.Comments, indent, writer) - p.print(typedVal.Value, indent+indentLvl, writer) - - case *Array: - fmt.Fprintf(writer, "%s%s: array%s\n", indent, p.lineStr(typedVal.Position), p.ptrStr(typedVal)) - p.printComments(typedVal.Comments, indent, writer) - - for i, item := range typedVal.Items { - fmt.Fprintf(writer, "%s%s: idx=%d%s\n", indent, p.lineStr(item.Position), i, p.ptrStr(item)) - p.printComments(item.Comments, indent, writer) - p.print(item.Value, indent+indentLvl, writer) - } - - case *ArrayItem: - fmt.Fprintf(writer, "%s%s: idx=top%s\n", indent, p.lineStr(typedVal.Position), p.ptrStr(typedVal)) - p.printComments(typedVal.Comments, indent, writer) - p.print(typedVal.Value, indent+indentLvl, writer) - - default: - fmt.Fprintf(writer, "%s: %v\n", indent, typedVal) - } -} - -func (p Printer) lineStr(pos *filepos.Position) string { - return pos.As4DigitString() -} - -func (p Printer) ptrStr(node Node) string { - if !p.opts.ExcludeRefs { - return fmt.Sprintf(" (obj=%p)", node) - } - return "" -} - -func (p Printer) printComments(comments []*Comment, indent string, writer io.Writer) { - for _, comment := range comments { - fmt.Fprintf(writer, "%scomment: %s: '%s'\n", indent, p.lineStr(comment.Position), comment.Data) - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/printers.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/printers.go deleted file mode 100644 index cd83782b3..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/printers.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/k14s/ytt/pkg/orderedmap" -) - -type DocumentPrinter interface { - Print(*Document) error -} - -type YAMLPrinter struct { - buf io.Writer - writtenOnce bool -} - -var _ DocumentPrinter = &YAMLPrinter{} - -func NewYAMLPrinter(writer io.Writer) *YAMLPrinter { - return &YAMLPrinter{writer, false} -} - -func (p *YAMLPrinter) Print(item *Document) error { - if p.writtenOnce { - p.buf.Write([]byte("---\n")) // TODO use encoder? - } else { - p.writtenOnce = true - } - - bs, err := item.AsYAMLBytes() - if err != nil { - return fmt.Errorf("marshaling doc: %s", err) - } - p.buf.Write(bs) - return nil -} - -type JSONPrinter struct { - buf io.Writer -} - -var _ DocumentPrinter = &JSONPrinter{} - -func NewJSONPrinter(writer io.Writer) JSONPrinter { - return JSONPrinter{writer} -} - -func (p JSONPrinter) Print(item *Document) error { - val := item.AsInterface() - - bs, err := json.Marshal(orderedmap.Conversion{val}.AsUnorderedStringMaps()) - if err != nil { - return fmt.Errorf("marshaling doc: %s", err) - } - p.buf.Write(bs) - return nil -} - -type WrappedFilePositionPrinter struct { - Printer *FilePositionPrinter -} - -var _ DocumentPrinter = WrappedFilePositionPrinter{} - -func (p WrappedFilePositionPrinter) Print(item *Document) error { - p.Printer.Print(item) - return nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/type.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/type.go deleted file mode 100644 index 163e39a5f..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/type.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -import ( - "github.com/k14s/ytt/pkg/filepos" -) - -type Type interface { - AssignTypeTo(typeable Typeable) TypeCheck - GetValueType() Type - GetDefaultValue() interface{} - CheckType(node TypeWithValues) TypeCheck - GetDefinitionPosition() *filepos.Position - String() string -} - -type TypeWithValues interface { - GetValues() []interface{} - GetPosition() *filepos.Position - ValueTypeAsString() string -} - -type Typeable interface { - TypeWithValues - - SetType(Type) -} - -var _ Typeable = (*Document)(nil) -var _ Typeable = (*Map)(nil) -var _ Typeable = (*MapItem)(nil) -var _ Typeable = (*Array)(nil) -var _ Typeable = (*ArrayItem)(nil) - -func (d *Document) SetType(t Type) { d.Type = t } -func (m *Map) SetType(t Type) { m.Type = t } -func (mi *MapItem) SetType(t Type) { mi.Type = t } -func (a *Array) SetType(t Type) { a.Type = t } -func (ai *ArrayItem) SetType(t Type) { ai.Type = t } diff --git a/vendor/github.com/k14s/ytt/pkg/yamlmeta/value_holding_node.go b/vendor/github.com/k14s/ytt/pkg/yamlmeta/value_holding_node.go deleted file mode 100644 index 760fb2c2f..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamlmeta/value_holding_node.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamlmeta - -func (d *Document) Val() interface{} { - return d.Value -} -func (mi *MapItem) Val() interface{} { - return mi.Value -} -func (ai *ArrayItem) Val() interface{} { - return ai.Value -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamltemplate/evaluation_ctx.go b/vendor/github.com/k14s/ytt/pkg/yamltemplate/evaluation_ctx.go deleted file mode 100644 index bac9446fe..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamltemplate/evaluation_ctx.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yamltemplate - -import ( - "fmt" - - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/orderedmap" - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -const ( - EvaluationCtxDialectName template.EvaluationCtxDialectName = "yaml" -) - -type EvaluationCtx struct { - implicitMapKeyOverrides bool -} - -var _ template.EvaluationCtxDialect = EvaluationCtx{} - -func (e EvaluationCtx) PrepareNode( - parentNode template.EvaluationNode, val template.EvaluationNode) error { - - if typedMap, ok := parentNode.(*yamlmeta.Map); ok { - if typedMapItem, ok := val.(*yamlmeta.MapItem); ok { - return MapItemOverride{e.implicitMapKeyOverrides}.Apply(typedMap, typedMapItem, true) - } - } - return nil -} - -func (e EvaluationCtx) SetMapItemKey(node template.EvaluationNode, val interface{}) error { - if item, ok := node.(*yamlmeta.MapItem); ok { - item.Key = val - return nil - } - - panic(fmt.Sprintf("expected node '%T' to be MapItem", node)) -} - -func (e EvaluationCtx) Replace( - parentNodes []template.EvaluationNode, val interface{}) error { - - switch typedCurrNode := parentNodes[len(parentNodes)-1].(type) { - case *yamlmeta.Document: - if len(parentNodes) < 2 { - return fmt.Errorf("expected to find document set, but was not enough parents") - } - - parentNode := parentNodes[len(parentNodes)-2] - typedParentNode, ok := parentNode.(*yamlmeta.DocumentSet) - if !ok { - return fmt.Errorf("expected to find document set, but was %T", parentNode) - } - - return e.replaceItemInDocSet(typedParentNode, typedCurrNode, val) - - case *yamlmeta.MapItem: - if len(parentNodes) < 2 { - return fmt.Errorf("expected to find map, but was not enough parents") - } - - parentNode := parentNodes[len(parentNodes)-2] - typedParentNode, ok := parentNode.(*yamlmeta.Map) - if !ok { - return fmt.Errorf("expected parent of map item to be map, but was %T", parentNode) - } - - return e.replaceItemInMap(typedParentNode, typedCurrNode, val) - - case *yamlmeta.ArrayItem: - if len(parentNodes) < 2 { - return fmt.Errorf("expected to find array, but was not enough parents") - } - - parentNode := parentNodes[len(parentNodes)-2] - typedParentNode, ok := parentNode.(*yamlmeta.Array) - if !ok { - return fmt.Errorf("expected parent of array item to be array, but was %T", parentNode) - } - - return e.replaceItemInArray(typedParentNode, typedCurrNode, val) - - default: - return fmt.Errorf("expected to replace document value, map item or array item, but found %T", typedCurrNode) - } -} - -func (e EvaluationCtx) replaceItemInDocSet(dstDocSet *yamlmeta.DocumentSet, placeholderItem *yamlmeta.Document, val interface{}) error { - insertItems, err := e.convertValToDocSetItems(val) - if err != nil { - return err - } - - for i, item := range dstDocSet.Items { - if item == placeholderItem { - newItems := dstDocSet.Items[:i] - newItems = append(newItems, insertItems...) - newItems = append(newItems, dstDocSet.Items[i+1:]...) 
- dstDocSet.Items = newItems - return nil - } - } - - return fmt.Errorf("expected to find placeholder doc in docset") -} - -func (e EvaluationCtx) convertValToDocSetItems(val interface{}) ([]*yamlmeta.Document, error) { - result := []*yamlmeta.Document{} - - switch typedVal := val.(type) { - case []interface{}: - for _, item := range typedVal { - result = append(result, &yamlmeta.Document{Value: item, Position: filepos.NewUnknownPosition()}) - } - - case *yamlmeta.DocumentSet: - result = typedVal.Items - - default: - return nil, fmt.Errorf("expected value to be docset, but was %T", val) - } - - return result, nil -} - -func (e EvaluationCtx) replaceItemInMap( - dstMap *yamlmeta.Map, placeholderItem *yamlmeta.MapItem, val interface{}) error { - - insertItems, carryMeta, err := e.convertValToMapItems(val, placeholderItem.Position.DeepCopy()) - if err != nil { - return err - } - - // If map items does not carry metadata - // we cannot check for override conflicts - for _, newItem := range insertItems { - err := MapItemOverride{e.implicitMapKeyOverrides}.Apply(dstMap, newItem, carryMeta) - if err != nil { - return err - } - } - - for i, item := range dstMap.Items { - if item == placeholderItem { - newItems := dstMap.Items[:i] - newItems = append(newItems, insertItems...) - newItems = append(newItems, dstMap.Items[i+1:]...) - dstMap.Items = newItems - return nil - } - } - - return fmt.Errorf("expected to find placeholder map item in map") -} - -func (e EvaluationCtx) convertValToMapItems(val interface{}, position *filepos.Position) ([]*yamlmeta.MapItem, bool, error) { - switch typedVal := val.(type) { - case *orderedmap.Map: - result := []*yamlmeta.MapItem{} - typedVal.Iterate(func(k, v interface{}) { - item := &yamlmeta.MapItem{Key: k, Value: yamlmeta.NewASTFromInterfaceWithPosition(v, position), Position: position} - result = append(result, item) - }) - return result, false, nil - - case *yamlmeta.Map: - return typedVal.Items, true, nil - - default: - return nil, false, fmt.Errorf("expected value to be map, but was %T", val) - } -} - -func (e EvaluationCtx) replaceItemInArray(dstArray *yamlmeta.Array, placeholderItem *yamlmeta.ArrayItem, val interface{}) error { - insertItems, err := e.convertValToArrayItems(val, placeholderItem.Position.DeepCopy()) - if err != nil { - return err - } - - for i, item := range dstArray.Items { - if item == placeholderItem { - newItems := dstArray.Items[:i] - newItems = append(newItems, insertItems...) - newItems = append(newItems, dstArray.Items[i+1:]...) 
- dstArray.Items = newItems - return nil - } - } - - return fmt.Errorf("expected to find placeholder array item in array") -} - -func (e EvaluationCtx) convertValToArrayItems(val interface{}, position *filepos.Position) ([]*yamlmeta.ArrayItem, error) { - result := []*yamlmeta.ArrayItem{} - - switch typedVal := val.(type) { - case []interface{}: - for _, item := range typedVal { - result = append(result, &yamlmeta.ArrayItem{Value: yamlmeta.NewASTFromInterfaceWithPosition(item, position), Position: position}) - } - - case *yamlmeta.Array: - result = typedVal.Items - - default: - return nil, fmt.Errorf("expected value to be array, but was %T", val) - } - - return result, nil -} - -func (e EvaluationCtx) ShouldWrapRootValue(nodeVal interface{}) bool { - switch nodeVal.(type) { - case *yamlmeta.Document, *yamlmeta.MapItem, *yamlmeta.ArrayItem: - return true - default: - return false - } -} - -func (e EvaluationCtx) WrapRootValue(val interface{}) interface{} { - return &StarlarkFragment{val} -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamltemplate/go_value_with_yaml.go b/vendor/github.com/k14s/ytt/pkg/yamltemplate/go_value_with_yaml.go deleted file mode 100644 index a8381a1c3..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamltemplate/go_value_with_yaml.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamltemplate - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - tplcore "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -func NewGoValueWithYAML(val interface{}) tplcore.GoValue { - convertFunc := func(valToConvert interface{}) (starlark.Value, bool) { - switch valToConvert.(type) { - case *yamlmeta.Map, *yamlmeta.Array, *yamlmeta.DocumentSet: - return &StarlarkFragment{valToConvert}, true - case *yamlmeta.MapItem, *yamlmeta.ArrayItem, *yamlmeta.Document: - panic(fmt.Sprintf("NewComplexGoValue: Unexpected %T in conversion of fragment", valToConvert)) - default: - return starlark.None, false - } - } - return tplcore.NewGoValueWithOpts(val, tplcore.GoValueOpts{Convert: convertFunc}) -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamltemplate/map_key_overrides.go b/vendor/github.com/k14s/ytt/pkg/yamltemplate/map_key_overrides.go deleted file mode 100644 index 4234ac313..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamltemplate/map_key_overrides.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamltemplate - -import ( - "fmt" - - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -const ( - AnnotationMapKeyOverride template.AnnotationName = "yaml/map-key-override" -) - -type MapItemOverride struct { - implicit bool -} - -func (d MapItemOverride) Apply( - typedMap *yamlmeta.Map, newItem *yamlmeta.MapItem, strict bool) error { - - itemIndex := map[interface{}]int{} - - for idx, item := range typedMap.Items { - itemIndex[item.Key] = idx - } - - if prevIdx, ok := itemIndex[newItem.Key]; ok { - if d.implicit || template.NewAnnotations(newItem).Has(AnnotationMapKeyOverride) || !strict { - typedMap.Items = append(typedMap.Items[:prevIdx], typedMap.Items[prevIdx+1:]...) 
- return nil - } - - return fmt.Errorf("expected key '%s' to not be specified again "+ - "(unless '%s' annotation is added)", newItem.Key, AnnotationMapKeyOverride) - } - - return nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamltemplate/metas.go b/vendor/github.com/k14s/ytt/pkg/yamltemplate/metas.go deleted file mode 100644 index 5b103385f..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamltemplate/metas.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamltemplate - -import ( - "fmt" - "strings" - "unicode" - - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -var ( - nodeSpecificKeywords = map[string]string{ - "if/end": "if", - "else/end": "else", - "elif/end": "elif", - "for/end": "for", - "def/end": "def", - } -) - -type Metas struct { - Block []*yamlmeta.Comment // meant to execute some code - Values []*yamlmeta.Comment // meant to return interpolated value - Annotations []CommentAndAnnotation - needsEnds int -} - -type CommentAndAnnotation struct { - Comment *yamlmeta.Comment - Annotation *template.Annotation -} - -type MetasOpts struct { - IgnoreUnknown bool -} - -func NewTemplateMetaFromYAMLComment(comment *yamlmeta.Comment, opts MetasOpts) (template.Meta, error) { - meta, err := template.NewMetaFromString(comment.Data, template.MetaOpts{IgnoreUnknown: opts.IgnoreUnknown}) - if err != nil { - return template.Meta{}, fmt.Errorf( - "Non-ytt comment at %s: '#%s': %s. (hint: if this is plain YAML — not a template — consider `--file-mark ':type=yaml-plain'`)", - comment.Position.AsString(), comment.Data, err) - } - return meta, nil -} - -func NewMetas(node yamlmeta.Node, opts MetasOpts) (Metas, error) { - metas := Metas{} - - for _, comment := range node.GetComments() { - meta, err := NewTemplateMetaFromYAMLComment(comment, opts) - if err != nil { - return metas, err - } - - for _, ann := range meta.Annotations { - if len(ann.Name) == 0 { - // Default code and value annotations to make templates less verbose - ann.Name = template.AnnotationCode - - if node.GetPosition().IsKnown() { - if comment.Position.LineNum() == node.GetPosition().LineNum() { - if len(node.GetValues()) > 0 && node.GetValues()[0] != nil { - return metas, fmt.Errorf( - "Expected YAML node at %s to have either computed or YAML value, but found both", - comment.Position.AsString()) - } - - ann.Name = template.AnnotationValue - } - } - } - - switch ann.Name { - case template.AnnotationValue: - metas.Values = append(metas.Values, &yamlmeta.Comment{ - Position: comment.Position, - Data: ann.Content, - }) - - case template.AnnotationCode: - if metas.needsEnds > 0 { - return metas, fmt.Errorf( - "Unexpected code at %s after use of '*/end', expected YAML node", - comment.Position.AsString()) - } - - code := ann.Content - spacePrefix := metas.spacePrefix(code) - - for keyword, replacementKeyword := range nodeSpecificKeywords { - if strings.HasPrefix(code, spacePrefix+keyword) { - metas.needsEnds++ - code = strings.Replace(code, spacePrefix+keyword, spacePrefix+replacementKeyword, 1) - } - } - - metas.Block = append(metas.Block, &yamlmeta.Comment{ - Position: comment.Position, - Data: code, - }) - - case template.AnnotationComment: - // ignore - - default: - metas.Annotations = append(metas.Annotations, CommentAndAnnotation{comment, ann}) - } - } - } - - return metas, nil -} - -func (m Metas) NeedsEnd() bool { return m.needsEnds != 0 } - -func (m Metas) spacePrefix(str string) string { - for i, r := range str { - if 
!unicode.IsSpace(r) { - return str[:i] - } - } - return "" -} diff --git a/vendor/github.com/k14s/ytt/pkg/yamltemplate/starlark_fragment.go b/vendor/github.com/k14s/ytt/pkg/yamltemplate/starlark_fragment.go deleted file mode 100644 index 0b1087298..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamltemplate/starlark_fragment.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamltemplate - -import ( - "fmt" - "reflect" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/syntax" - tplcore "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -const ( - starlarkFragmentType = "yamlfragment" -) - -type StarlarkFragment struct { - data interface{} -} - -var _ starlark.Value = &StarlarkFragment{} -var _ starlark.Comparable = (*StarlarkFragment)(nil) -var _ starlark.Sequence = (*StarlarkFragment)(nil) -var _ starlark.IterableMapping = (*StarlarkFragment)(nil) -var _ starlark.HasSetKey = (*StarlarkFragment)(nil) -var _ starlark.HasSetIndex = (*StarlarkFragment)(nil) -var _ starlark.Sliceable = (*StarlarkFragment)(nil) - -var _ tplcore.StarlarkValueToGoValueConversion = &StarlarkFragment{} -var _ tplcore.GoValueToStarlarkValueConversion = &StarlarkFragment{} - -func NewStarlarkFragment(data interface{}) *StarlarkFragment { - return &StarlarkFragment{data} -} - -func (s *StarlarkFragment) String() string { - return fmt.Sprintf("%s(%T)", starlarkFragmentType, s.data) -} -func (s *StarlarkFragment) Type() string { return starlarkFragmentType } -func (s *StarlarkFragment) Freeze() {} // TODO -func (s *StarlarkFragment) Truth() starlark.Bool { return starlark.Bool(s.Len() > 0) } -func (s *StarlarkFragment) Hash() (uint32, error) { - return 0, fmt.Errorf("unhashable type: %s", starlarkFragmentType) -} - -func (s *StarlarkFragment) AsGoValue() (interface{}, error) { return s.data, nil } -func (s *StarlarkFragment) AsStarlarkValue() starlark.Value { return s } - -func (s *StarlarkFragment) CompareSameType(op syntax.Token, y starlark.Value, depth int) (bool, error) { - return false, fmt.Errorf("%s.CompareSameType: Not implemented", starlarkFragmentType) // TODO -} - -func (s *StarlarkFragment) Get(k starlark.Value) (v starlark.Value, found bool, err error) { - wantedKey, err := tplcore.NewStarlarkValue(k).AsGoValue() - if err != nil { - return starlark.None, false, err - } - - switch typedData := s.data.(type) { - case nil: - // do nothing - - case *yamlmeta.Map: - for _, item := range typedData.Items { - if reflect.DeepEqual(item.Key, wantedKey) { - return NewGoValueWithYAML(item.Value).AsStarlarkValue(), true, nil - } - } - - case *yamlmeta.Array: - wantedInt, ok := wantedKey.(int64) - if !ok { - return starlark.None, false, fmt.Errorf( - "%s.Get: Expected array index to be an int64, but was %T", starlarkFragmentType, wantedKey) - } - - for i, item := range typedData.Items { - if wantedInt == int64(i) { - return NewGoValueWithYAML(item.Value).AsStarlarkValue(), true, nil - } - } - - case *yamlmeta.DocumentSet: - wantedInt, ok := wantedKey.(int64) - if !ok { - return starlark.None, false, fmt.Errorf( - "%s.Get: Expected document set index to be an int64, but was %T", wantedKey, starlarkFragmentType) - } - - for i, item := range typedData.Items { - if wantedInt == int64(i) { - return NewGoValueWithYAML(item.Value).AsStarlarkValue(), true, nil - } - } - - default: - panic(fmt.Sprintf("%s.Get: Expected value to be a map, array or docset, but was %T", - starlarkFragmentType, s.data)) - } - - 
return starlark.None, false, nil -} - -func (s *StarlarkFragment) SetKey(k, v starlark.Value) error { - return fmt.Errorf("%s.SetKey: Not implemented", starlarkFragmentType) // TODO -} - -func (s *StarlarkFragment) Index(i int) starlark.Value { - switch typedData := s.data.(type) { - case *yamlmeta.Array: - return NewGoValueWithYAML(typedData.Items[i].Value).AsStarlarkValue() - case *yamlmeta.DocumentSet: - return NewGoValueWithYAML(typedData.Items[i].Value).AsStarlarkValue() - default: - panic(fmt.Sprintf("%s.Index: Expected value to be a array or docset, but was %T", starlarkFragmentType, s.data)) - } -} - -func (s *StarlarkFragment) SetIndex(index int, v starlark.Value) error { - return fmt.Errorf("%s.SetIndex: Not implemented", starlarkFragmentType) // TODO -} - -func (s *StarlarkFragment) Len() int { - switch typedData := s.data.(type) { - case nil: - return 0 - case *yamlmeta.Map: - return len(typedData.Items) - case *yamlmeta.Array: - return len(typedData.Items) - case *yamlmeta.DocumentSet: - return len(typedData.Items) - default: - panic(fmt.Sprintf("%s.Len: Expected value to be a map, array or docset, but was %T", starlarkFragmentType, s.data)) - } -} - -// Items seems to be only used for splatting kwargs -func (s *StarlarkFragment) Items() []starlark.Tuple { - switch typedData := s.data.(type) { - case nil: - return []starlark.Tuple{} - - case *yamlmeta.Map: - var result []starlark.Tuple - for _, item := range typedData.Items { - result = append(result, starlark.Tuple{ - NewGoValueWithYAML(item.Key).AsStarlarkValue(), - NewGoValueWithYAML(item.Value).AsStarlarkValue(), - }) - } - return result - - default: - panic(fmt.Sprintf("%s.Items: Expected value to be a map, but was %T", starlarkFragmentType, s.data)) - } -} - -func (s *StarlarkFragment) Slice(start, end, step int) starlark.Value { - panic(fmt.Sprintf("%s.Slice: Not implemented", starlarkFragmentType)) // TODO -} - -func (s *StarlarkFragment) Iterate() starlark.Iterator { - switch typedData := s.data.(type) { - case nil: - return StarlarkFragmentNilIterator{} - case *yamlmeta.Map: - return &StarlarkFragmentKeysIterator{data: typedData} - case *yamlmeta.Array: - return &StarlarkFragmentValuesIterator{data: typedData} - case *yamlmeta.DocumentSet: - return &StarlarkFragmentValuesIterator{data: typedData} - default: - panic(fmt.Sprintf("%s.Iterate: Expected value to be a map, array or docset, but was %T", starlarkFragmentType, s.data)) - } -} - -type StarlarkFragmentNilIterator struct{} - -func (s StarlarkFragmentNilIterator) Next(p *starlark.Value) bool { return false } -func (s StarlarkFragmentNilIterator) Done() {} - -type StarlarkFragmentKeysIterator struct { - data *yamlmeta.Map - idx int -} - -func (s *StarlarkFragmentKeysIterator) Next(p *starlark.Value) bool { - if s.idx < len(s.data.Items) { - var val starlark.Value = NewGoValueWithYAML(s.data.Items[s.idx].Key).AsStarlarkValue() - *p = val - s.idx++ - return true - } - return false -} - -func (s *StarlarkFragmentKeysIterator) Done() {} - -type StarlarkFragmentValuesIterator struct { - data yamlmeta.Node - idx int -} - -func (s *StarlarkFragmentValuesIterator) Next(p *starlark.Value) bool { - if s.idx < len(s.data.GetValues()) { - var val starlark.Value - - switch typedData := s.data.(type) { - case *yamlmeta.Array: - val = NewGoValueWithYAML(typedData.Items[s.idx].Value).AsStarlarkValue() - case *yamlmeta.DocumentSet: - val = NewGoValueWithYAML(typedData.Items[s.idx].Value).AsStarlarkValue() - default: - panic(fmt.Sprintf("%s.Next: Expected value to be a array or 
docset, but was %T", starlarkFragmentType, s.data)) - } - *p = val - s.idx++ - return true - } - return false -} - -func (s *StarlarkFragmentValuesIterator) Done() {} diff --git a/vendor/github.com/k14s/ytt/pkg/yamltemplate/template.go b/vendor/github.com/k14s/ytt/pkg/yamltemplate/template.go deleted file mode 100644 index 17cea66d6..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yamltemplate/template.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yamltemplate - -import ( - "bytes" - "fmt" - - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/texttemplate" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -const ( - AnnotationTextTemplatedStrings template.AnnotationName = "yaml/text-templated-strings" -) - -type Template struct { - name string - opts TemplateOpts - docSet *yamlmeta.DocumentSet - nodes *template.Nodes - instructions *template.InstructionSet - - // memoized source lines - srcLinesByLine map[int]string -} - -type TemplateOpts struct { - IgnoreUnknownComments bool - ImplicitMapKeyOverrides bool -} - -func HasTemplating(node yamlmeta.Node) bool { - return hasTemplating(node) -} - -func hasTemplating(val interface{}) bool { - node, ok := val.(yamlmeta.Node) - if !ok { - return false - } - - metaOpts := MetasOpts{IgnoreUnknown: true} - for _, comment := range node.GetComments() { - meta, err := NewTemplateMetaFromYAMLComment(comment, metaOpts) - if err != nil { - return false - } - for _, meta := range meta.Annotations { - if meta.Name != template.AnnotationNameComment { - return true - } - } - } - - for _, childVal := range node.GetValues() { - if hasTemplating(childVal) { - return true - } - } - return false -} - -func NewTemplate(name string, opts TemplateOpts) *Template { - return &Template{name: name, opts: opts, instructions: template.NewInstructionSet()} -} - -func (e *Template) Compile(docSet *yamlmeta.DocumentSet) (*template.CompiledTemplate, error) { - e.docSet = docSet - e.nodes = template.NewNodes() - - code, err := e.build(docSet, nil, template.NodeTagRoot, buildOpts{}) - if err != nil { - return nil, err - } - - code = append([]template.Line{ - e.resetCtxType(), - {Instruction: e.instructions.NewStartCtx(EvaluationCtxDialectName)}, - }, code...) 
- - code = append(code, template.Line{ - Instruction: e.instructions.NewEndCtxNone(), // TODO ideally we would return array of docset - }) - - return template.NewCompiledTemplate(e.name, code, e.instructions, e.nodes, template.EvaluationCtxDialects{ - EvaluationCtxDialectName: EvaluationCtx{ - implicitMapKeyOverrides: e.opts.ImplicitMapKeyOverrides, - }, - texttemplate.EvaluationCtxDialectName: texttemplate.EvaluationCtx{}, - }), nil -} - -type buildOpts struct { - TextTemplatedStrings bool -} - -func (e *Template) build(val interface{}, parentNode yamlmeta.Node, parentTag template.NodeTag, opts buildOpts) ([]template.Line, error) { - node, ok := val.(yamlmeta.Node) - if !ok { - if valStr, ok := val.(string); ok && opts.TextTemplatedStrings { - return e.buildString(valStr, parentNode, parentTag, e.instructions.NewSetNodeValue) - } - - return []template.Line{{ - Instruction: e.instructions.NewSetNode(parentTag).WithDebug(e.debugComment(parentNode)), - SourceLine: e.newSourceLine(parentNode.GetPosition()), - }}, nil - } - - code := []template.Line{} - nodeTag := e.nodes.AddNode(node, parentTag) - - metas, err := NewMetas(node, MetasOpts{IgnoreUnknown: e.opts.IgnoreUnknownComments}) - if err != nil { - return nil, err - } - - if e.allowsTextTemplatedStrings(metas) { - opts.TextTemplatedStrings = true - } - - for _, blk := range metas.Block { - code = append(code, template.Line{ - Instruction: e.instructions.NewCode(blk.Data), - SourceLine: e.newSourceLine(blk.Position), - }) - } - - for _, metaAndAnn := range metas.Annotations { - code = append(code, template.Line{ - Instruction: e.instructions.NewStartNodeAnnotation(nodeTag, *metaAndAnn.Annotation).WithDebug(e.debugComment(node)), - SourceLine: e.newSourceLine(metaAndAnn.Comment.Position), - }) - } - - if typedNode, ok := val.(*yamlmeta.MapItem); ok { - if keyStr, ok := typedNode.Key.(string); ok && opts.TextTemplatedStrings { - templateLines, err := e.buildString(keyStr, node, nodeTag, e.instructions.NewSetMapItemKey) - if err != nil { - return nil, err - } - code = append(code, templateLines...) - } - } - - code = append(code, template.Line{ - Instruction: e.instructions.NewStartNode(nodeTag).WithDebug(e.debugComment(node)), - SourceLine: e.newSourceLine(node.GetPosition()), - }) - - if len(metas.Values) > 0 { - for _, val := range metas.Values { - code = append(code, template.Line{ - Instruction: e.instructions.NewSetNodeValue(nodeTag, val.Data).WithDebug(e.debugComment(node)), - SourceLine: e.newSourceLine(val.Position), - }) - } - } else { - for _, childVal := range node.GetValues() { - childCode, err := e.build(childVal, node, nodeTag, opts) - if err != nil { - return nil, err - } - code = append(code, childCode...) - } - } - - if metas.NeedsEnd() { - code = append(code, template.Line{ - // TODO should we set position to start node? 
- Instruction: e.instructions.NewCode("end"), - }) - } - - return code, nil -} - -func (e *Template) allowsTextTemplatedStrings(metas Metas) bool { - // TODO potentially use template.NewAnnotations(node).Has(AnnotationTextTemplatedStrings) - // however if node was not processed by the template, it wont have any annotations set - for _, metaAndAnn := range metas.Annotations { - if metaAndAnn.Annotation.Name == AnnotationTextTemplatedStrings { - return true - } - } - return false -} - -func (e *Template) buildString(val string, node yamlmeta.Node, nodeTag template.NodeTag, - instruction func(template.NodeTag, string) template.Instruction) ([]template.Line, error) { - - // TODO line numbers for inlined template are somewhat correct - // (does not handle pipe-multi-line string format - off by 1) - textRoot, err := texttemplate.NewParser().ParseWithPosition([]byte(val), e.name, node.GetPosition()) - if err != nil { - return nil, err - } - - code, err := texttemplate.NewTemplate(e.name).CompileInline(textRoot, e.instructions, e.nodes) - if err != nil { - return nil, err - } - - lastInstruction := code[len(code)-1].Instruction - if lastInstruction.Op() != e.instructions.EndCtx { - return nil, fmt.Errorf("Expected last instruction to be endctx, but was %#v", lastInstruction.Op()) - } - - code[len(code)-1] = template.Line{ - Instruction: instruction(nodeTag, lastInstruction.AsString()).WithDebug(e.debugComment(node)), - SourceLine: e.newSourceLine(node.GetPosition()), - } - - code = append(code, e.resetCtxType()) - - code = e.wrapCodeWithSourceLines(code) - - return code, nil -} - -func (e *Template) resetCtxType() template.Line { - return template.Line{ - Instruction: e.instructions.NewSetCtxType(EvaluationCtxDialectName), - } -} - -func (e *Template) debugComment(node yamlmeta.Node) string { - var details string - - switch typedNode := node.(type) { - case *yamlmeta.MapItem: - details = fmt.Sprintf(" key=%s", typedNode.Key) - case *yamlmeta.ArrayItem: - details = " idx=?" - } - - return fmt.Sprintf("%T%s", node, details) // TODO, node.GetRef()) -} - -func (e *Template) newSourceLine(pos *filepos.Position) *template.SourceLine { - if pos.IsKnown() { - if content, ok := e.sourceCodeLines()[pos.LineNum()]; ok { - return template.NewSourceLine(pos, content) - } - } - return nil -} - -func (e *Template) sourceCodeLines() map[int]string { - if e.srcLinesByLine != nil { - return e.srcLinesByLine - } - - e.srcLinesByLine = map[int]string{} - - if sourceCode, present := e.docSet.AsSourceBytes(); present { - for i, line := range bytes.Split(sourceCode, []byte("\n")) { - e.srcLinesByLine[filepos.NewPosition(i+1).LineNum()] = string(line) - } - } - - return e.srcLinesByLine -} - -func (e *Template) wrapCodeWithSourceLines(code []template.Line) []template.Line { - var wrappedCode []template.Line - for _, line := range code { - if line.SourceLine != nil { - newSrcLine := e.newSourceLine(line.SourceLine.Position) - if newSrcLine == nil { - panic("Expected to find associated source line") - } - newSrcLine.Selection = line.SourceLine - line.SourceLine = newSrcLine - } - wrappedCode = append(wrappedCode, line) - } - return wrappedCode -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/all.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/all.go deleted file mode 100644 index 95868e53a..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/all.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - tplcore "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yttlibrary/overlay" -) - -type API struct { - modules map[string]starlark.StringDict -} - -func NewAPI(replaceNodeFunc tplcore.StarlarkFunc, dataMod DataModule, - libraryMod starlark.StringDict) API { - - return API{map[string]starlark.StringDict{ - "assert": AssertAPI, - "regexp": RegexpAPI, - - // Hashes - "md5": MD5API, - "sha256": SHA256API, - - // Serializations - "base64": Base64API, - "json": JSONAPI, - "yaml": YAMLAPI, - "url": URLAPI, - - // Templating - "template": NewTemplateModule(replaceNodeFunc).AsModule(), - "data": dataMod.AsModule(), - - // Object building - "struct": StructAPI, - "module": ModuleAPI, - "overlay": overlay.API, - - // Versioning - "version": VersionAPI, - - "library": libraryMod, - }} -} - -func (a API) FindModule(module string) (starlark.StringDict, error) { - if module, found := a.modules[module]; found { - return module, nil - } - return nil, fmt.Errorf("builtin ytt library does not have module '%s' "+ - "(hint: is it available in newer version of ytt?)", module) -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/assert.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/assert.go deleted file mode 100644 index 103d10611..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/assert.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/template/core" -) - -var ( - AssertAPI = starlark.StringDict{ - "assert": &starlarkstruct.Module{ - Name: "assert", - Members: starlark.StringDict{ - "fail": starlark.NewBuiltin("assert.fail", core.ErrWrapper(assertModule{}.Fail)), - }, - }, - } -) - -type assertModule struct{} - -func (b assertModule) Fail(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - return starlark.None, fmt.Errorf("fail: %s", val) -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/base64.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/base64.go deleted file mode 100644 index 07c560818..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/base64.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "encoding/base64" - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/template/core" -) - -var ( - Base64API = starlark.StringDict{ - "base64": &starlarkstruct.Module{ - Name: "base64", - Members: starlark.StringDict{ - "encode": starlark.NewBuiltin("base64.encode", core.ErrWrapper(base64Module{}.Encode)), - "decode": starlark.NewBuiltin("base64.decode", core.ErrWrapper(base64Module{}.Decode)), - }, - }, - } -) - -type base64Module struct{} - -func (b base64Module) Encode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - encoding, err := b.buildEncoding(kwargs) - if err != nil { - return starlark.None, err - } - - return starlark.String(encoding.EncodeToString([]byte(val))), nil -} - -func (b base64Module) Decode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - encoding, err := b.buildEncoding(kwargs) - if err != nil { - return starlark.None, err - } - - valDecoded, err := encoding.DecodeString(val) - if err != nil { - return starlark.None, err - } - - return starlark.String(string(valDecoded)), nil -} - -func (b base64Module) buildEncoding(kwargs []starlark.Tuple) (*base64.Encoding, error) { - var encoding *base64.Encoding = base64.StdEncoding - - isURL, err := core.BoolArg(kwargs, "url") - if err != nil { - return nil, err - } - if isURL { - encoding = base64.URLEncoding - } - - isRaw, err := core.BoolArg(kwargs, "raw") - if err != nil { - return nil, err - } - if isRaw { - encoding = encoding.WithPadding(base64.NoPadding) - } - - isStrict, err := core.BoolArg(kwargs, "strict") - if err != nil { - return nil, err - } - if isStrict { - encoding = encoding.Strict() - } - - return encoding, nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/data.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/data.go deleted file mode 100644 index f7bf530d4..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/data.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -type DataModule struct { - values starlark.Value - loader DataLoader -} - -type DataLoader interface { - FilePaths(string) ([]string, error) - FileData(string) ([]byte, error) -} - -func NewDataModule(values *yamlmeta.Document, loader DataLoader) DataModule { - val := core.NewGoValueWithOpts(values.AsInterface(), core.GoValueOpts{MapIsStruct: true}) - return DataModule{val.AsStarlarkValue(), loader} -} - -func (b DataModule) AsModule() starlark.StringDict { - return starlark.StringDict{ - "data": &starlarkstruct.Module{ - Name: "data", - Members: starlark.StringDict{ - "list": starlark.NewBuiltin("data.list", core.ErrWrapper(b.List)), - "read": starlark.NewBuiltin("data.read", core.ErrWrapper(b.Read)), - // TODO write? - "values": b.values, - }, - }, - } -} - -func (b DataModule) List(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() > 1 { - return starlark.None, fmt.Errorf("expected exactly zero or one argument") - } - - path := "" - - if args.Len() == 1 { - pathStr, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - path = pathStr - } - - paths, err := b.loader.FilePaths(path) - if err != nil { - return starlark.None, err - } - - result := []starlark.Value{} - for _, path := range paths { - result = append(result, starlark.String(path)) - } - return starlark.NewList(result), nil -} - -func (b DataModule) Read(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - path, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - fileBs, err := b.loader.FileData(path) - if err != nil { - return starlark.None, err - } - - return starlark.String(string(fileBs)), nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/json.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/json.go deleted file mode 100644 index 9171d2588..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/json.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/orderedmap" - "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -var ( - JSONAPI = starlark.StringDict{ - "json": &starlarkstruct.Module{ - Name: "json", - Members: starlark.StringDict{ - "encode": starlark.NewBuiltin("json.encode", core.ErrWrapper(jsonModule{}.Encode)), - "decode": starlark.NewBuiltin("json.decode", core.ErrWrapper(jsonModule{}.Decode)), - }, - }, - } - JSONKWARGS = map[string]struct{}{ - "indent": struct{}{}, - } -) - -type jsonModule struct{} - -func (b jsonModule) Encode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - if err := core.CheckArgNames(kwargs, JSONKWARGS); err != nil { - return starlark.None, err - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsGoValue() - if err != nil { - return starlark.None, err - } - val = orderedmap.Conversion{yamlmeta.NewGoFromAST(val)}.AsUnorderedStringMaps() - - var valBs []byte - indent, err := core.Int64Arg(kwargs, "indent") - if err != nil { - return starlark.None, err - } - if indent > 4 || indent < 0 { - return starlark.None, fmt.Errorf("indent value must be between 0 and 4") - } - if indent > 0 { - valBs, err = json.MarshalIndent(val, "", strings.Repeat(" ", int(indent))) - } else { - valBs, err = json.Marshal(val) - } - - if err != nil { - return starlark.None, err - } - - return starlark.String(string(valBs)), nil -} - -func (b jsonModule) Decode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - valEncoded, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - var valDecoded interface{} - - err = json.Unmarshal([]byte(valEncoded), &valDecoded) - if err != nil { - return starlark.None, err - } - - valDecoded = orderedmap.Conversion{valDecoded}.FromUnorderedMaps() - - return core.NewGoValue(valDecoded).AsStarlarkValue(), nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/md5.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/md5.go deleted file mode 100644 index cd931b15c..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/md5.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "crypto/md5" - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/template/core" -) - -var ( - MD5API = starlark.StringDict{ - "md5": &starlarkstruct.Module{ - Name: "md5", - Members: starlark.StringDict{ - "sum": starlark.NewBuiltin("md5.sum", core.ErrWrapper(md5Module{}.Sum)), - }, - }, - } -) - -type md5Module struct{} - -func (b md5Module) Sum(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - return starlark.String(fmt.Sprintf("%x", md5.Sum([]byte(val)))), nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/module.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/module.go deleted file mode 100644 index 71ad9866a..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/module.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/template/core" -) - -var ( - ModuleAPI = starlark.StringDict{ - "module": &starlarkstruct.Module{ - Name: "module", - Members: starlark.StringDict{ - "make": starlark.NewBuiltin("module.make", core.ErrWrapper(starlarkstruct.MakeModule)), - }, - }, - } -) diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/annotations.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/annotations.go deleted file mode 100644 index b31793adb..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/annotations.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -const ( - AnnotationNs template.AnnotationNs = "overlay" - - AnnotationMerge template.AnnotationName = "overlay/merge" // default - AnnotationRemove template.AnnotationName = "overlay/remove" - AnnotationReplace template.AnnotationName = "overlay/replace" - AnnotationInsert template.AnnotationName = "overlay/insert" // array only - AnnotationAppend template.AnnotationName = "overlay/append" // array only - AnnotationAssert template.AnnotationName = "overlay/assert" - - AnnotationMatch template.AnnotationName = "overlay/match" - AnnotationMatchChildDefaults template.AnnotationName = "overlay/match-child-defaults" -) - -var ( - allOps = []template.AnnotationName{ - AnnotationMerge, - AnnotationRemove, - AnnotationReplace, - AnnotationInsert, - AnnotationAppend, - AnnotationAssert, - } -) - -func whichOp(node yamlmeta.Node) (template.AnnotationName, error) { - var foundOp template.AnnotationName - - for _, op := range allOps { - if template.NewAnnotations(node).Has(op) { - if len(foundOp) > 0 { - return "", fmt.Errorf("Expected to find only one overlay operation") - } - foundOp = op - } - } - - if len(foundOp) == 0 { - foundOp = AnnotationMerge - } - - return foundOp, nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/api.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/api.go deleted file mode 100644 index 147c8bb49..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/api.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - "reflect" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamlmeta" - "github.com/k14s/ytt/pkg/yamltemplate" -) - -var ( - API = starlark.StringDict{ - "overlay": &starlarkstruct.Module{ - Name: "overlay", - Members: starlark.StringDict{ - "apply": starlark.NewBuiltin("overlay.apply", core.ErrWrapper(overlayModule{}.Apply)), - "index": starlark.NewBuiltin("overlay.index", core.ErrWrapper(overlayModule{}.Index)), - "all": starlark.NewBuiltin("overlay.all", core.ErrWrapper(overlayModule{}.All)), - "map_key": overlayModule{}.MapKey(), - "subset": starlark.NewBuiltin("overlay.subset", core.ErrWrapper(overlayModule{}.Subset)), - - "and_op": starlark.NewBuiltin("overlay.and_op", core.ErrWrapper(overlayModule{}.AndOp)), - "or_op": starlark.NewBuiltin("overlay.or_op", core.ErrWrapper(overlayModule{}.OrOp)), - "not_op": starlark.NewBuiltin("overlay.not_op", core.ErrWrapper(overlayModule{}.NotOp)), - }, - }, - } -) - -type overlayModule struct{} - -func (b overlayModule) Apply( - thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() == 0 { - return starlark.None, fmt.Errorf("expected exactly at least argument") - } - - goValue, err := core.NewStarlarkValue(args).AsGoValue() - if err != nil { - return starlark.None, err - } - typedVals := goValue.([]interface{}) - var result interface{} = typedVals[0] - - for _, right := range typedVals[1:] { - var err error - result, err = Op{Left: result, Right: right, Thread: thread}.Apply() // left is modified - if err != nil { - return starlark.None, err - } - } - - return yamltemplate.NewStarlarkFragment(result), nil -} - -func (b overlayModule) Index( - thread 
*starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - expectedIdx64, err := core.NewStarlarkValue(args.Index(0)).AsInt64() - if err != nil { - return starlark.None, err - } - - matchFunc := func(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 3 { - return starlark.None, fmt.Errorf("expected exactly 3 arguments") - } - - idx64, err := core.NewStarlarkValue(args.Index(0)).AsInt64() - if err != nil { - return starlark.None, err - } - - if expectedIdx64 == idx64 { - return starlark.Bool(true), nil - } - - return starlark.Bool(false), nil - } - - return starlark.NewBuiltin("overlay.index_matcher", core.ErrWrapper(matchFunc)), nil -} - -func (b overlayModule) All( - thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 3 { - return starlark.None, fmt.Errorf("expected exactly 3 arguments") - } - - return starlark.Bool(true), nil -} - -func (b overlayModule) MapKey() *starlark.Builtin { - return starlark.NewBuiltin("overlay.map_key", core.ErrWrapper(b.mapKey)) -} - -func (b overlayModule) mapKey( - thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - keyName, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - matchFunc := func(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 3 { - return starlark.None, fmt.Errorf("expected exactly 3 arguments") - } - - oldVal, err := core.NewStarlarkValue(args.Index(1)).AsGoValue() - if err != nil { - return starlark.None, err - } - newVal, err := core.NewStarlarkValue(args.Index(2)).AsGoValue() - if err != nil { - return starlark.None, err - } - - result, err := b.compareByMapKey(keyName, oldVal, newVal) - if err != nil { - return nil, err - } - - return starlark.Bool(result), nil - } - - return starlark.NewBuiltin("overlay.map_key_matcher", core.ErrWrapper(matchFunc)), nil -} - -func (b overlayModule) compareByMapKey(keyName string, oldVal, newVal interface{}) (bool, error) { - oldKeyVal, err := b.pullOutMapValue(keyName, oldVal) - if err != nil { - return false, err - } - - newKeyVal, err := b.pullOutMapValue(keyName, newVal) - if err != nil { - return false, err - } - - result, _ := Comparison{}.CompareLeafs(oldKeyVal, newKeyVal) - return result, nil -} - -func (b overlayModule) pullOutMapValue(keyName string, val interface{}) (interface{}, error) { - typedMap, ok := val.(*yamlmeta.Map) - if !ok { - return starlark.None, fmt.Errorf("Expected value to be map, but was %T", val) - } - - for _, item := range typedMap.Items { - if reflect.DeepEqual(item.Key, keyName) { - return item.Value, nil - } - } - - return starlark.None, fmt.Errorf("Expected to find mapitem with key '%s', but did not", keyName) -} - -func (b overlayModule) Subset( - thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - expectedArg := args.Index(0) - - matchFunc := func(thread 
*starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 3 { - return starlark.None, fmt.Errorf("expected exactly 3 arguments") - } - - leftVal, err := core.NewStarlarkValue(args.Index(1)).AsGoValue() - if err != nil { - return starlark.None, err - } - expectedVal, err := core.NewStarlarkValue(expectedArg).AsGoValue() - if err != nil { - return starlark.None, err - } - - actualObj := yamlmeta.NewASTFromInterface(leftVal) - expectedObj := yamlmeta.NewASTFromInterface(expectedVal) - - if _, ok := actualObj.(*yamlmeta.ArrayItem); ok { - expectedObj = &yamlmeta.ArrayItem{Value: expectedObj} - } - if _, ok := actualObj.(*yamlmeta.Document); ok { - expectedObj = &yamlmeta.Document{Value: expectedObj} - } - - result, _ := Comparison{}.Compare(actualObj, expectedObj) - return starlark.Bool(result), nil - } - - return starlark.NewBuiltin("overlay.subset_matcher", core.ErrWrapper(matchFunc)), nil -} - -func (b overlayModule) AndOp( - thread *starlark.Thread, f *starlark.Builtin, - andArgs starlark.Tuple, andKwargs []starlark.Tuple) (starlark.Value, error) { - - if andArgs.Len() < 1 { - return starlark.None, fmt.Errorf("expected at least one argument") - } - - matchFunc := func(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 3 { - return starlark.None, fmt.Errorf("expected exactly 3 arguments") - } - - for _, andArg := range andArgs { - result, err := starlark.Call(thread, andArg, args, kwargs) - if err != nil { - return nil, err - } - resultBool, err := core.NewStarlarkValue(result).AsBool() - if err != nil { - return nil, err - } - if !resultBool { - return starlark.Bool(false), nil - } - } - - return starlark.Bool(true), nil - } - - return starlark.NewBuiltin("overlay.and_op", core.ErrWrapper(matchFunc)), nil -} - -func (b overlayModule) OrOp( - thread *starlark.Thread, f *starlark.Builtin, - orArgs starlark.Tuple, orKwargs []starlark.Tuple) (starlark.Value, error) { - - if orArgs.Len() < 1 { - return starlark.None, fmt.Errorf("expected at least one argument") - } - - matchFunc := func(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 3 { - return starlark.None, fmt.Errorf("expected exactly 3 arguments") - } - - for _, orArg := range orArgs { - result, err := starlark.Call(thread, orArg, args, kwargs) - if err != nil { - return nil, err - } - resultBool, err := core.NewStarlarkValue(result).AsBool() - if err != nil { - return nil, err - } - if resultBool { - return starlark.Bool(true), nil - } - } - - return starlark.Bool(false), nil - } - - return starlark.NewBuiltin("overlay.or_op", core.ErrWrapper(matchFunc)), nil -} - -func (b overlayModule) NotOp( - thread *starlark.Thread, f *starlark.Builtin, - notArgs starlark.Tuple, notKwargs []starlark.Tuple) (starlark.Value, error) { - - if notArgs.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - matchFunc := func(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 3 { - return starlark.None, fmt.Errorf("expected exactly 3 arguments") - } - - result, err := starlark.Call(thread, notArgs[0], args, kwargs) - if err != nil { - return nil, err - } - resultBool, err := core.NewStarlarkValue(result).AsBool() - if err != nil { - return nil, err - } - return 
starlark.Bool(!resultBool), nil - } - - return starlark.NewBuiltin("overlay.or_op", core.ErrWrapper(matchFunc)), nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/array.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/array.go deleted file mode 100644 index b6ee71c9f..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/array.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "github.com/k14s/ytt/pkg/yamlmeta" -) - -func (o Op) mergeArrayItem( - leftArray *yamlmeta.Array, newItem *yamlmeta.ArrayItem, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - matchChildDefaults, err := NewMatchChildDefaultsAnnotation(newItem, parentMatchChildDefaults) - if err != nil { - return err - } - - ann, err := NewArrayItemMatchAnnotation(newItem, parentMatchChildDefaults, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.Indexes(leftArray) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - if len(leftIdxs) == 0 { - return o.appendArrayItem(leftArray, newItem) - } - - for _, leftIdx := range leftIdxs { - replace := true - if leftArray.Items[leftIdx].Value != nil { - replace, err = o.apply(leftArray.Items[leftIdx].Value, newItem.Value, matchChildDefaults) - if err != nil { - return err - } - } - if replace { - // left side type and metas are preserved - err := leftArray.Items[leftIdx].SetValue(newItem.Value) - if err != nil { - return err - } - leftArray.Items[leftIdx].SetPosition(newItem.Position) - } - } - - return nil -} - -func (o Op) removeArrayItem( - leftArray *yamlmeta.Array, newItem *yamlmeta.ArrayItem, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - ann, err := NewArrayItemMatchAnnotation(newItem, parentMatchChildDefaults, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.Indexes(leftArray) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - for _, leftIdx := range leftIdxs { - leftArray.Items[leftIdx] = nil - } - - // Prune out all nil items - updatedItems := []*yamlmeta.ArrayItem{} - - for _, item := range leftArray.Items { - if item != nil { - updatedItems = append(updatedItems, item) - } - } - - leftArray.Items = updatedItems - - return nil -} - -func (o Op) replaceArrayItem( - leftArray *yamlmeta.Array, newItem *yamlmeta.ArrayItem, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - ann, err := NewArrayItemMatchAnnotation(newItem, parentMatchChildDefaults, o.Thread) - if err != nil { - return err - } - - replaceAnn, err := NewReplaceAnnotation(newItem, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.Indexes(leftArray) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - for _, leftIdx := range leftIdxs { - newVal, err := replaceAnn.Value(leftArray.Items[leftIdx]) - if err != nil { - return err - } - - // left side fields are not preserved. 
- // probably need to rethink how to merge left and right once those fields are needed - leftArray.Items[leftIdx] = newItem.DeepCopy() - err = leftArray.Items[leftIdx].SetValue(newVal) - if err != nil { - return err - } - } - - if len(leftIdxs) == 0 && replaceAnn.OrAdd() { - newVal, err := replaceAnn.Value(nil) - if err != nil { - return err - } - - leftArray.Items = append(leftArray.Items, newItem.DeepCopy()) - err = leftArray.Items[len(leftArray.Items)-1].SetValue(newVal) - if err != nil { - return err - } - } - - return nil -} - -func (o Op) insertArrayItem( - leftArray *yamlmeta.Array, newItem *yamlmeta.ArrayItem, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - ann, err := NewArrayItemMatchAnnotation(newItem, parentMatchChildDefaults, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.Indexes(leftArray) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - insertAnn, err := NewInsertAnnotation(newItem) - if err != nil { - return err - } - - updatedItems := []*yamlmeta.ArrayItem{} - - for i, leftItem := range leftArray.Items { - matched := false - for _, leftIdx := range leftIdxs { - if i == leftIdx { - matched = true - if insertAnn.IsBefore() { - updatedItems = append(updatedItems, newItem.DeepCopy()) - } - updatedItems = append(updatedItems, leftItem) - if insertAnn.IsAfter() { - updatedItems = append(updatedItems, newItem.DeepCopy()) - } - break - } - } - if !matched { - updatedItems = append(updatedItems, leftItem) - } - } - - leftArray.Items = updatedItems - - return nil -} - -func (o Op) appendArrayItem( - leftArray *yamlmeta.Array, newItem *yamlmeta.ArrayItem) error { - - // No need to traverse further - leftArray.Items = append(leftArray.Items, newItem.DeepCopy()) - return nil -} - -func (o Op) assertArrayItem( - leftArray *yamlmeta.Array, newItem *yamlmeta.ArrayItem, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - matchChildDefaults, err := NewMatchChildDefaultsAnnotation(newItem, parentMatchChildDefaults) - if err != nil { - return err - } - - ann, err := NewArrayItemMatchAnnotation(newItem, parentMatchChildDefaults, o.Thread) - if err != nil { - return err - } - - testAnn, err := NewAssertAnnotation(newItem, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.Indexes(leftArray) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - for _, leftIdx := range leftIdxs { - err := testAnn.Check(leftArray.Items[leftIdx]) - if err != nil { - return err - } - - _, err = o.apply(leftArray.Items[leftIdx].Value, newItem.Value, matchChildDefaults) - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/array_item_match_annotation.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/array_item_match_annotation.go deleted file mode 100644 index 41df874a9..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/array_item_match_annotation.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/template" - tplcore "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamlmeta" - "github.com/k14s/ytt/pkg/yamltemplate" -) - -type ArrayItemMatchAnnotation struct { - newItem *yamlmeta.ArrayItem - thread *starlark.Thread - - matcher *starlark.Value - expects MatchAnnotationExpectsKwarg - unannotated bool -} - -func NewArrayItemMatchAnnotation(newItem *yamlmeta.ArrayItem, - defaults MatchChildDefaultsAnnotation, - thread *starlark.Thread) (ArrayItemMatchAnnotation, error) { - - annotation := ArrayItemMatchAnnotation{ - newItem: newItem, - thread: thread, - expects: MatchAnnotationExpectsKwarg{thread: thread}, - } - anns := template.NewAnnotations(newItem) - - if !anns.Has(AnnotationMatch) { - var expectsNone starlark.Value = starlark.MakeInt(0) - annotation.expects = MatchAnnotationExpectsKwarg{ - expects: &expectsNone, - thread: thread, - } - annotation.unannotated = true - return annotation, nil - } - - kwargs := anns.Kwargs(AnnotationMatch) - if len(kwargs) == 0 { - return annotation, fmt.Errorf("Expected '%s' annotation to have "+ - "at least one keyword argument (by=..., expects=...)", AnnotationMatch) - } - - for _, kwarg := range kwargs { - kwargName := string(kwarg[0].(starlark.String)) - switch kwargName { - case MatchAnnotationKwargBy: - annotation.matcher = &kwarg[1] - case MatchAnnotationKwargExpects: - annotation.expects.expects = &kwarg[1] - case MatchAnnotationKwargMissingOK: - annotation.expects.missingOK = &kwarg[1] - case MatchAnnotationKwargWhen: - annotation.expects.when = &kwarg[1] - default: - return annotation, fmt.Errorf( - "Unknown '%s' annotation keyword argument '%s'", AnnotationMatch, kwargName) - } - } - - annotation.expects.FillInDefaults(defaults) - - return annotation, nil -} - -func (a ArrayItemMatchAnnotation) Indexes(leftArray *yamlmeta.Array) ([]int, error) { - idxs, matches, err := a.MatchNodes(leftArray) - if err != nil { - return nil, err - } - - return idxs, a.expects.Check(matches) -} - -func (a ArrayItemMatchAnnotation) MatchNodes(leftArray *yamlmeta.Array) ([]int, []*filepos.Position, error) { - if a.unannotated { - return nil, nil, nil - } - - matcher := a.matcher - - if matcher == nil { - return nil, nil, fmt.Errorf("Expected '%s' annotation "+ - "keyword argument 'by' to be specified", AnnotationMatch) - } - - if _, ok := (*matcher).(starlark.String); ok { - matcherFunc, err := starlark.Call(a.thread, overlayModule{}.MapKey(), - starlark.Tuple{*matcher}, []starlark.Tuple{}) - if err != nil { - return nil, nil, err - } - - matcher = &matcherFunc - } - - switch typedVal := (*matcher).(type) { - case starlark.Callable: - var leftIdxs []int - var matches []*filepos.Position - - for i, item := range leftArray.Items { - matcherArgs := starlark.Tuple{ - starlark.MakeInt(i), - yamltemplate.NewGoValueWithYAML(item.Value).AsStarlarkValue(), - yamltemplate.NewGoValueWithYAML(a.newItem.Value).AsStarlarkValue(), - } - - // TODO check thread correctness - result, err := starlark.Call(a.thread, *matcher, matcherArgs, []starlark.Tuple{}) - if err != nil { - return nil, nil, err - } - - resultBool, err := tplcore.NewStarlarkValue(result).AsBool() - if err != nil { - return nil, nil, err - } - if resultBool { - leftIdxs = append(leftIdxs, i) - matches = append(matches, item.Position) - } - } - - return leftIdxs, matches, nil - - default: - return nil, nil, 
fmt.Errorf("Expected '%s' annotation keyword argument 'by' "+ - "to be either string (for map key) or function, but was %T", AnnotationMatch, typedVal) - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/assert_annotation.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/assert_annotation.go deleted file mode 100644 index c3661fea0..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/assert_annotation.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/template" - tplcore "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamlmeta" - "github.com/k14s/ytt/pkg/yamltemplate" -) - -type AssertAnnotation struct { - newNode template.EvaluationNode - thread *starlark.Thread - via *starlark.Value -} - -func NewAssertAnnotation(newNode template.EvaluationNode, thread *starlark.Thread) (AssertAnnotation, error) { - annotation := AssertAnnotation{ - newNode: newNode, - thread: thread, - } - kwargs := template.NewAnnotations(newNode).Kwargs(AnnotationAssert) - - for _, kwarg := range kwargs { - kwargName := string(kwarg[0].(starlark.String)) - switch kwargName { - case "via": - annotation.via = &kwarg[1] - default: - return annotation, fmt.Errorf( - "Unknown '%s' annotation keyword argument '%s'", AnnotationAssert, kwargName) - } - } - - return annotation, nil -} - -func (a AssertAnnotation) Check(existingNode template.EvaluationNode) error { - // Make sure original nodes are not affected in any way - existingNode = existingNode.DeepCopyAsInterface().(template.EvaluationNode) - newNode := a.newNode.DeepCopyAsInterface().(template.EvaluationNode) - - // TODO currently assumes that we can always get at least one value - existingVal := existingNode.GetValues()[0] - newVal := newNode.GetValues()[0] - - if a.via == nil { - actualObj := yamlmeta.NewASTFromInterface(existingVal) - expectedObj := yamlmeta.NewASTFromInterface(newVal) - - // TODO use generic equal function from our library? 
- equal, desc := Comparison{}.Compare(actualObj, expectedObj) - if !equal { - return fmt.Errorf("Expected objects to equal, but did not: %s", desc) - } - return nil - } - - switch typedVal := (*a.via).(type) { - case starlark.Callable: - viaArgs := starlark.Tuple{ - yamltemplate.NewGoValueWithYAML(existingVal).AsStarlarkValue(), - yamltemplate.NewGoValueWithYAML(newVal).AsStarlarkValue(), - } - - result, err := starlark.Call(a.thread, *a.via, viaArgs, []starlark.Tuple{}) - if err != nil { - return err - } - - switch typedResult := result.(type) { - case nil, starlark.NoneType: - // Assume if via didnt error then it's successful - return nil - - case starlark.Bool: - if !bool(typedResult) { - return fmt.Errorf("Expected via invocation to return true, but was false") - } - return nil - - default: - result, err := tplcore.NewStarlarkValue(result).AsGoValue() - if err != nil { - return err - } - - // Extract result tuple(bool, string) to determine success - if typedResult, ok := result.([]interface{}); ok { - if len(typedResult) == 2 { - resultSuccess, ok1 := typedResult[0].(bool) - resultMsg, ok2 := typedResult[1].(string) - if ok1 && ok2 { - if !resultSuccess { - return fmt.Errorf("Expected via invocation to return true, "+ - "but was false with message: %s", resultMsg) - } - return nil - } - } - } - - return fmt.Errorf("Expected via invocation to return NoneType, " + - "Bool or Tuple(Bool,String), but returned neither of those") - } - - default: - return fmt.Errorf("Expected '%s' annotation keyword argument 'via'"+ - " to be function, but was %T", AnnotationAssert, typedVal) - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/compare.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/compare.go deleted file mode 100644 index 9b3a0591b..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/compare.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - "reflect" - - "github.com/k14s/ytt/pkg/yamlmeta" -) - -type Comparison struct{} - -func (b Comparison) Compare(left, right interface{}) (bool, string) { - switch typedRight := right.(type) { - case *yamlmeta.DocumentSet: - panic("Unexpected docset") - - case *yamlmeta.Document: - typedLeft, isDoc := left.(*yamlmeta.Document) - if !isDoc { - return false, fmt.Sprintf("Expected doc, but was %T", left) - } - - return b.Compare(typedLeft.Value, typedRight.Value) - - case *yamlmeta.Map: - typedLeft, isMap := left.(*yamlmeta.Map) - if !isMap { - return false, fmt.Sprintf("Expected map, but was %T", left) - } - - for _, rightItem := range typedRight.Items { - matched := false - for _, leftItem := range typedLeft.Items { - if reflect.DeepEqual(leftItem.Key, rightItem.Key) { - result, explain := b.Compare(leftItem, rightItem) - if !result { - return false, explain - } - matched = true - } - } - if !matched { - return false, "Expected at least one map item to match by key" - } - } - - return true, "" - - case *yamlmeta.MapItem: - typedLeft, isMapItem := left.(*yamlmeta.MapItem) - if !isMapItem { - return false, fmt.Sprintf("Expected mapitem, but was %T", left) - } - - return b.Compare(typedLeft.Value, typedRight.Value) - - case *yamlmeta.Array: - typedLeft, isArray := left.(*yamlmeta.Array) - if !isArray { - return false, fmt.Sprintf("Expected array, but was %T", left) - } - - for i, item := range typedRight.Items { - if i >= len(typedLeft.Items) { - return false, "Expected to have matching number of array items" - } - result, explain := b.Compare(typedLeft.Items[i].Value, item.Value) - if !result { - return false, explain - } - } - - return true, "" - - case *yamlmeta.ArrayItem: - typedLeft, isArrayItem := left.(*yamlmeta.ArrayItem) - if !isArrayItem { - return false, fmt.Sprintf("Expected arrayitem, but was %T", left) - } - - return b.Compare(typedLeft.Value, typedRight.Value) - - default: - return b.CompareLeafs(left, right) - } -} - -func (b Comparison) CompareLeafs(left, right interface{}) (bool, string) { - if reflect.DeepEqual(left, right) { - return true, "" - } - - if result, _ := b.compareAsInt64s(left, right); result { - return true, "" - } - - return false, fmt.Sprintf("Expected leaf values to match %T %T", left, right) -} - -func (b Comparison) compareAsInt64s(left, right interface{}) (bool, string) { - leftVal, ok := b.upcastToInt64(left) - if !ok { - return false, "Left obj is upcastable to int64" - } - - rightVal, ok := b.upcastToInt64(right) - if !ok { - return false, "Right obj is upcastable to int64" - } - - return leftVal == rightVal, "Left and right numbers are not equal" -} - -func (b Comparison) upcastToInt64(val interface{}) (int64, bool) { - switch typedVal := val.(type) { - case int: - return int64(typedVal), true - case int16: - return int64(typedVal), true - case int32: - return int64(typedVal), true - case int64: - return int64(typedVal), true - case int8: - return int64(typedVal), true - default: - return 0, false - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/document.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/document.go deleted file mode 100644 index a015ce4ad..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/document.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "github.com/k14s/ytt/pkg/yamlmeta" -) - -func (o Op) mergeDocument( - leftDocSets []*yamlmeta.DocumentSet, newDoc *yamlmeta.Document, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - matchChildDefaults, err := NewMatchChildDefaultsAnnotation(newDoc, parentMatchChildDefaults) - if err != nil { - return err - } - - ann, err := NewDocumentMatchAnnotation(newDoc, parentMatchChildDefaults, o.ExactMatch, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.IndexTuples(leftDocSets) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - for _, leftIdx := range leftIdxs { - replace := true - if leftDocSets[leftIdx[0]].Items[leftIdx[1]].Value != nil { - replace, err = o.apply(leftDocSets[leftIdx[0]].Items[leftIdx[1]].Value, newDoc.Value, matchChildDefaults) - if err != nil { - return err - } - } - if replace { - leftDocSets[leftIdx[0]].Items[leftIdx[1]].Value = newDoc.Value - } - } - - return nil -} - -func (o Op) removeDocument( - leftDocSets []*yamlmeta.DocumentSet, newDoc *yamlmeta.Document, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - ann, err := NewDocumentMatchAnnotation(newDoc, parentMatchChildDefaults, o.ExactMatch, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.IndexTuples(leftDocSets) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - for _, leftIdx := range leftIdxs { - leftDocSets[leftIdx[0]].Items[leftIdx[1]] = nil - } - - // Prune out all nil documents - for _, leftDocSet := range leftDocSets { - updatedDocs := []*yamlmeta.Document{} - - for _, item := range leftDocSet.Items { - if item != nil { - updatedDocs = append(updatedDocs, item) - } - } - - leftDocSet.Items = updatedDocs - } - - return nil -} - -func (o Op) replaceDocument( - leftDocSets []*yamlmeta.DocumentSet, newDoc *yamlmeta.Document, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - ann, err := NewDocumentMatchAnnotation(newDoc, parentMatchChildDefaults, o.ExactMatch, o.Thread) - if err != nil { - return err - } - - replaceAnn, err := NewReplaceAnnotation(newDoc, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.IndexTuples(leftDocSets) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - for _, leftIdx := range leftIdxs { - newVal, err := replaceAnn.Value(leftDocSets[leftIdx[0]].Items[leftIdx[1]]) - if err != nil { - return err - } - - leftDocSets[leftIdx[0]].Items[leftIdx[1]] = newDoc.DeepCopy() - err = leftDocSets[leftIdx[0]].Items[leftIdx[1]].SetValue(newVal) - if err != nil { - return err - } - } - - if len(leftIdxs) == 0 && replaceAnn.OrAdd() { - if len(leftDocSets) == 0 { - panic("Internal inconsistency: Expected at least one doc set") - } - - newVal, err := replaceAnn.Value(nil) - if err != nil { - return err - } - - leftDocSets[0].Items = append(leftDocSets[0].Items, newDoc.DeepCopy()) - err = leftDocSets[0].Items[len(leftDocSets[0].Items)-1].SetValue(newVal) - if err != nil { - return err - } - } - - return nil -} - -func (o Op) insertDocument( - leftDocSets []*yamlmeta.DocumentSet, newDoc *yamlmeta.Document, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - ann, err := NewDocumentMatchAnnotation(newDoc, parentMatchChildDefaults, o.ExactMatch, o.Thread) - if 
err != nil { - return err - } - - leftIdxs, err := ann.IndexTuples(leftDocSets) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - insertAnn, err := NewInsertAnnotation(newDoc) - if err != nil { - return err - } - - for i, leftDocSet := range leftDocSets { - updatedDocs := []*yamlmeta.Document{} - - for j, leftItem := range leftDocSet.Items { - matched := false - for _, leftIdx := range leftIdxs { - if leftIdx[0] == i && leftIdx[1] == j { - matched = true - if insertAnn.IsBefore() { - updatedDocs = append(updatedDocs, newDoc.DeepCopy()) - } - updatedDocs = append(updatedDocs, leftItem) - if insertAnn.IsAfter() { - updatedDocs = append(updatedDocs, newDoc.DeepCopy()) - } - break - } - } - if !matched { - updatedDocs = append(updatedDocs, leftItem) - } - } - - leftDocSet.Items = updatedDocs - } - - return nil -} - -func (o Op) appendDocument( - leftDocSets []*yamlmeta.DocumentSet, newDoc *yamlmeta.Document) error { - - // No need to traverse further - leftDocSets[len(leftDocSets)-1].Items = append(leftDocSets[len(leftDocSets)-1].Items, newDoc.DeepCopy()) - return nil -} - -func (o Op) assertDocument( - leftDocSets []*yamlmeta.DocumentSet, newDoc *yamlmeta.Document, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - matchChildDefaults, err := NewMatchChildDefaultsAnnotation(newDoc, parentMatchChildDefaults) - if err != nil { - return err - } - - ann, err := NewDocumentMatchAnnotation(newDoc, parentMatchChildDefaults, o.ExactMatch, o.Thread) - if err != nil { - return err - } - - testAnn, err := NewAssertAnnotation(newDoc, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.IndexTuples(leftDocSets) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - for _, leftIdx := range leftIdxs { - err := testAnn.Check(leftDocSets[leftIdx[0]].Items[leftIdx[1]]) - if err != nil { - return err - } - - _, err = o.apply(leftDocSets[leftIdx[0]].Items[leftIdx[1]].Value, newDoc.Value, matchChildDefaults) - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/document_match_annotation.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/document_match_annotation.go deleted file mode 100644 index c25e6ee85..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/document_match_annotation.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/template" - tplcore "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamlmeta" - "github.com/k14s/ytt/pkg/yamltemplate" -) - -type DocumentMatchAnnotation struct { - newDoc *yamlmeta.Document - exact bool - thread *starlark.Thread - - matcher *starlark.Value - expects MatchAnnotationExpectsKwarg -} - -func NewDocumentMatchAnnotation(newDoc *yamlmeta.Document, - defaults MatchChildDefaultsAnnotation, - exact bool, thread *starlark.Thread) (DocumentMatchAnnotation, error) { - - annotation := DocumentMatchAnnotation{ - newDoc: newDoc, - exact: exact, - thread: thread, - expects: MatchAnnotationExpectsKwarg{thread: thread}, - } - anns := template.NewAnnotations(newDoc) - - kwargs := anns.Kwargs(AnnotationMatch) - if !exact && len(kwargs) == 0 { - return annotation, fmt.Errorf("Expected '%s' annotation to have "+ - "at least one keyword argument (by=..., expects=...)", AnnotationMatch) - } - - for _, kwarg := range kwargs { - kwargName := string(kwarg[0].(starlark.String)) - switch kwargName { - case "by": - annotation.matcher = &kwarg[1] - case MatchAnnotationKwargExpects: - annotation.expects.expects = &kwarg[1] - case MatchAnnotationKwargMissingOK: - annotation.expects.missingOK = &kwarg[1] - case MatchAnnotationKwargWhen: - annotation.expects.when = &kwarg[1] - default: - return annotation, fmt.Errorf( - "Unknown '%s' annotation keyword argument '%s'", AnnotationMatch, kwargName) - } - } - - annotation.expects.FillInDefaults(defaults) - - return annotation, nil -} - -func (a DocumentMatchAnnotation) IndexTuples(leftDocSets []*yamlmeta.DocumentSet) ([][]int, error) { - idxs, matches, err := a.MatchNodes(leftDocSets) - if err != nil { - return nil, err - } - - return idxs, a.expects.Check(matches) -} - -func (a DocumentMatchAnnotation) MatchNodes(leftDocSets []*yamlmeta.DocumentSet) ([][]int, []*filepos.Position, error) { - if a.exact { - if len(leftDocSets) != 1 && len(leftDocSets[0].Items) != 1 { - return nil, nil, fmt.Errorf("Expected to find exactly one left doc when merging exactly two documents") - } - return [][]int{{0, 0}}, []*filepos.Position{leftDocSets[0].Items[0].Position}, nil - } - - if a.matcher == nil { - return nil, nil, fmt.Errorf("Expected '%s' annotation "+ - "keyword argument 'by' to be specified", AnnotationMatch) - } - - switch typedVal := (*a.matcher).(type) { - case starlark.Callable: - var leftIdxs [][]int - var combinedIdx int - var matches []*filepos.Position - - for i, leftDocSet := range leftDocSets { - for j, item := range leftDocSet.Items { - matcherArgs := starlark.Tuple{ - starlark.MakeInt(combinedIdx), - yamltemplate.NewGoValueWithYAML(item.Value).AsStarlarkValue(), - yamltemplate.NewGoValueWithYAML(a.newDoc.Value).AsStarlarkValue(), - } - - // TODO check thread correctness - result, err := starlark.Call(a.thread, *a.matcher, matcherArgs, []starlark.Tuple{}) - if err != nil { - return nil, nil, err - } - - resultBool, err := tplcore.NewStarlarkValue(result).AsBool() - if err != nil { - return nil, nil, err - } - if resultBool { - leftIdxs = append(leftIdxs, []int{i, j}) - matches = append(matches, item.Position) - } - - combinedIdx++ - } - } - - return leftIdxs, matches, nil - - default: - return nil, nil, fmt.Errorf("Expected '%s' annotation keyword argument 'by'"+ - " to be function, but was %T", AnnotationMatch, typedVal) - } -} diff --git 
a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/insert_annotation.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/insert_annotation.go deleted file mode 100644 index 1c3aa1313..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/insert_annotation.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/template" - tplcore "github.com/k14s/ytt/pkg/template/core" -) - -type InsertAnnotation struct { - newItem template.EvaluationNode - before bool - after bool -} - -func NewInsertAnnotation(newItem template.EvaluationNode) (InsertAnnotation, error) { - annotation := InsertAnnotation{newItem: newItem} - anns := template.NewAnnotations(newItem) - - if !anns.Has(AnnotationInsert) { - return annotation, fmt.Errorf( - "Expected item to have '%s' annotation", AnnotationInsert) - } - - kwargs := anns.Kwargs(AnnotationInsert) - if len(kwargs) == 0 { - return annotation, fmt.Errorf("Expected '%s' annotation to have "+ - "at least one keyword argument (before=..., after=...)", AnnotationInsert) - } - - for _, kwarg := range kwargs { - kwargName := string(kwarg[0].(starlark.String)) - - switch kwargName { - case "before": - resultBool, err := tplcore.NewStarlarkValue(kwarg[1]).AsBool() - if err != nil { - return InsertAnnotation{}, err - } - annotation.before = resultBool - - case "after": - resultBool, err := tplcore.NewStarlarkValue(kwarg[1]).AsBool() - if err != nil { - return InsertAnnotation{}, err - } - annotation.after = resultBool - - default: - return annotation, fmt.Errorf( - "Unknown '%s' annotation keyword argument '%s'", AnnotationInsert, kwargName) - } - } - - return annotation, nil -} - -func (a InsertAnnotation) IsBefore() bool { return a.before } -func (a InsertAnnotation) IsAfter() bool { return a.after } diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/map.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/map.go deleted file mode 100644 index b9c592b5f..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/map.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "github.com/k14s/ytt/pkg/yamlmeta" -) - -func (o Op) mergeMapItem(leftMap *yamlmeta.Map, newItem *yamlmeta.MapItem, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - matchChildDefaults, err := NewMatchChildDefaultsAnnotation(newItem, parentMatchChildDefaults) - if err != nil { - return err - } - - ann, err := NewMapItemMatchAnnotation(newItem, parentMatchChildDefaults, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.Indexes(leftMap) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - if len(leftIdxs) == 0 { - // No need to traverse further - leftMap.Items = append(leftMap.Items, newItem) - return nil - } - - for _, leftIdx := range leftIdxs { - replace := true - if leftMap.Items[leftIdx].Value != nil { - replace, err = o.apply(leftMap.Items[leftIdx].Value, newItem.Value, matchChildDefaults) - if err != nil { - return err - } - } - if replace { - // left side type and metas are preserved - err := leftMap.Items[leftIdx].SetValue(newItem.Value) - if err != nil { - return err - } - leftMap.Items[leftIdx].SetPosition(newItem.Position) - } - } - - return nil -} - -func (o Op) removeMapItem(leftMap *yamlmeta.Map, newItem *yamlmeta.MapItem, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - ann, err := NewMapItemMatchAnnotation(newItem, parentMatchChildDefaults, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.Indexes(leftMap) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - for _, leftIdx := range leftIdxs { - leftMap.Items[leftIdx] = nil - } - - // Prune out all nil items - updatedItems := []*yamlmeta.MapItem{} - - for _, item := range leftMap.Items { - if item != nil { - updatedItems = append(updatedItems, item) - } - } - - leftMap.Items = updatedItems - - return nil -} - -func (o Op) replaceMapItem(leftMap *yamlmeta.Map, newItem *yamlmeta.MapItem, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - ann, err := NewMapItemMatchAnnotation(newItem, parentMatchChildDefaults, o.Thread) - if err != nil { - return err - } - - replaceAnn, err := NewReplaceAnnotation(newItem, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.Indexes(leftMap) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - for _, leftIdx := range leftIdxs { - newVal, err := replaceAnn.Value(leftMap.Items[leftIdx]) - if err != nil { - return err - } - - // left side fields are not preserved. 
- // probably need to rethink how to merge left and right once those fields are needed - leftMap.Items[leftIdx] = newItem.DeepCopy() - err = leftMap.Items[leftIdx].SetValue(newVal) - if err != nil { - return err - } - } - - if len(leftIdxs) == 0 && replaceAnn.OrAdd() { - newVal, err := replaceAnn.Value(nil) - if err != nil { - return err - } - - leftMap.Items = append(leftMap.Items, newItem.DeepCopy()) - err = leftMap.Items[len(leftMap.Items)-1].SetValue(newVal) - if err != nil { - return err - } - } - - return nil -} - -func (o Op) assertMapItem(leftMap *yamlmeta.Map, newItem *yamlmeta.MapItem, - parentMatchChildDefaults MatchChildDefaultsAnnotation) error { - - matchChildDefaults, err := NewMatchChildDefaultsAnnotation(newItem, parentMatchChildDefaults) - if err != nil { - return err - } - - ann, err := NewMapItemMatchAnnotation(newItem, parentMatchChildDefaults, o.Thread) - if err != nil { - return err - } - - testAnn, err := NewAssertAnnotation(newItem, o.Thread) - if err != nil { - return err - } - - leftIdxs, err := ann.Indexes(leftMap) - if err != nil { - if err, ok := err.(MatchAnnotationNumMatchError); ok && err.isConditional() { - return nil - } - return err - } - - for _, leftIdx := range leftIdxs { - err := testAnn.Check(leftMap.Items[leftIdx]) - if err != nil { - return err - } - - _, err = o.apply(leftMap.Items[leftIdx].Value, newItem.Value, matchChildDefaults) - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/map_item_match_annotation.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/map_item_match_annotation.go deleted file mode 100644 index b0c9923cf..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/map_item_match_annotation.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - "reflect" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/filepos" - "github.com/k14s/ytt/pkg/template" - tplcore "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamlmeta" - "github.com/k14s/ytt/pkg/yamltemplate" -) - -type MapItemMatchAnnotation struct { - newItem *yamlmeta.MapItem - thread *starlark.Thread - - matcher *starlark.Value - expects MatchAnnotationExpectsKwarg -} - -func NewMapItemMatchAnnotation(newItem *yamlmeta.MapItem, - defaults MatchChildDefaultsAnnotation, - thread *starlark.Thread) (MapItemMatchAnnotation, error) { - - annotation := MapItemMatchAnnotation{ - newItem: newItem, - thread: thread, - expects: MatchAnnotationExpectsKwarg{thread: thread}, - } - kwargs := template.NewAnnotations(newItem).Kwargs(AnnotationMatch) - - for _, kwarg := range kwargs { - kwargName := string(kwarg[0].(starlark.String)) - switch kwargName { - case MatchAnnotationKwargBy: - annotation.matcher = &kwarg[1] - case MatchAnnotationKwargExpects: - annotation.expects.expects = &kwarg[1] - case MatchAnnotationKwargMissingOK: - annotation.expects.missingOK = &kwarg[1] - case MatchAnnotationKwargWhen: - annotation.expects.when = &kwarg[1] - default: - return annotation, fmt.Errorf( - "Unknown '%s' annotation keyword argument '%s'", AnnotationMatch, kwargName) - } - } - - annotation.expects.FillInDefaults(defaults) - - return annotation, nil -} - -func (a MapItemMatchAnnotation) Indexes(leftMap *yamlmeta.Map) ([]int, error) { - idxs, matches, err := a.MatchNodes(leftMap) - if err != nil { - return []int{}, err - } - - return idxs, a.expects.Check(matches) -} - -func (a MapItemMatchAnnotation) MatchNodes(leftMap *yamlmeta.Map) ([]int, []*filepos.Position, error) { - matcher := a.matcher - - if matcher == nil { - var leftIdxs []int - var matches []*filepos.Position - - for i, item := range leftMap.Items { - if reflect.DeepEqual(item.Key, a.newItem.Key) { - leftIdxs = append(leftIdxs, i) - matches = append(matches, item.Position) - } - } - return leftIdxs, matches, nil - } - - if _, ok := (*matcher).(starlark.String); ok { - matcherFunc, err := starlark.Call(a.thread, overlayModule{}.MapKey(), - starlark.Tuple{*matcher}, []starlark.Tuple{}) - if err != nil { - return nil, nil, err - } - - matcher = &matcherFunc - } - - switch typedVal := (*matcher).(type) { - case starlark.Callable: - var leftIdxs []int - var matches []*filepos.Position - - for i, item := range leftMap.Items { - matcherArgs := starlark.Tuple{ - yamltemplate.NewGoValueWithYAML(item.Key).AsStarlarkValue(), - yamltemplate.NewGoValueWithYAML(item.Value).AsStarlarkValue(), - yamltemplate.NewGoValueWithYAML(a.newItem.Value).AsStarlarkValue(), - } - - // TODO check thread correctness - result, err := starlark.Call(a.thread, *matcher, matcherArgs, []starlark.Tuple{}) - if err != nil { - return nil, nil, err - } - - resultBool, err := tplcore.NewStarlarkValue(result).AsBool() - if err != nil { - return nil, nil, err - } - if resultBool { - leftIdxs = append(leftIdxs, i) - matches = append(matches, item.Position) - } - } - return leftIdxs, matches, nil - - default: - return nil, nil, fmt.Errorf("Expected '%s' annotation keyword argument 'by' "+ - "to be either string (for map key) or function, but was %T", AnnotationMatch, typedVal) - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/match_annotation_expects_kwarg.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/match_annotation_expects_kwarg.go 
deleted file mode 100644 index f80f0d8a7..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/match_annotation_expects_kwarg.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/filepos" -) - -const ( - MatchAnnotationKwargBy string = "by" - MatchAnnotationKwargExpects string = "expects" - MatchAnnotationKwargMissingOK string = "missing_ok" - MatchAnnotationKwargWhen string = "when" -) - -type MatchAnnotationExpectsKwarg struct { - expects *starlark.Value - missingOK *starlark.Value - when *starlark.Value - thread *starlark.Thread -} - -type MatchAnnotationNumMatchError struct { - message string - fromWhen bool -} - -func (e MatchAnnotationNumMatchError) Error() string { - return e.message -} - -func (e MatchAnnotationNumMatchError) isConditional() bool { - return e.fromWhen -} - -func (a *MatchAnnotationExpectsKwarg) FillInDefaults(defaults MatchChildDefaultsAnnotation) { - if a.expects == nil { - a.expects = defaults.expects.expects - } - if a.missingOK == nil { - a.missingOK = defaults.expects.missingOK - } - if a.when == nil { - a.when = defaults.expects.when - } -} - -func (a MatchAnnotationExpectsKwarg) Check(matches []*filepos.Position) error { - switch { - case a.missingOK != nil && a.expects != nil: - return fmt.Errorf("Expected only one of keyword arguments ('%s', '%s') specified", - MatchAnnotationKwargMissingOK, MatchAnnotationKwargExpects) - - case a.missingOK != nil && a.when != nil: - return fmt.Errorf("Expected only one of keyword arguments ('%s', '%s') specified", - MatchAnnotationKwargMissingOK, MatchAnnotationKwargWhen) - - case a.when != nil && a.expects != nil: - return fmt.Errorf("Expected only one of keyword arguments ('%s', '%s') specified", - MatchAnnotationKwargWhen, MatchAnnotationKwargExpects) - - case a.missingOK != nil: - if typedResult, ok := (*a.missingOK).(starlark.Bool); ok { - if typedResult { - allowedVals := []starlark.Value{starlark.MakeInt(0), starlark.MakeInt(1)} - return a.checkValue(starlark.NewList(allowedVals), MatchAnnotationKwargMissingOK, matches) - } - return a.checkValue(starlark.MakeInt(1), MatchAnnotationKwargMissingOK, matches) - } - return fmt.Errorf("Expected keyword argument '%s' to be a boolean", - MatchAnnotationKwargMissingOK) - - case a.when != nil: - return a.checkValue(*a.when, MatchAnnotationKwargWhen, matches) - - case a.expects != nil: - return a.checkValue(*a.expects, MatchAnnotationKwargExpects, matches) - - default: - return a.checkValue(starlark.MakeInt(1), "", matches) - } -} - -func (a MatchAnnotationExpectsKwarg) checkValue(val interface{}, kwarg string, matches []*filepos.Position) error { - switch typedVal := val.(type) { - case starlark.Int: - return a.checkInt(typedVal, matches) - - case starlark.String: - return a.checkString(typedVal, matches) - - case *starlark.List: - return a.checkList(typedVal, kwarg, matches) - - case starlark.Callable: - result, err := starlark.Call(a.thread, typedVal, starlark.Tuple{starlark.MakeInt(len(matches))}, []starlark.Tuple{}) - if err != nil { - return err - } - if typedResult, ok := result.(starlark.Bool); ok { - if !bool(typedResult) { - return MatchAnnotationNumMatchError{ - message: "Expectation of number of matched nodes failed", - fromWhen: a.when != nil, - } - } - return nil - } - return fmt.Errorf("Expected keyword argument '%s' to have a function that returns a 
boolean", kwarg) - - default: - return fmt.Errorf("Expected '%s' annotation keyword argument '%s' "+ - "to be either int, string or function, but was %T", AnnotationMatch, kwarg, typedVal) - } -} - -func (a MatchAnnotationExpectsKwarg) checkInt(typedVal starlark.Int, matches []*filepos.Position) error { - i1, ok := typedVal.Int64() - if ok { - if i1 != int64(len(matches)) { - errMsg := fmt.Sprintf("Expected number of matched nodes to be %d, but was %d%s", - i1, len(matches), a.formatPositions(matches)) - return MatchAnnotationNumMatchError{message: errMsg, fromWhen: a.when != nil} - } - return nil - } - - i2, ok := typedVal.Uint64() - if ok { - if i2 != uint64(len(matches)) { - errMsg := fmt.Sprintf("Expected number of matched nodes to be %d, but was %d%s", - i2, len(matches), a.formatPositions(matches)) - return MatchAnnotationNumMatchError{message: errMsg, fromWhen: a.when != nil} - } - return nil - } - - panic("Unsure how to convert starlark.Int to int") -} - -func (a MatchAnnotationExpectsKwarg) checkString(typedVal starlark.String, matches []*filepos.Position) error { - typedValStr := string(typedVal) - - if strings.HasSuffix(typedValStr, "+") { - typedInt, err := strconv.Atoi(strings.TrimSuffix(typedValStr, "+")) - if err != nil { - return fmt.Errorf("Expected '%s' to be in format 'i+' where i is an integer", typedValStr) - } - - if len(matches) < typedInt { - errMsg := fmt.Sprintf("Expected number of matched nodes to be >= %d, but was %d%s", - typedInt, len(matches), a.formatPositions(matches)) - return MatchAnnotationNumMatchError{message: errMsg, fromWhen: a.when != nil} - } - - return nil - } - - return fmt.Errorf("Expected '%s' to be in format 'i+' where i is an integer", typedValStr) -} - -func (a MatchAnnotationExpectsKwarg) checkList(typedVal *starlark.List, kwarg string, matches []*filepos.Position) error { - var lastErr error - var val starlark.Value - - iter := typedVal.Iterate() - defer iter.Done() - - for iter.Next(&val) { - lastErr = a.checkValue(val, kwarg, matches) - if lastErr == nil { - return nil - } - } - return lastErr -} - -func (MatchAnnotationExpectsKwarg) formatPositions(pos []*filepos.Position) string { - if len(pos) == 0 { - return "" - } - lines := []string{} - for _, p := range pos { - lines = append(lines, p.AsCompactString()) - } - sort.Strings(lines) - return " (lines: " + strings.Join(lines, ", ") + ")" -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/match_child_defaults_annotation.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/match_child_defaults_annotation.go deleted file mode 100644 index 95dbf7215..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/match_child_defaults_annotation.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/template" -) - -type MatchChildDefaultsAnnotation struct { - expects MatchAnnotationExpectsKwarg -} - -func NewEmptyMatchChildDefaultsAnnotation() MatchChildDefaultsAnnotation { - return MatchChildDefaultsAnnotation{ - expects: MatchAnnotationExpectsKwarg{}, - } -} - -func NewMatchChildDefaultsAnnotation(node template.EvaluationNode, - parentMatchChildDefaults MatchChildDefaultsAnnotation) (MatchChildDefaultsAnnotation, error) { - - annotation := MatchChildDefaultsAnnotation{ - // TODO do we need to propagate thread? 
- expects: MatchAnnotationExpectsKwarg{}, - } - kwargs := template.NewAnnotations(node).Kwargs(AnnotationMatchChildDefaults) - - for _, kwarg := range kwargs { - kwargName := string(kwarg[0].(starlark.String)) - switch kwargName { - case MatchAnnotationKwargExpects: - annotation.expects.expects = &kwarg[1] - case MatchAnnotationKwargMissingOK: - annotation.expects.missingOK = &kwarg[1] - case MatchAnnotationKwargWhen: - annotation.expects.when = &kwarg[1] - default: - return annotation, fmt.Errorf( - "Unknown '%s' annotation keyword argument '%s'", AnnotationMatchChildDefaults, kwargName) - } - } - - annotation.expects.FillInDefaults(parentMatchChildDefaults) - - return annotation, nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/op.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/op.go deleted file mode 100644 index 3c4d2f869..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/op.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/template" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -type Op struct { - Left interface{} - Right interface{} - - Thread *starlark.Thread - - ExactMatch bool -} - -func (o Op) Apply() (interface{}, error) { - leftObj := yamlmeta.NewASTFromInterface(o.Left) - rightObj := yamlmeta.NewASTFromInterface(o.Right) - - _, err := o.apply(leftObj, rightObj, NewEmptyMatchChildDefaultsAnnotation()) - if err != nil { - return nil, err - } - - o.removeOverlayAnns(leftObj) - return leftObj, nil -} - -func (o Op) apply(left, right interface{}, parentMatchChildDefaults MatchChildDefaultsAnnotation) (bool, error) { - switch typedRight := right.(type) { - case *yamlmeta.DocumentSet: - var docSetArray []*yamlmeta.DocumentSet - - typedLeft, isDocSet := left.(*yamlmeta.DocumentSet) - if !isDocSet { - // support array of docsets to allow consumers to - // keep proper association of document to docsets - // (see matching for overlay post processing) - typedLeft, isDocSetArray := left.([]*yamlmeta.DocumentSet) - if !isDocSetArray { - return false, fmt.Errorf("Expected docset, but was %T", left) - } - docSetArray = typedLeft - } else { - docSetArray = []*yamlmeta.DocumentSet{typedLeft} - } - - return o.applyDocSet(docSetArray, typedRight, parentMatchChildDefaults) - - case *yamlmeta.Document: - panic("Unexpected doc") - - case *yamlmeta.Map: - typedLeft, isMap := left.(*yamlmeta.Map) - if !isMap { - return false, fmt.Errorf("Expected map, but was %T", left) - } - - for _, item := range typedRight.Items { - item := item.DeepCopy() - - op, err := whichOp(item) - if err == nil { - switch op { - case AnnotationMerge: - err = o.mergeMapItem(typedLeft, item, parentMatchChildDefaults) - case AnnotationRemove: - err = o.removeMapItem(typedLeft, item, parentMatchChildDefaults) - case AnnotationReplace: - err = o.replaceMapItem(typedLeft, item, parentMatchChildDefaults) - case AnnotationAssert: - err = o.assertMapItem(typedLeft, item, parentMatchChildDefaults) - default: - err = fmt.Errorf("Overlay op %s is not supported on map item", op) - } - } - if err != nil { - return false, fmt.Errorf("Map item (key '%s') on %s: %s", - item.Key, item.Position.AsString(), err) - } - } - - case *yamlmeta.MapItem: - panic("Unexpected mapitem") - - case *yamlmeta.Array: - typedLeft, isArray := left.(*yamlmeta.Array) - if !isArray { - return false, fmt.Errorf("Expected array, but was %T", left) - } - - for _, 
item := range typedRight.Items { - item := item.DeepCopy() - - op, err := whichOp(item) - if err == nil { - switch op { - case AnnotationMerge: - err = o.mergeArrayItem(typedLeft, item, parentMatchChildDefaults) - case AnnotationRemove: - err = o.removeArrayItem(typedLeft, item, parentMatchChildDefaults) - case AnnotationReplace: - err = o.replaceArrayItem(typedLeft, item, parentMatchChildDefaults) - case AnnotationInsert: - err = o.insertArrayItem(typedLeft, item, parentMatchChildDefaults) - case AnnotationAppend: - err = o.appendArrayItem(typedLeft, item) - case AnnotationAssert: - err = o.assertArrayItem(typedLeft, item, parentMatchChildDefaults) - default: - err = fmt.Errorf("Overlay op %s is not supported on array item", op) - } - } - if err != nil { - return false, fmt.Errorf("Array item on %s: %s", item.Position.AsString(), err) - } - } - - case *yamlmeta.ArrayItem: - panic("Unexpected arrayitem") - - default: - return true, nil - } - - return false, nil -} - -func (o Op) applyDocSet( - typedLeft []*yamlmeta.DocumentSet, typedRight *yamlmeta.DocumentSet, - parentMatchChildDefaults MatchChildDefaultsAnnotation) (bool, error) { - - for _, doc := range typedRight.Items { - doc := doc.DeepCopy() - - op, err := whichOp(doc) - if err == nil { - switch op { - case AnnotationMerge: - err = o.mergeDocument(typedLeft, doc, parentMatchChildDefaults) - case AnnotationRemove: - err = o.removeDocument(typedLeft, doc, parentMatchChildDefaults) - case AnnotationReplace: - err = o.replaceDocument(typedLeft, doc, parentMatchChildDefaults) - case AnnotationInsert: - err = o.insertDocument(typedLeft, doc, parentMatchChildDefaults) - case AnnotationAppend: - err = o.appendDocument(typedLeft, doc) - case AnnotationAssert: - err = o.assertDocument(typedLeft, doc, parentMatchChildDefaults) - default: - err = fmt.Errorf("Overlay op %s is not supported on document", op) - } - } - if err != nil { - return false, fmt.Errorf("Document on %s: %s", doc.Position.AsString(), err) - } - } - - return false, nil -} - -func (o Op) removeOverlayAnns(val interface{}) { - node, ok := val.(yamlmeta.Node) - if !ok { - return - } - - template.NewAnnotations(node).DeleteNs(AnnotationNs) - - for _, childVal := range node.GetValues() { - o.removeOverlayAnns(childVal) - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/replace_annotation.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/replace_annotation.go deleted file mode 100644 index 9a09aac40..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/overlay/replace_annotation.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package overlay - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/ytt/pkg/template" - tplcore "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamltemplate" -) - -const ( - ReplaceAnnotationKwargVia string = "via" - ReplaceAnnotationKwargOrAdd string = "or_add" -) - -type ReplaceAnnotation struct { - newNode template.EvaluationNode - thread *starlark.Thread - via *starlark.Value - orAdd bool -} - -func NewReplaceAnnotation(newNode template.EvaluationNode, thread *starlark.Thread) (ReplaceAnnotation, error) { - annotation := ReplaceAnnotation{ - newNode: newNode, - thread: thread, - } - kwargs := template.NewAnnotations(newNode).Kwargs(AnnotationReplace) - - for _, kwarg := range kwargs { - kwargName := string(kwarg[0].(starlark.String)) - - switch kwargName { - case ReplaceAnnotationKwargVia: - annotation.via = &kwarg[1] - - case ReplaceAnnotationKwargOrAdd: - resultBool, err := tplcore.NewStarlarkValue(kwarg[1]).AsBool() - if err != nil { - return ReplaceAnnotation{}, err - } - annotation.orAdd = resultBool - - default: - return annotation, fmt.Errorf( - "Unknown '%s' annotation keyword argument '%s'", AnnotationReplace, kwargName) - } - } - - return annotation, nil -} - -func (a ReplaceAnnotation) Value(existingNode template.EvaluationNode) (interface{}, error) { - // Make sure original nodes are not affected in any way - newNode := a.newNode.DeepCopyAsInterface().(template.EvaluationNode) - - // TODO currently assumes that we can always get at least one value - if a.via == nil { - return newNode.GetValues()[0], nil - } - - switch typedVal := (*a.via).(type) { - case starlark.Callable: - var existingVal interface{} - if existingNode != nil { - // Make sure original nodes are not affected in any way - existingVal = existingNode.DeepCopyAsInterface().(template.EvaluationNode).GetValues()[0] - } else { - existingVal = nil - } - - viaArgs := starlark.Tuple{ - yamltemplate.NewGoValueWithYAML(existingVal).AsStarlarkValue(), - yamltemplate.NewGoValueWithYAML(newNode.GetValues()[0]).AsStarlarkValue(), - } - - // TODO check thread correctness - result, err := starlark.Call(a.thread, *a.via, viaArgs, []starlark.Tuple{}) - if err != nil { - return nil, err - } - - return tplcore.NewStarlarkValue(result).AsGoValue() - - default: - return nil, fmt.Errorf("Expected '%s' annotation keyword argument 'via'"+ - " to be function, but was %T", AnnotationReplace, typedVal) - } -} - -func (a ReplaceAnnotation) OrAdd() bool { return a.orAdd } diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/regexp.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/regexp.go deleted file mode 100644 index b901c7733..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/regexp.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "fmt" - "regexp" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/template/core" -) - -var ( - RegexpAPI = starlark.StringDict{ - "regexp": &starlarkstruct.Module{ - Name: "regexp", - Members: starlark.StringDict{ - "match": starlark.NewBuiltin("regexp.match", core.ErrWrapper(regexpModule{}.Match)), - "replace": starlark.NewBuiltin("regexp.replace", core.ErrWrapper(regexpModule{}.Replace)), - }, - }, - } -) - -type regexpModule struct{} - -func (b regexpModule) Match(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 2 { - return starlark.None, fmt.Errorf("expected exactly two arguments") - } - - pattern, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - target, err := core.NewStarlarkValue(args.Index(1)).AsString() - if err != nil { - return starlark.None, err - } - - matched, err := regexp.MatchString(pattern, target) - if err != nil { - return starlark.None, err - } - - return starlark.Bool(matched), nil -} - -func (b regexpModule) Replace(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 3 { - return starlark.None, fmt.Errorf("expected exactly 3 arguments") - } - - pattern, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - re, err := regexp.Compile(pattern) - if err != nil { - return starlark.None, err - } - - source, err := core.NewStarlarkValue(args.Index(1)).AsString() - if err != nil { - return starlark.None, err - } - - repl := args.Index(2) - switch typedRepl := repl.(type) { - case starlark.Callable: - return b.replaceLambda(thread, re, source, typedRepl) - default: - return b.replaceString(re, source, repl) - } -} - -func (b regexpModule) replaceString(re *regexp.Regexp, source string, repl starlark.Value) (starlark.Value, error) { - replStr, err := core.NewStarlarkValue(repl).AsString() - if err != nil { - return starlark.None, err - } - - newString := re.ReplaceAllString(source, replStr) - - return starlark.String(newString), nil -} - -func (b regexpModule) replaceLambda(thread *starlark.Thread, re *regexp.Regexp, source string, repl starlark.Callable) (starlark.Value, error) { - var lastErr error - newString := re.ReplaceAllStringFunc(source, func(match string) string { - if lastErr != nil { - // if we have multiple matches but an earlier replace caused an error, we want to return - // quickly then propagate that error - return "" - } - - args := starlark.Tuple{starlark.String(match)} - var result starlark.Value - result, lastErr = starlark.Call(thread, repl, args, []starlark.Tuple{}) - if lastErr != nil { - return "" - } - - var newString string - newString, lastErr = core.NewStarlarkValue(result).AsString() - if lastErr != nil { - return "" - } - return newString - }) - - if lastErr != nil { - return nil, lastErr - } - - return starlark.String(newString), nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/sha256.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/sha256.go deleted file mode 100644 index 6e935971c..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/sha256.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "crypto/sha256" - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/template/core" -) - -var ( - SHA256API = starlark.StringDict{ - "sha256": &starlarkstruct.Module{ - Name: "sha256", - Members: starlark.StringDict{ - "sum": starlark.NewBuiltin("sha256.sum", core.ErrWrapper(sha256Module{}.Sum)), - }, - }, - } -) - -type sha256Module struct{} - -func (b sha256Module) Sum(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - return starlark.String(fmt.Sprintf("%x", sha256.Sum256([]byte(val)))), nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/struct.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/struct.go deleted file mode 100644 index 6156a07a9..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/struct.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/orderedmap" - "github.com/k14s/ytt/pkg/template/core" -) - -var ( - StructAPI = starlark.StringDict{ - "struct": &starlarkstruct.Module{ - Name: "struct", - Members: starlark.StringDict{ - "make": starlark.NewBuiltin("struct.make", core.ErrWrapper(structModule{}.Make)), - "make_and_bind": starlark.NewBuiltin("struct.make_and_bind", core.ErrWrapper(structModule{}.MakeAndBind)), - "bind": starlark.NewBuiltin("struct.bind", core.ErrWrapper(structModule{}.Bind)), - - "encode": starlark.NewBuiltin("struct.encode", core.ErrWrapper(structModule{}.Encode)), - "decode": starlark.NewBuiltin("struct.decode", core.ErrWrapper(structModule{}.Decode)), - }, - }, - } -) - -type structModule struct{} - -func (b structModule) Make(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if len(args) > 0 { - return nil, fmt.Errorf("unexpected positional arguments (structs are made from keyword arguments only)") - } - return b.fromKeywords(kwargs), nil -} - -// fromKeywords returns a new struct instance whose fields are specified by the -// key/value pairs in kwargs. (Each kwargs[i][0] must be a starlark.String.) 
-func (b structModule) fromKeywords(kwargs []starlark.Tuple) *core.StarlarkStruct { - data := orderedmap.Map{} - for _, kwarg := range kwargs { - k := string(kwarg[0].(starlark.String)) - v := kwarg[1] - data.Set(k, v) - } - return core.NewStarlarkStruct(&data) -} - -func (b structModule) MakeAndBind(thread *starlark.Thread, f *starlark.Builtin, - bindArgs starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - if bindArgs.Len() != 1 { - return starlark.None, fmt.Errorf("expected at exactly one argument") - } - - for i, kwarg := range kwargs { - if _, ok := kwarg[1].(starlark.Callable); ok { - boundFunc, err := b.Bind(thread, nil, starlark.Tuple{kwarg[1], bindArgs.Index(0)}, nil) - if err != nil { - return starlark.None, fmt.Errorf("binding %s: %s", kwarg[0], err) - } - kwarg[1] = boundFunc - kwargs[i] = kwarg - } - } - - return b.Make(thread, nil, starlark.Tuple{}, kwargs) -} - -func (b structModule) Bind(thread *starlark.Thread, f *starlark.Builtin, - bindArgs starlark.Tuple, _ []starlark.Tuple) (starlark.Value, error) { - - if bindArgs.Len() < 2 { - return starlark.None, fmt.Errorf("expected at least two arguments") - } - - resultFunc := func(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - - newArgs := append(starlark.Tuple{}, bindArgs[1:]...) - newArgs = append(newArgs, args...) - - return starlark.Call(thread, bindArgs.Index(0), newArgs, kwargs) - } - - return starlark.NewBuiltin("struct.bind_result", core.ErrWrapper(resultFunc)), nil -} - -func (b structModule) Encode(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, _ []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsGoValue() - if err != nil { - return starlark.None, err - } - return core.NewGoValueWithOpts(val, core.GoValueOpts{MapIsStruct: true}).AsStarlarkValue(), nil -} - -func (b structModule) Decode(thread *starlark.Thread, f *starlark.Builtin, - args starlark.Tuple, _ []starlark.Tuple) (starlark.Value, error) { - - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsGoValue() - if err != nil { - return starlark.None, err - } - return core.NewGoValue(val).AsStarlarkValue(), nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/template.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/template.go deleted file mode 100644 index df09002ce..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/template.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/template/core" -) - -type TemplateModule struct { - replaceNodeFunc core.StarlarkFunc -} - -func NewTemplateModule(replaceNodeFunc core.StarlarkFunc) TemplateModule { - return TemplateModule{replaceNodeFunc} -} - -func (b TemplateModule) AsModule() starlark.StringDict { - return starlark.StringDict{ - "template": &starlarkstruct.Module{ - Name: "template", - Members: starlark.StringDict{ - "replace": starlark.NewBuiltin("template.replace", core.ErrWrapper(b.replaceNodeFunc)), - }, - }, - } -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/url.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/url.go deleted file mode 100644 index d6454535c..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/url.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2020 VMware, Inc. -// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "fmt" - "net/url" - "sort" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/orderedmap" - "github.com/k14s/ytt/pkg/template/core" -) - -var ( - URLAPI = starlark.StringDict{ - "url": &starlarkstruct.Module{ - Name: "url", - Members: starlark.StringDict{ - "path_segment_encode": starlark.NewBuiltin("url.path_segment_encode", core.ErrWrapper(urlModule{}.PathSegmentEncode)), - "path_segment_decode": starlark.NewBuiltin("url.path_segment_decode", core.ErrWrapper(urlModule{}.PathSegmentDecode)), - - "query_param_value_encode": starlark.NewBuiltin("url.query_param_value_encode", core.ErrWrapper(urlModule{}.QueryParamValueEncode)), - "query_param_value_decode": starlark.NewBuiltin("url.query_param_value_decode", core.ErrWrapper(urlModule{}.QueryParamValueDecode)), - - "query_params_encode": starlark.NewBuiltin("url.query_params_encode", core.ErrWrapper(urlModule{}.QueryParamsEncode)), - "query_params_decode": starlark.NewBuiltin("url.query_params_decode", core.ErrWrapper(urlModule{}.QueryParamsDecode)), - - "parse": starlark.NewBuiltin("url.parse", core.ErrWrapper(urlModule{}.ParseURL)), - }, - }, - } -) - -type urlModule struct{} - -// URLValue stores a parsed URL -type URLValue struct { - url *url.URL - *core.StarlarkStruct // TODO: keep authorship of the interface by delegating instead of embedding -} - -// URLUser stores the user information -type URLUser struct { - user *url.Userinfo - *core.StarlarkStruct // TODO: keep authorship of the interface by delegating instead of embedding -} - -func (b urlModule) PathSegmentEncode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - return starlark.String(url.PathEscape(val)), nil -} - -func (b urlModule) PathSegmentDecode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - val, err = url.PathUnescape(val) - if err != nil { - return starlark.None, err - } - - return starlark.String(val), nil -} - -func (b 
urlModule) QueryParamValueEncode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - return starlark.String(url.QueryEscape(val)), nil -} - -func (b urlModule) QueryParamValueDecode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - val, err = url.QueryUnescape(val) - if err != nil { - return starlark.None, err - } - - return starlark.String(val), nil -} - -func (b urlModule) QueryParamsEncode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsGoValue() - if err != nil { - return starlark.None, err - } - - typedVal, ok := val.(*orderedmap.Map) - if !ok { - return starlark.None, fmt.Errorf("expected argument to be a map, but was %T", val) - } - - urlVals := url.Values{} - - err = typedVal.IterateErr(func(key, val interface{}) error { - keyStr, ok := key.(string) - if !ok { - return fmt.Errorf("expected map key to be string, but was %T", key) - } - - valArray, ok := val.([]interface{}) - if !ok { - return fmt.Errorf("expected map value to be array, but was %T", val) - } - - if len(valArray) == 0 { - urlVals[keyStr] = []string{} - } else { - for _, valItem := range valArray { - valItemStr, ok := valItem.(string) - if !ok { - return fmt.Errorf("expected array value to be string, but was %T", valItem) - } - urlVals[keyStr] = append(urlVals[keyStr], valItemStr) - } - } - - return nil - }) - if err != nil { - return starlark.None, err - } - - return starlark.String(urlVals.Encode()), nil -} - -func (b urlModule) QueryParamsDecode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - encodedVal, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - urlVals, err := url.ParseQuery(encodedVal) - if err != nil { - return starlark.None, err - } - - result := orderedmap.NewMap() - - for _, key := range b.sortedKeys(urlVals) { - val := []interface{}{} - for _, v := range urlVals[key] { - val = append(val, v) - } - result.Set(key, val) - } - - return core.NewGoValue(result).AsStarlarkValue(), nil -} - -func (b urlModule) sortedKeys(vals url.Values) []string { - var result []string - for k := range vals { - result = append(result, k) - } - sort.Strings(result) - return result -} - -func (b urlModule) ParseURL(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - urlStr, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - parsedURL, err := url.Parse(urlStr) - if err != nil { - return starlark.None, err - } - - return 
(&URLValue{parsedURL, nil}).AsStarlarkValue(), nil -} - -func (uv *URLValue) Type() string { return "@ytt:url.value" } - -func (uv *URLValue) AsStarlarkValue() starlark.Value { - m := orderedmap.NewMap() - m.Set("user", uv.User()) - m.Set("without_user", starlark.NewBuiltin("url.without_user", core.ErrWrapper(uv.WithoutUser))) - m.Set("string", starlark.NewBuiltin("url.string", core.ErrWrapper(uv.string))) - m.Set("hostname", starlark.NewBuiltin("url.hostname", core.ErrWrapper(uv.Hostname))) - uv.StarlarkStruct = core.NewStarlarkStruct(m) - return uv -} - -func (uv *URLValue) ConversionHint() string { - return "URLValue does not automatically encode (hint: use .string())" -} - -func (uv *URLValue) Hostname(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 0 { - return starlark.None, fmt.Errorf("expected no argument") - } - return starlark.String(uv.url.Hostname()), nil -} - -func (uu *URLUser) string(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 0 { - return starlark.None, fmt.Errorf("expected no argument") - } - return starlark.String(uu.user.String()), nil -} - -func (uv *URLValue) User() starlark.Value { - if uv.url.User == nil { - return starlark.None - } - - uu := &URLUser{uv.url.User, nil} - m := orderedmap.NewMap() - m.Set("name", starlark.String(uu.user.Username())) - m.Set("password", uu.password()) - m.Set("string", starlark.NewBuiltin("string", core.ErrWrapper(uu.string))) - uu.StarlarkStruct = core.NewStarlarkStruct(m) - return uu -} - -func (uu *URLUser) Type() string { return "@ytt:url.user" } - -func (uu *URLUser) ConversionHint() string { - return "URLUser does not automatically encode (hint: use .string())" -} - -func (uu *URLUser) password() starlark.Value { - passwd, passwdSet := uu.user.Password() - if !passwdSet { - return starlark.None - } - return starlark.String(passwd) -} - -func (uv *URLValue) WithoutUser(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 0 { - return starlark.None, fmt.Errorf("expected no argument") - } - urlVar := *uv.url - urlVar.User = nil - return (&URLValue{&urlVar, nil}).AsStarlarkValue(), nil -} - -func (uv *URLValue) string(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 0 { - return starlark.None, fmt.Errorf("expected no argument") - } - return starlark.String(uv.url.String()), nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/version.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/version.go deleted file mode 100644 index 71b04941d..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/version.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "fmt" - "regexp" - - semver "github.com/hashicorp/go-version" - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/version" -) - -const ( - SemverRegex string = `^(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)` + - `(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))` + - `?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$` -) - -var ( - VersionAPI = starlark.StringDict{ - "version": &starlarkstruct.Module{ - Name: "version", - Members: starlark.StringDict{ - "require_at_least": starlark.NewBuiltin("version.require_at_least", core.ErrWrapper(versionModule{}.RequireAtLeast)), - }, - }, - } -) - -type versionModule struct{} - -func (b versionModule) RequireAtLeast(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - r := regexp.MustCompile(SemverRegex) - if !r.MatchString(val) { - return starlark.None, fmt.Errorf("version string '%s' must be a valid semver", val) - } - - userConstraint, err := semver.NewConstraint(">=" + val) - if err != nil { - return starlark.None, err - } - - yttVersion, err := semver.NewVersion(version.Version) - if err != nil { - return starlark.None, err - } - - satisfied := userConstraint.Check(yttVersion) - if !satisfied { - return starlark.None, fmt.Errorf("ytt version '%s' does not meet the minimum required version '%s'", version.Version, val) - } - - return starlark.None, nil -} diff --git a/vendor/github.com/k14s/ytt/pkg/yttlibrary/yaml.go b/vendor/github.com/k14s/ytt/pkg/yttlibrary/yaml.go deleted file mode 100644 index 222a9c373..000000000 --- a/vendor/github.com/k14s/ytt/pkg/yttlibrary/yaml.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2020 VMware, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package yttlibrary - -import ( - "fmt" - - "github.com/k14s/starlark-go/starlark" - "github.com/k14s/starlark-go/starlarkstruct" - "github.com/k14s/ytt/pkg/template/core" - "github.com/k14s/ytt/pkg/yamlmeta" -) - -var ( - YAMLAPI = starlark.StringDict{ - "yaml": &starlarkstruct.Module{ - Name: "yaml", - Members: starlark.StringDict{ - "encode": starlark.NewBuiltin("yaml.encode", core.ErrWrapper(yamlModule{}.Encode)), - "decode": starlark.NewBuiltin("yaml.decode", core.ErrWrapper(yamlModule{}.Decode)), - }, - }, - } -) - -type yamlModule struct{} - -func (b yamlModule) Encode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - val, err := core.NewStarlarkValue(args.Index(0)).AsGoValue() - if err != nil { - return starlark.None, err - } - - var docSet *yamlmeta.DocumentSet - - switch typedVal := val.(type) { - case *yamlmeta.DocumentSet: - docSet = typedVal - case *yamlmeta.Document: - // Documents should be part of DocumentSet by the time it makes it here - panic("Unexpected document") - default: - docSet = &yamlmeta.DocumentSet{Items: []*yamlmeta.Document{{Value: typedVal}}} - } - - valBs, err := docSet.AsBytes() - if err != nil { - return starlark.None, err - } - - return starlark.String(string(valBs)), nil -} - -func (b yamlModule) Decode(thread *starlark.Thread, f *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if args.Len() != 1 { - return starlark.None, fmt.Errorf("expected exactly one argument") - } - - valEncoded, err := core.NewStarlarkValue(args.Index(0)).AsString() - if err != nil { - return starlark.None, err - } - - var valDecoded interface{} - - err = yamlmeta.PlainUnmarshal([]byte(valEncoded), &valDecoded) - if err != nil { - return starlark.None, err - } - - return core.NewGoValue(valDecoded).AsStarlarkValue(), nil -} diff --git a/vendor/github.com/vito/go-interact/LICENSE.md b/vendor/github.com/vito/go-interact/LICENSE.md deleted file mode 100644 index a458287fd..000000000 --- a/vendor/github.com/vito/go-interact/LICENSE.md +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015-2016 Alex Suraci - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/vito/go-interact/interact/choice.go b/vendor/github.com/vito/go-interact/interact/choice.go deleted file mode 100644 index 10d1cad05..000000000 --- a/vendor/github.com/vito/go-interact/interact/choice.go +++ /dev/null @@ -1,10 +0,0 @@ -package interact - -// Choice is used to allow the user to select a value of an arbitrary type. -// Its Display value will be shown in a listing during Resolve, and if its -// entry in the list is chosen, the Value will be used to populate the -// destination. -type Choice struct { - Display string - Value interface{} -} diff --git a/vendor/github.com/vito/go-interact/interact/errors.go b/vendor/github.com/vito/go-interact/interact/errors.go deleted file mode 100644 index c338bfac9..000000000 --- a/vendor/github.com/vito/go-interact/interact/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -package interact - -import ( - "errors" - "fmt" - "reflect" -) - -// ErrNotANumber is used internally by Resolve when the user enters a bogus -// value when resolving into an int. -// -// Resolve will retry on this error; it is only exposed so you can know where -// the string is coming from. -var ErrNotANumber = errors.New("not a number") - -// ErrNotBoolean is used internally by Resolve when the user enters a bogus -// value when resolving into a bool. -// -// Resolve will retry on this error; it is only exposed so you can know where -// the string is coming from. -var ErrNotBoolean = errors.New("not y, n, yes, or no") - -// NotAssignableError is returned by Resolve when the value present in the -// Choice the user selected is not assignable to the destination value during -// Resolve. -type NotAssignableError struct { - Destination reflect.Type - Value reflect.Type -} - -func (err NotAssignableError) Error() string { - return fmt.Sprintf("chosen value (%T) is not assignable to %T", err.Value, err.Destination) -} diff --git a/vendor/github.com/vito/go-interact/interact/interaction.go b/vendor/github.com/vito/go-interact/interact/interaction.go deleted file mode 100644 index 5471dc3f3..000000000 --- a/vendor/github.com/vito/go-interact/interact/interaction.go +++ /dev/null @@ -1,317 +0,0 @@ -package interact - -import ( - "fmt" - "io" - "os" - "reflect" - "strconv" - - "golang.org/x/term" -) - -// Interaction represents a single question to ask, optionally with a set of -// choices to limit the answer to. -type Interaction struct { - Prompt string - Choices []Choice - - Input io.Reader - Output io.Writer -} - -// NewInteraction constructs an interaction with the given prompt, limited to -// the given choices, if any. -// -// Defaults Input and Output to os.Stdin and os.Stderr, respectively. -func NewInteraction(prompt string, choices ...Choice) Interaction { - return Interaction{ - Input: os.Stdin, - Output: os.Stdout, - Prompt: prompt, - Choices: choices, - } -} - -// Resolve prints the prompt, indicating the default value, and asks for the -// value to populate into the destination dst, which should be a pointer to a -// value to set. -// -// The default value is whatever value is currently held in dst, and will be -// shown in the prompt. Note that zero-values are valid defaults (e.g. false -// for a boolean prompt), so to disambiguate from having just allocated dst, -// and not intending its current zero-value to be the default, you must wrap it -// in a RequiredDestination. -// -// If the choices are limited, the default value will be inferred by finding -// the value held in dst within the set of choices. 
The number corresponding -// to the choice will be the default value shown to the user. If no default is -// found, Resolve will require the user to make a selection. -// -// The type of dst determines how the value is read. Currently supported types -// for the destination are int, string, bool, and any arbitrary value that is -// defined within the set of Choices. -// -// Valid input strings for bools are "y", "n", "Y", "N", "yes", and "no". -// Integer values are parsed in base-10. String values will not include any -// trailing linebreak. -func (interaction Interaction) Resolve(dst interface{}) error { - prompt := interaction.prompt(dst) - - var user userIO - if input, output, ok := interaction.getStreams(); ok && term.IsTerminal(int(input.Fd())) { - state, err := term.MakeRaw(int(input.Fd())) - if err != nil { - return err - } - - defer term.Restore(int(input.Fd()), state) - - term, err := newTTYUser(input, output) - if err != nil { - return err - } - - user = term - } else { - user = newNonTTYUser(interaction.Input, interaction.Output) - } - - if len(interaction.Choices) == 0 { - return interaction.resolveSingle(dst, user, prompt) - } - - return interaction.resolveChoices(dst, user, prompt) -} - -func (interaction Interaction) getStreams() (*os.File, *os.File, bool) { - input, inputConverted := interaction.Input.(*os.File) - output, outputConverted := interaction.Output.(*os.File) - return input, output, inputConverted && outputConverted -} - -func (interaction Interaction) prompt(dst interface{}) string { - if len(interaction.Choices) > 0 { - num, present := interaction.choiceNumber(dst) - if present { - return fmt.Sprintf("%s (%d): ", interaction.Prompt, num) - } - - return fmt.Sprintf("%s: ", interaction.Prompt) - } - - switch v := dst.(type) { - case RequiredDestination: - switch v.Destination.(type) { - case *bool: - return fmt.Sprintf("%s [yn]: ", interaction.Prompt) - default: - return fmt.Sprintf("%s: ", interaction.Prompt) - } - case *int: - return fmt.Sprintf("%s (%d): ", interaction.Prompt, *v) - case *string: - return fmt.Sprintf("%s (%s): ", interaction.Prompt, *v) - case *bool: - var indicator string - if *v { - indicator = "Yn" - } else { - indicator = "yN" - } - - return fmt.Sprintf("%s [%s]: ", interaction.Prompt, indicator) - case *Password: - if len(*v) == 0 { - return fmt.Sprintf("%s (): ", interaction.Prompt) - } - - return fmt.Sprintf("%s (has default): ", interaction.Prompt) - default: - return fmt.Sprintf("%s (unknown): ", interaction.Prompt) - } -} - -func (interaction Interaction) choiceNumber(dst interface{}) (int, bool) { - for i, c := range interaction.Choices { - dstVal := reflect.ValueOf(dst).Elem() - - if c.Value == nil && dstVal.IsNil() { - return i + 1, true - } - - if reflect.DeepEqual(c.Value, dstVal.Interface()) { - return i + 1, true - } - } - - return 0, false -} - -func (interaction Interaction) resolveSingle(dst interface{}, user userIO, prompt string) error { - for { - _, retry, err := interaction.readInto(dst, user, prompt) - if err == io.EOF { - return err - } - - if err != nil { - if retry { - user.WriteLine(fmt.Sprintf("invalid input (%s)", err)) - continue - } else { - return err - } - } - - break - } - - return nil -} - -func (interaction Interaction) resolveChoices(dst interface{}, user userIO, prompt string) error { - dstVal := reflect.ValueOf(dst) - - for i, choice := range interaction.Choices { - err := user.WriteLine(fmt.Sprintf("%d: %s", i+1, choice.Display)) - if err != nil { - return err - } - } - - for { - var retry bool - var 
err error - - num, present := interaction.choiceNumber(dst) - if present { - _, retry, err = interaction.readInto(&num, user, prompt) - } else { - _, retry, err = interaction.readInto(Required(&num), user, prompt) - } - - if err == io.EOF { - return err - } - - if err != nil { - if retry { - user.WriteLine(fmt.Sprintf("invalid selection (%s)", err)) - continue - } else { - return err - } - } - - if num == 0 || num > len(interaction.Choices) { - user.WriteLine(fmt.Sprintf("invalid selection (must be 1-%d)", len(interaction.Choices))) - continue - } - - choice := interaction.Choices[num-1] - - if choice.Value == nil { - dstVal.Elem().Set(reflect.Zero(dstVal.Type().Elem())) - } else { - choiceVal := reflect.ValueOf(choice.Value) - - if choiceVal.Type().AssignableTo(dstVal.Type().Elem()) { - dstVal.Elem().Set(choiceVal) - } else { - return NotAssignableError{ - Value: choiceVal.Type(), - Destination: dstVal.Type().Elem(), - } - } - } - - return nil - } -} - -func (interaction Interaction) readInto(dst interface{}, user userIO, prompt string) (bool, bool, error) { - switch v := dst.(type) { - case RequiredDestination: - for { - read, retry, err := interaction.readInto(v.Destination, user, prompt) - if err != nil { - return false, retry, err - } - - if read { - return true, false, nil - } - } - - case *int: - line, err := user.ReadLine(prompt) - if err != nil { - return false, false, err - } - - if len(line) == 0 { - return false, false, nil - } - - num, err := strconv.Atoi(line) - if err != nil { - return false, true, ErrNotANumber - } - - *v = num - - return true, false, nil - - case *string: - line, err := user.ReadLine(prompt) - if err != nil { - return false, false, err - } - - if len(line) == 0 { - return false, false, nil - } - - *v = line - - return true, false, nil - - case *Password: - pass, err := user.ReadPassword(prompt) - if err != nil { - return false, false, err - } - - if len(pass) == 0 { - return false, false, nil - } - - *v = Password(pass) - - return true, false, nil - - case *bool: - line, err := user.ReadLine(prompt) - if err != nil { - return false, false, err - } - - if len(line) == 0 { - return false, false, nil - } - - switch line { - case "y", "Y", "yes": - *v = true - case "n", "N", "no": - *v = false - default: - return false, true, ErrNotBoolean - } - - return true, false, nil - } - - return false, false, fmt.Errorf("unknown destination type: %T", dst) -} diff --git a/vendor/github.com/vito/go-interact/interact/password.go b/vendor/github.com/vito/go-interact/interact/password.go deleted file mode 100644 index 21c172765..000000000 --- a/vendor/github.com/vito/go-interact/interact/password.go +++ /dev/null @@ -1,5 +0,0 @@ -package interact - -// Password is a string whose value will not be echoed when the user's typing -// or when used as a default value. -type Password string diff --git a/vendor/github.com/vito/go-interact/interact/required.go b/vendor/github.com/vito/go-interact/interact/required.go deleted file mode 100644 index 9db8ec5d3..000000000 --- a/vendor/github.com/vito/go-interact/interact/required.go +++ /dev/null @@ -1,13 +0,0 @@ -package interact - -// RequiredDestination wraps the real destination and indicates to Resolve -// that a value must be explicitly provided, and that there is no default. This -// is to distinguish from defaulting to the zero-value. -type RequiredDestination struct { - Destination interface{} -} - -// Required is a convenience function for constructing a RequiredDestination. 
-func Required(dst interface{}) RequiredDestination { - return RequiredDestination{dst} -} diff --git a/vendor/github.com/vito/go-interact/interact/userio.go b/vendor/github.com/vito/go-interact/interact/userio.go deleted file mode 100644 index 69862dda1..000000000 --- a/vendor/github.com/vito/go-interact/interact/userio.go +++ /dev/null @@ -1,131 +0,0 @@ -package interact - -import ( - "fmt" - "io" - "os" - - "golang.org/x/term" -) - -type userIO interface { - WriteLine(line string) error - - ReadLine(prompt string) (string, error) - ReadPassword(prompt string) (string, error) -} - -type ttyUser struct { - *term.Terminal -} - -func newTTYUser(input io.Reader, output *os.File) (ttyUser, error) { - t := term.NewTerminal(readWriter{input, output}, "") - - width, height, err := term.GetSize(int(output.Fd())) - if err != nil { - return ttyUser{}, err - } - - err = t.SetSize(width, height) - if err != nil { - return ttyUser{}, err - } - - return ttyUser{ - Terminal: t, - }, nil -} - -func (u ttyUser) WriteLine(line string) error { - _, err := fmt.Fprintf(u.Terminal, "%s\r\n", line) - return err -} - -func (u ttyUser) ReadLine(prompt string) (string, error) { - u.Terminal.SetPrompt(prompt) - return u.Terminal.ReadLine() -} - -type nonTTYUser struct { - io.Reader - io.Writer -} - -func newNonTTYUser(input io.Reader, output io.Writer) nonTTYUser { - return nonTTYUser{ - Reader: input, - Writer: output, - } -} - -func (u nonTTYUser) WriteLine(line string) error { - _, err := fmt.Fprintf(u.Writer, "%s\n", line) - return err -} - -func (u nonTTYUser) ReadLine(prompt string) (string, error) { - _, err := fmt.Fprintf(u.Writer, "%s", prompt) - if err != nil { - return "", err - } - - line, err := u.readLine() - if err != nil { - return "", err - } - - _, err = fmt.Fprintf(u.Writer, "%s\n", line) - if err != nil { - return "", err - } - - return line, nil -} - -func (u nonTTYUser) ReadPassword(prompt string) (string, error) { - _, err := fmt.Fprintf(u.Writer, "%s", prompt) - if err != nil { - return "", err - } - - line, err := u.readLine() - if err != nil { - return "", err - } - - _, err = fmt.Fprintf(u.Writer, "\n") - if err != nil { - return "", err - } - - return line, nil -} - -func (u nonTTYUser) readLine() (string, error) { - var line string - - for { - chr := make([]byte, 1) - n, err := u.Reader.Read(chr) - - if n == 1 { - if chr[0] == '\n' { - return line, nil - } else if chr[0] == '\r' { - continue - } - - line += string(chr) - } - - if err != nil { - return "", err - } - } -} - -type readWriter struct { - io.Reader - io.Writer -} diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/LICENSE b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/LICENSE deleted file mode 100644 index 1a9893b43..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/NOTICE b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/NOTICE deleted file mode 100644 index 2ca8bb6ed..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -kapp-controller - -Copyright (c) 2019 - Present Pivotal Software, Inc. All Rights Reserved. - -This product is licensed to you under the Apache License, Version 2.0 (the "License"). You may not use this product except in compliance with the License. - -This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file. diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/doc.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/doc.go deleted file mode 100644 index ad1e884ca..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -// +k8s:deepcopy-gen=package -// +k8s:defaulter-gen=TypeMeta -// +groupName=kappctrl.k14s.io -package v1alpha1 diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/generated.pb.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/generated.pb.go deleted file mode 100644 index cb7a0d94e..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/generated.pb.go +++ /dev/null @@ -1,10020 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/generated.proto - -package v1alpha1 - -import ( - fmt "fmt" - - v1alpha1 "carvel.dev/vendir/pkg/vendir/versions/v1alpha1" - - io "io" - - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *AppCluster) Reset() { *m = AppCluster{} } -func (*AppCluster) ProtoMessage() {} -func (*AppCluster) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{0} -} -func (m *AppCluster) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppCluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppCluster) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppCluster.Merge(m, src) -} -func (m *AppCluster) XXX_Size() int { - return m.Size() -} -func (m *AppCluster) XXX_DiscardUnknown() { - xxx_messageInfo_AppCluster.DiscardUnknown(m) -} - -var xxx_messageInfo_AppCluster proto.InternalMessageInfo - -func (m *AppClusterKubeconfigSecretRef) Reset() { *m = AppClusterKubeconfigSecretRef{} } -func (*AppClusterKubeconfigSecretRef) ProtoMessage() {} -func (*AppClusterKubeconfigSecretRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{1} -} -func (m *AppClusterKubeconfigSecretRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppClusterKubeconfigSecretRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppClusterKubeconfigSecretRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppClusterKubeconfigSecretRef.Merge(m, src) -} -func (m *AppClusterKubeconfigSecretRef) XXX_Size() int { - return m.Size() -} -func (m *AppClusterKubeconfigSecretRef) XXX_DiscardUnknown() { - xxx_messageInfo_AppClusterKubeconfigSecretRef.DiscardUnknown(m) -} - -var xxx_messageInfo_AppClusterKubeconfigSecretRef proto.InternalMessageInfo - -func (m *AppDeploy) Reset() { *m = AppDeploy{} } -func (*AppDeploy) ProtoMessage() {} -func (*AppDeploy) Descriptor() ([]byte, []int) { - return 
fileDescriptor_e972ccf085273df7, []int{2} -} -func (m *AppDeploy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppDeploy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppDeploy) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppDeploy.Merge(m, src) -} -func (m *AppDeploy) XXX_Size() int { - return m.Size() -} -func (m *AppDeploy) XXX_DiscardUnknown() { - xxx_messageInfo_AppDeploy.DiscardUnknown(m) -} - -var xxx_messageInfo_AppDeploy proto.InternalMessageInfo - -func (m *AppDeployKapp) Reset() { *m = AppDeployKapp{} } -func (*AppDeployKapp) ProtoMessage() {} -func (*AppDeployKapp) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{3} -} -func (m *AppDeployKapp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppDeployKapp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppDeployKapp) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppDeployKapp.Merge(m, src) -} -func (m *AppDeployKapp) XXX_Size() int { - return m.Size() -} -func (m *AppDeployKapp) XXX_DiscardUnknown() { - xxx_messageInfo_AppDeployKapp.DiscardUnknown(m) -} - -var xxx_messageInfo_AppDeployKapp proto.InternalMessageInfo - -func (m *AppDeployKappDelete) Reset() { *m = AppDeployKappDelete{} } -func (*AppDeployKappDelete) ProtoMessage() {} -func (*AppDeployKappDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{4} -} -func (m *AppDeployKappDelete) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppDeployKappDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppDeployKappDelete) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppDeployKappDelete.Merge(m, src) -} -func (m *AppDeployKappDelete) XXX_Size() int { - return m.Size() -} -func (m *AppDeployKappDelete) XXX_DiscardUnknown() { - xxx_messageInfo_AppDeployKappDelete.DiscardUnknown(m) -} - -var xxx_messageInfo_AppDeployKappDelete proto.InternalMessageInfo - -func (m *AppDeployKappInspect) Reset() { *m = AppDeployKappInspect{} } -func (*AppDeployKappInspect) ProtoMessage() {} -func (*AppDeployKappInspect) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{5} -} -func (m *AppDeployKappInspect) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppDeployKappInspect) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppDeployKappInspect) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppDeployKappInspect.Merge(m, src) -} -func (m *AppDeployKappInspect) XXX_Size() int { - return m.Size() -} -func (m *AppDeployKappInspect) XXX_DiscardUnknown() { - xxx_messageInfo_AppDeployKappInspect.DiscardUnknown(m) -} - -var xxx_messageInfo_AppDeployKappInspect proto.InternalMessageInfo - -func (m *AppFetch) Reset() { *m = AppFetch{} } -func (*AppFetch) ProtoMessage() {} -func (*AppFetch) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{6} -} -func (m *AppFetch) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*AppFetch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppFetch) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppFetch.Merge(m, src) -} -func (m *AppFetch) XXX_Size() int { - return m.Size() -} -func (m *AppFetch) XXX_DiscardUnknown() { - xxx_messageInfo_AppFetch.DiscardUnknown(m) -} - -var xxx_messageInfo_AppFetch proto.InternalMessageInfo - -func (m *AppFetchGit) Reset() { *m = AppFetchGit{} } -func (*AppFetchGit) ProtoMessage() {} -func (*AppFetchGit) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{7} -} -func (m *AppFetchGit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppFetchGit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppFetchGit) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppFetchGit.Merge(m, src) -} -func (m *AppFetchGit) XXX_Size() int { - return m.Size() -} -func (m *AppFetchGit) XXX_DiscardUnknown() { - xxx_messageInfo_AppFetchGit.DiscardUnknown(m) -} - -var xxx_messageInfo_AppFetchGit proto.InternalMessageInfo - -func (m *AppFetchHTTP) Reset() { *m = AppFetchHTTP{} } -func (*AppFetchHTTP) ProtoMessage() {} -func (*AppFetchHTTP) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{8} -} -func (m *AppFetchHTTP) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppFetchHTTP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppFetchHTTP) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppFetchHTTP.Merge(m, src) -} -func (m *AppFetchHTTP) XXX_Size() int { - return m.Size() -} -func (m *AppFetchHTTP) XXX_DiscardUnknown() { - xxx_messageInfo_AppFetchHTTP.DiscardUnknown(m) -} - -var xxx_messageInfo_AppFetchHTTP proto.InternalMessageInfo - -func (m *AppFetchHelmChart) Reset() { *m = AppFetchHelmChart{} } -func (*AppFetchHelmChart) ProtoMessage() {} -func (*AppFetchHelmChart) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{9} -} -func (m *AppFetchHelmChart) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppFetchHelmChart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppFetchHelmChart) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppFetchHelmChart.Merge(m, src) -} -func (m *AppFetchHelmChart) XXX_Size() int { - return m.Size() -} -func (m *AppFetchHelmChart) XXX_DiscardUnknown() { - xxx_messageInfo_AppFetchHelmChart.DiscardUnknown(m) -} - -var xxx_messageInfo_AppFetchHelmChart proto.InternalMessageInfo - -func (m *AppFetchHelmChartRepo) Reset() { *m = AppFetchHelmChartRepo{} } -func (*AppFetchHelmChartRepo) ProtoMessage() {} -func (*AppFetchHelmChartRepo) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{10} -} -func (m *AppFetchHelmChartRepo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppFetchHelmChartRepo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m 
*AppFetchHelmChartRepo) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppFetchHelmChartRepo.Merge(m, src) -} -func (m *AppFetchHelmChartRepo) XXX_Size() int { - return m.Size() -} -func (m *AppFetchHelmChartRepo) XXX_DiscardUnknown() { - xxx_messageInfo_AppFetchHelmChartRepo.DiscardUnknown(m) -} - -var xxx_messageInfo_AppFetchHelmChartRepo proto.InternalMessageInfo - -func (m *AppFetchImage) Reset() { *m = AppFetchImage{} } -func (*AppFetchImage) ProtoMessage() {} -func (*AppFetchImage) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{11} -} -func (m *AppFetchImage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppFetchImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppFetchImage) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppFetchImage.Merge(m, src) -} -func (m *AppFetchImage) XXX_Size() int { - return m.Size() -} -func (m *AppFetchImage) XXX_DiscardUnknown() { - xxx_messageInfo_AppFetchImage.DiscardUnknown(m) -} - -var xxx_messageInfo_AppFetchImage proto.InternalMessageInfo - -func (m *AppFetchImgpkgBundle) Reset() { *m = AppFetchImgpkgBundle{} } -func (*AppFetchImgpkgBundle) ProtoMessage() {} -func (*AppFetchImgpkgBundle) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{12} -} -func (m *AppFetchImgpkgBundle) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppFetchImgpkgBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppFetchImgpkgBundle) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppFetchImgpkgBundle.Merge(m, src) -} -func (m *AppFetchImgpkgBundle) XXX_Size() int { - return m.Size() -} -func (m *AppFetchImgpkgBundle) XXX_DiscardUnknown() { - xxx_messageInfo_AppFetchImgpkgBundle.DiscardUnknown(m) -} - -var xxx_messageInfo_AppFetchImgpkgBundle proto.InternalMessageInfo - -func (m *AppFetchInline) Reset() { *m = AppFetchInline{} } -func (*AppFetchInline) ProtoMessage() {} -func (*AppFetchInline) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{13} -} -func (m *AppFetchInline) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppFetchInline) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppFetchInline) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppFetchInline.Merge(m, src) -} -func (m *AppFetchInline) XXX_Size() int { - return m.Size() -} -func (m *AppFetchInline) XXX_DiscardUnknown() { - xxx_messageInfo_AppFetchInline.DiscardUnknown(m) -} - -var xxx_messageInfo_AppFetchInline proto.InternalMessageInfo - -func (m *AppFetchInlineSource) Reset() { *m = AppFetchInlineSource{} } -func (*AppFetchInlineSource) ProtoMessage() {} -func (*AppFetchInlineSource) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{14} -} -func (m *AppFetchInlineSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppFetchInlineSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppFetchInlineSource) XXX_Merge(src proto.Message) 
{ - xxx_messageInfo_AppFetchInlineSource.Merge(m, src) -} -func (m *AppFetchInlineSource) XXX_Size() int { - return m.Size() -} -func (m *AppFetchInlineSource) XXX_DiscardUnknown() { - xxx_messageInfo_AppFetchInlineSource.DiscardUnknown(m) -} - -var xxx_messageInfo_AppFetchInlineSource proto.InternalMessageInfo - -func (m *AppFetchInlineSourceRef) Reset() { *m = AppFetchInlineSourceRef{} } -func (*AppFetchInlineSourceRef) ProtoMessage() {} -func (*AppFetchInlineSourceRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{15} -} -func (m *AppFetchInlineSourceRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppFetchInlineSourceRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppFetchInlineSourceRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppFetchInlineSourceRef.Merge(m, src) -} -func (m *AppFetchInlineSourceRef) XXX_Size() int { - return m.Size() -} -func (m *AppFetchInlineSourceRef) XXX_DiscardUnknown() { - xxx_messageInfo_AppFetchInlineSourceRef.DiscardUnknown(m) -} - -var xxx_messageInfo_AppFetchInlineSourceRef proto.InternalMessageInfo - -func (m *AppFetchLocalRef) Reset() { *m = AppFetchLocalRef{} } -func (*AppFetchLocalRef) ProtoMessage() {} -func (*AppFetchLocalRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{16} -} -func (m *AppFetchLocalRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppFetchLocalRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppFetchLocalRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppFetchLocalRef.Merge(m, src) -} -func (m *AppFetchLocalRef) XXX_Size() int { - return m.Size() -} -func (m *AppFetchLocalRef) XXX_DiscardUnknown() { - xxx_messageInfo_AppFetchLocalRef.DiscardUnknown(m) -} - -var xxx_messageInfo_AppFetchLocalRef proto.InternalMessageInfo - -func (m *AppSpec) Reset() { *m = AppSpec{} } -func (*AppSpec) ProtoMessage() {} -func (*AppSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{17} -} -func (m *AppSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppSpec.Merge(m, src) -} -func (m *AppSpec) XXX_Size() int { - return m.Size() -} -func (m *AppSpec) XXX_DiscardUnknown() { - xxx_messageInfo_AppSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_AppSpec proto.InternalMessageInfo - -func (m *AppTemplate) Reset() { *m = AppTemplate{} } -func (*AppTemplate) ProtoMessage() {} -func (*AppTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{18} -} -func (m *AppTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppTemplate.Merge(m, src) -} -func (m *AppTemplate) XXX_Size() int { - return m.Size() -} -func (m *AppTemplate) 
XXX_DiscardUnknown() { - xxx_messageInfo_AppTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_AppTemplate proto.InternalMessageInfo - -func (m *AppTemplateCue) Reset() { *m = AppTemplateCue{} } -func (*AppTemplateCue) ProtoMessage() {} -func (*AppTemplateCue) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{19} -} -func (m *AppTemplateCue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppTemplateCue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppTemplateCue) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppTemplateCue.Merge(m, src) -} -func (m *AppTemplateCue) XXX_Size() int { - return m.Size() -} -func (m *AppTemplateCue) XXX_DiscardUnknown() { - xxx_messageInfo_AppTemplateCue.DiscardUnknown(m) -} - -var xxx_messageInfo_AppTemplateCue proto.InternalMessageInfo - -func (m *AppTemplateHelmTemplate) Reset() { *m = AppTemplateHelmTemplate{} } -func (*AppTemplateHelmTemplate) ProtoMessage() {} -func (*AppTemplateHelmTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{20} -} -func (m *AppTemplateHelmTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppTemplateHelmTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppTemplateHelmTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppTemplateHelmTemplate.Merge(m, src) -} -func (m *AppTemplateHelmTemplate) XXX_Size() int { - return m.Size() -} -func (m *AppTemplateHelmTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_AppTemplateHelmTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_AppTemplateHelmTemplate proto.InternalMessageInfo - -func (m *AppTemplateJsonnet) Reset() { *m = AppTemplateJsonnet{} } -func (*AppTemplateJsonnet) ProtoMessage() {} -func (*AppTemplateJsonnet) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{21} -} -func (m *AppTemplateJsonnet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppTemplateJsonnet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppTemplateJsonnet) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppTemplateJsonnet.Merge(m, src) -} -func (m *AppTemplateJsonnet) XXX_Size() int { - return m.Size() -} -func (m *AppTemplateJsonnet) XXX_DiscardUnknown() { - xxx_messageInfo_AppTemplateJsonnet.DiscardUnknown(m) -} - -var xxx_messageInfo_AppTemplateJsonnet proto.InternalMessageInfo - -func (m *AppTemplateKbld) Reset() { *m = AppTemplateKbld{} } -func (*AppTemplateKbld) ProtoMessage() {} -func (*AppTemplateKbld) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{22} -} -func (m *AppTemplateKbld) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppTemplateKbld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppTemplateKbld) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppTemplateKbld.Merge(m, src) -} -func (m *AppTemplateKbld) XXX_Size() int { - return m.Size() -} -func (m *AppTemplateKbld) XXX_DiscardUnknown() { - 
xxx_messageInfo_AppTemplateKbld.DiscardUnknown(m) -} - -var xxx_messageInfo_AppTemplateKbld proto.InternalMessageInfo - -func (m *AppTemplateKustomize) Reset() { *m = AppTemplateKustomize{} } -func (*AppTemplateKustomize) ProtoMessage() {} -func (*AppTemplateKustomize) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{23} -} -func (m *AppTemplateKustomize) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppTemplateKustomize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppTemplateKustomize) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppTemplateKustomize.Merge(m, src) -} -func (m *AppTemplateKustomize) XXX_Size() int { - return m.Size() -} -func (m *AppTemplateKustomize) XXX_DiscardUnknown() { - xxx_messageInfo_AppTemplateKustomize.DiscardUnknown(m) -} - -var xxx_messageInfo_AppTemplateKustomize proto.InternalMessageInfo - -func (m *AppTemplateSops) Reset() { *m = AppTemplateSops{} } -func (*AppTemplateSops) ProtoMessage() {} -func (*AppTemplateSops) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{24} -} -func (m *AppTemplateSops) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppTemplateSops) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppTemplateSops) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppTemplateSops.Merge(m, src) -} -func (m *AppTemplateSops) XXX_Size() int { - return m.Size() -} -func (m *AppTemplateSops) XXX_DiscardUnknown() { - xxx_messageInfo_AppTemplateSops.DiscardUnknown(m) -} - -var xxx_messageInfo_AppTemplateSops proto.InternalMessageInfo - -func (m *AppTemplateSopsAge) Reset() { *m = AppTemplateSopsAge{} } -func (*AppTemplateSopsAge) ProtoMessage() {} -func (*AppTemplateSopsAge) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{25} -} -func (m *AppTemplateSopsAge) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppTemplateSopsAge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppTemplateSopsAge) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppTemplateSopsAge.Merge(m, src) -} -func (m *AppTemplateSopsAge) XXX_Size() int { - return m.Size() -} -func (m *AppTemplateSopsAge) XXX_DiscardUnknown() { - xxx_messageInfo_AppTemplateSopsAge.DiscardUnknown(m) -} - -var xxx_messageInfo_AppTemplateSopsAge proto.InternalMessageInfo - -func (m *AppTemplateSopsPGP) Reset() { *m = AppTemplateSopsPGP{} } -func (*AppTemplateSopsPGP) ProtoMessage() {} -func (*AppTemplateSopsPGP) Descriptor() ([]byte, []int) { - return fileDescriptor_e972ccf085273df7, []int{26} -} -func (m *AppTemplateSopsPGP) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppTemplateSopsPGP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppTemplateSopsPGP) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppTemplateSopsPGP.Merge(m, src) -} -func (m *AppTemplateSopsPGP) XXX_Size() int { - return m.Size() -} -func (m *AppTemplateSopsPGP) XXX_DiscardUnknown() { - 
[Deleted auto-generated gogo/protobuf code elided (all lines in this hunk are removals).
The removed content comprises:
  - the remaining XXX_Unmarshal/XXX_Marshal/XXX_Merge/XXX_Size/XXX_DiscardUnknown boilerplate for
    AppTemplateSopsPrivateKeysSecretRef, AppTemplateValuesDownwardAPI, AppTemplateValuesDownwardAPIItem,
    AppTemplateValuesSource, AppTemplateValuesSourceRef, AppTemplateYtt, Condition, GenericStatus,
    KubernetesAPIs, and Version;
  - the init() proto.RegisterType and proto.RegisterFile registrations for every
    github.com/vmware-tanzu/carvel-kapp-controller pkg/apis/kappctrl/v1alpha1 message type;
  - the 2587-byte gzipped fileDescriptor_e972ccf085273df7 FileDescriptorProto byte array;
  - the Marshal, MarshalTo, MarshalToSizedBuffer, and encodeVarintGenerated implementations for every message;
  - and the beginning of the per-message Size implementations.]
sovGenerated(uint64(l)) - n += 2 - if m.RefSelection != nil { - l = m.RefSelection.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n -} - -func (m *AppFetchHTTP) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SHA256) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.SubPath) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AppFetchHelmChart) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - if m.Repository != nil { - l = m.Repository.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppFetchHelmChartRepo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppFetchImage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.SubPath) - n += 1 + l + sovGenerated(uint64(l)) - if m.TagSelection != nil { - l = m.TagSelection.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppFetchImgpkgBundle) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TagSelection != nil { - l = m.TagSelection.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppFetchInline) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Paths) > 0 { - for k, v := range m.Paths { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.PathsFrom) > 0 { - for _, e := range m.PathsFrom { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *AppFetchInlineSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ConfigMapRef != nil { - l = m.ConfigMapRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppFetchInlineSourceRef) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DirectoryPath) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AppFetchLocalRef) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AppSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Cluster != nil { - l = m.Cluster.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Fetch) > 0 { - for _, e := range m.Fetch { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Template) > 0 { - for _, e := range m.Template { - l = e.Size() - 
n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Deploy) > 0 { - for _, e := range m.Deploy { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - n += 2 - if m.SyncPeriod != nil { - l = m.SyncPeriod.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - l = len(m.DefaultNamespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AppTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Ytt != nil { - l = m.Ytt.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Kbld != nil { - l = m.Kbld.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HelmTemplate != nil { - l = m.HelmTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Kustomize != nil { - l = m.Kustomize.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Jsonnet != nil { - l = m.Jsonnet.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Sops != nil { - l = m.Sops.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Cue != nil { - l = m.Cue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppTemplateCue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Paths) > 0 { - for _, s := range m.Paths { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ValuesFrom) > 0 { - for _, e := range m.ValuesFrom { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.InputExpression) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.OutputExpression) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AppTemplateHelmTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ValuesFrom) > 0 { - for _, e := range m.ValuesFrom { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.KubernetesVersion != nil { - l = m.KubernetesVersion.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.KubernetesAPIs != nil { - l = m.KubernetesAPIs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppTemplateJsonnet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *AppTemplateKbld) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Paths) > 0 { - for _, s := range m.Paths { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *AppTemplateKustomize) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *AppTemplateSops) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PGP != nil { - l = m.PGP.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Paths) > 0 { - for _, s := range m.Paths { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Age != nil { - l = m.Age.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppTemplateSopsAge) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PrivateKeysSecretRef != nil { - l = m.PrivateKeysSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppTemplateSopsPGP) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PrivateKeysSecretRef != nil { - l = m.PrivateKeysSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m 
*AppTemplateSopsPrivateKeysSecretRef) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AppTemplateValuesDownwardAPI) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *AppTemplateValuesDownwardAPIItem) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldPath) - n += 1 + l + sovGenerated(uint64(l)) - if m.KubernetesVersion != nil { - l = m.KubernetesVersion.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.KappControllerVersion != nil { - l = m.KappControllerVersion.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.KubernetesAPIs != nil { - l = m.KubernetesAPIs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppTemplateValuesSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ConfigMapRef != nil { - l = m.ConfigMapRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppTemplateValuesSourceRef) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AppTemplateYtt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - n += 2 - if m.Inline != nil { - l = m.Inline.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Paths) > 0 { - for _, s := range m.Paths { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.FileMarks) > 0 { - for _, s := range m.FileMarks { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ValuesFrom) > 0 { - for _, e := range m.ValuesFrom { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Condition) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GenericStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.FriendlyDescription) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UsefulErrorMessage) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *KubernetesAPIs) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.GroupVersions) > 0 { - for _, s := range m.GroupVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Version) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 
1) ^ uint64((int64(x) >> 63)))) -} -func (this *AppCluster) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppCluster{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `KubeconfigSecretRef:` + strings.Replace(this.KubeconfigSecretRef.String(), "AppClusterKubeconfigSecretRef", "AppClusterKubeconfigSecretRef", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppClusterKubeconfigSecretRef) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppClusterKubeconfigSecretRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `}`, - }, "") - return s -} -func (this *AppDeploy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppDeploy{`, - `Kapp:` + strings.Replace(this.Kapp.String(), "AppDeployKapp", "AppDeployKapp", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppDeployKapp) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppDeployKapp{`, - `IntoNs:` + fmt.Sprintf("%v", this.IntoNs) + `,`, - `MapNs:` + fmt.Sprintf("%v", this.MapNs) + `,`, - `RawOptions:` + fmt.Sprintf("%v", this.RawOptions) + `,`, - `Inspect:` + strings.Replace(this.Inspect.String(), "AppDeployKappInspect", "AppDeployKappInspect", 1) + `,`, - `Delete:` + strings.Replace(this.Delete.String(), "AppDeployKappDelete", "AppDeployKappDelete", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppDeployKappDelete) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppDeployKappDelete{`, - `RawOptions:` + fmt.Sprintf("%v", this.RawOptions) + `,`, - `}`, - }, "") - return s -} -func (this *AppDeployKappInspect) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppDeployKappInspect{`, - `RawOptions:` + fmt.Sprintf("%v", this.RawOptions) + `,`, - `}`, - }, "") - return s -} -func (this *AppFetch) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppFetch{`, - `Inline:` + strings.Replace(this.Inline.String(), "AppFetchInline", "AppFetchInline", 1) + `,`, - `Image:` + strings.Replace(this.Image.String(), "AppFetchImage", "AppFetchImage", 1) + `,`, - `HTTP:` + strings.Replace(this.HTTP.String(), "AppFetchHTTP", "AppFetchHTTP", 1) + `,`, - `Git:` + strings.Replace(this.Git.String(), "AppFetchGit", "AppFetchGit", 1) + `,`, - `HelmChart:` + strings.Replace(this.HelmChart.String(), "AppFetchHelmChart", "AppFetchHelmChart", 1) + `,`, - `ImgpkgBundle:` + strings.Replace(this.ImgpkgBundle.String(), "AppFetchImgpkgBundle", "AppFetchImgpkgBundle", 1) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *AppFetchGit) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppFetchGit{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "AppFetchLocalRef", "AppFetchLocalRef", 1) + `,`, - `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, - `LFSSkipSmudge:` + fmt.Sprintf("%v", this.LFSSkipSmudge) + `,`, - `RefSelection:` + strings.Replace(fmt.Sprintf("%v", this.RefSelection), "VersionSelection", "v1alpha1.VersionSelection", 1) + `,`, - `ForceHTTPBasicAuth:` + fmt.Sprintf("%v", this.ForceHTTPBasicAuth) + `,`, - `}`, - }, "") - return s -} -func (this *AppFetchHTTP) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&AppFetchHTTP{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `SHA256:` + fmt.Sprintf("%v", this.SHA256) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "AppFetchLocalRef", "AppFetchLocalRef", 1) + `,`, - `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, - `}`, - }, "") - return s -} -func (this *AppFetchHelmChart) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppFetchHelmChart{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `Repository:` + strings.Replace(this.Repository.String(), "AppFetchHelmChartRepo", "AppFetchHelmChartRepo", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppFetchHelmChartRepo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppFetchHelmChartRepo{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "AppFetchLocalRef", "AppFetchLocalRef", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppFetchImage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppFetchImage{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "AppFetchLocalRef", "AppFetchLocalRef", 1) + `,`, - `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, - `TagSelection:` + strings.Replace(fmt.Sprintf("%v", this.TagSelection), "VersionSelection", "v1alpha1.VersionSelection", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppFetchImgpkgBundle) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppFetchImgpkgBundle{`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "AppFetchLocalRef", "AppFetchLocalRef", 1) + `,`, - `TagSelection:` + strings.Replace(fmt.Sprintf("%v", this.TagSelection), "VersionSelection", "v1alpha1.VersionSelection", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppFetchInline) String() string { - if this == nil { - return "nil" - } - repeatedStringForPathsFrom := "[]AppFetchInlineSource{" - for _, f := range this.PathsFrom { - repeatedStringForPathsFrom += strings.Replace(strings.Replace(f.String(), "AppFetchInlineSource", "AppFetchInlineSource", 1), `&`, ``, 1) + "," - } - repeatedStringForPathsFrom += "}" - keysForPaths := make([]string, 0, len(this.Paths)) - for k := range this.Paths { - keysForPaths = append(keysForPaths, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPaths) - mapStringForPaths := "map[string]string{" - for _, k := range keysForPaths { - mapStringForPaths += fmt.Sprintf("%v: %v,", k, this.Paths[k]) - } - mapStringForPaths += "}" - s := strings.Join([]string{`&AppFetchInline{`, - `Paths:` + mapStringForPaths + `,`, - `PathsFrom:` + repeatedStringForPathsFrom + `,`, - `}`, - }, "") - return s -} -func (this *AppFetchInlineSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppFetchInlineSource{`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "AppFetchInlineSourceRef", "AppFetchInlineSourceRef", 1) + `,`, - `ConfigMapRef:` + strings.Replace(this.ConfigMapRef.String(), "AppFetchInlineSourceRef", "AppFetchInlineSourceRef", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppFetchInlineSourceRef) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppFetchInlineSourceRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - 
`DirectoryPath:` + fmt.Sprintf("%v", this.DirectoryPath) + `,`, - `}`, - }, "") - return s -} -func (this *AppFetchLocalRef) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppFetchLocalRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *AppSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForFetch := "[]AppFetch{" - for _, f := range this.Fetch { - repeatedStringForFetch += strings.Replace(strings.Replace(f.String(), "AppFetch", "AppFetch", 1), `&`, ``, 1) + "," - } - repeatedStringForFetch += "}" - repeatedStringForTemplate := "[]AppTemplate{" - for _, f := range this.Template { - repeatedStringForTemplate += strings.Replace(strings.Replace(f.String(), "AppTemplate", "AppTemplate", 1), `&`, ``, 1) + "," - } - repeatedStringForTemplate += "}" - repeatedStringForDeploy := "[]AppDeploy{" - for _, f := range this.Deploy { - repeatedStringForDeploy += strings.Replace(strings.Replace(f.String(), "AppDeploy", "AppDeploy", 1), `&`, ``, 1) + "," - } - repeatedStringForDeploy += "}" - s := strings.Join([]string{`&AppSpec{`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `Cluster:` + strings.Replace(this.Cluster.String(), "AppCluster", "AppCluster", 1) + `,`, - `Fetch:` + repeatedStringForFetch + `,`, - `Template:` + repeatedStringForTemplate + `,`, - `Deploy:` + repeatedStringForDeploy + `,`, - `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `Canceled:` + fmt.Sprintf("%v", this.Canceled) + `,`, - `SyncPeriod:` + strings.Replace(fmt.Sprintf("%v", this.SyncPeriod), "Duration", "v1.Duration", 1) + `,`, - `NoopDelete:` + fmt.Sprintf("%v", this.NoopDelete) + `,`, - `DefaultNamespace:` + fmt.Sprintf("%v", this.DefaultNamespace) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppTemplate{`, - `Ytt:` + strings.Replace(this.Ytt.String(), "AppTemplateYtt", "AppTemplateYtt", 1) + `,`, - `Kbld:` + strings.Replace(this.Kbld.String(), "AppTemplateKbld", "AppTemplateKbld", 1) + `,`, - `HelmTemplate:` + strings.Replace(this.HelmTemplate.String(), "AppTemplateHelmTemplate", "AppTemplateHelmTemplate", 1) + `,`, - `Kustomize:` + strings.Replace(this.Kustomize.String(), "AppTemplateKustomize", "AppTemplateKustomize", 1) + `,`, - `Jsonnet:` + strings.Replace(this.Jsonnet.String(), "AppTemplateJsonnet", "AppTemplateJsonnet", 1) + `,`, - `Sops:` + strings.Replace(this.Sops.String(), "AppTemplateSops", "AppTemplateSops", 1) + `,`, - `Cue:` + strings.Replace(this.Cue.String(), "AppTemplateCue", "AppTemplateCue", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateCue) String() string { - if this == nil { - return "nil" - } - repeatedStringForValuesFrom := "[]AppTemplateValuesSource{" - for _, f := range this.ValuesFrom { - repeatedStringForValuesFrom += strings.Replace(strings.Replace(f.String(), "AppTemplateValuesSource", "AppTemplateValuesSource", 1), `&`, ``, 1) + "," - } - repeatedStringForValuesFrom += "}" - s := strings.Join([]string{`&AppTemplateCue{`, - `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, - `ValuesFrom:` + repeatedStringForValuesFrom + `,`, - `InputExpression:` + fmt.Sprintf("%v", this.InputExpression) + `,`, - `OutputExpression:` + fmt.Sprintf("%v", this.OutputExpression) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateHelmTemplate) String() string { - if this == nil { - return "nil" - } - repeatedStringForValuesFrom := 
"[]AppTemplateValuesSource{" - for _, f := range this.ValuesFrom { - repeatedStringForValuesFrom += strings.Replace(strings.Replace(f.String(), "AppTemplateValuesSource", "AppTemplateValuesSource", 1), `&`, ``, 1) + "," - } - repeatedStringForValuesFrom += "}" - s := strings.Join([]string{`&AppTemplateHelmTemplate{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `ValuesFrom:` + repeatedStringForValuesFrom + `,`, - `KubernetesVersion:` + strings.Replace(this.KubernetesVersion.String(), "Version", "Version", 1) + `,`, - `KubernetesAPIs:` + strings.Replace(this.KubernetesAPIs.String(), "KubernetesAPIs", "KubernetesAPIs", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateJsonnet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppTemplateJsonnet{`, - `}`, - }, "") - return s -} -func (this *AppTemplateKbld) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppTemplateKbld{`, - `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateKustomize) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppTemplateKustomize{`, - `}`, - }, "") - return s -} -func (this *AppTemplateSops) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppTemplateSops{`, - `PGP:` + strings.Replace(this.PGP.String(), "AppTemplateSopsPGP", "AppTemplateSopsPGP", 1) + `,`, - `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, - `Age:` + strings.Replace(this.Age.String(), "AppTemplateSopsAge", "AppTemplateSopsAge", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateSopsAge) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppTemplateSopsAge{`, - `PrivateKeysSecretRef:` + strings.Replace(this.PrivateKeysSecretRef.String(), "AppTemplateSopsPrivateKeysSecretRef", "AppTemplateSopsPrivateKeysSecretRef", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateSopsPGP) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppTemplateSopsPGP{`, - `PrivateKeysSecretRef:` + strings.Replace(this.PrivateKeysSecretRef.String(), "AppTemplateSopsPrivateKeysSecretRef", "AppTemplateSopsPrivateKeysSecretRef", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateSopsPrivateKeysSecretRef) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppTemplateSopsPrivateKeysSecretRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateValuesDownwardAPI) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]AppTemplateValuesDownwardAPIItem{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AppTemplateValuesDownwardAPIItem", "AppTemplateValuesDownwardAPIItem", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&AppTemplateValuesDownwardAPI{`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateValuesDownwardAPIItem) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppTemplateValuesDownwardAPIItem{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, - `KubernetesVersion:` + 
strings.Replace(this.KubernetesVersion.String(), "Version", "Version", 1) + `,`, - `KappControllerVersion:` + strings.Replace(this.KappControllerVersion.String(), "Version", "Version", 1) + `,`, - `KubernetesAPIs:` + strings.Replace(this.KubernetesAPIs.String(), "KubernetesAPIs", "KubernetesAPIs", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateValuesSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppTemplateValuesSource{`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "AppTemplateValuesSourceRef", "AppTemplateValuesSourceRef", 1) + `,`, - `ConfigMapRef:` + strings.Replace(this.ConfigMapRef.String(), "AppTemplateValuesSourceRef", "AppTemplateValuesSourceRef", 1) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "AppTemplateValuesDownwardAPI", "AppTemplateValuesDownwardAPI", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateValuesSourceRef) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppTemplateValuesSourceRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *AppTemplateYtt) String() string { - if this == nil { - return "nil" - } - repeatedStringForValuesFrom := "[]AppTemplateValuesSource{" - for _, f := range this.ValuesFrom { - repeatedStringForValuesFrom += strings.Replace(strings.Replace(f.String(), "AppTemplateValuesSource", "AppTemplateValuesSource", 1), `&`, ``, 1) + "," - } - repeatedStringForValuesFrom += "}" - s := strings.Join([]string{`&AppTemplateYtt{`, - `IgnoreUnknownComments:` + fmt.Sprintf("%v", this.IgnoreUnknownComments) + `,`, - `Strict:` + fmt.Sprintf("%v", this.Strict) + `,`, - `Inline:` + strings.Replace(this.Inline.String(), "AppFetchInline", "AppFetchInline", 1) + `,`, - `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, - `FileMarks:` + fmt.Sprintf("%v", this.FileMarks) + `,`, - `ValuesFrom:` + repeatedStringForValuesFrom + `,`, - `}`, - }, "") - return s -} -func (this *Condition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Condition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *GenericStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&GenericStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `FriendlyDescription:` + fmt.Sprintf("%v", this.FriendlyDescription) + `,`, - `UsefulErrorMessage:` + fmt.Sprintf("%v", this.UsefulErrorMessage) + `,`, - `}`, - }, "") - return s -} -func (this *KubernetesAPIs) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&KubernetesAPIs{`, - `GroupVersions:` + fmt.Sprintf("%v", this.GroupVersions) + `,`, - `}`, - }, "") - return s -} -func (this *Version) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Version{`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `}`, - }, "") - return s -} -func 
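For orientation, the deleted marshalers all follow one gogo/protobuf pattern: Size() pre-computes the wire size, and MarshalToSizedBuffer() fills the buffer back-to-front, writing each field's payload first and its tag byte last, with encodeVarintGenerated emitting lengths as varints just before the data they describe. Below is a minimal standalone sketch of that pattern using hypothetical Msg and Ref types (not the kapp-controller API); it is illustrative only, not a stand-in for the generated code.

// varint + back-to-front marshaling sketch (hypothetical types)
package main

import (
	"fmt"
	"math/bits"
)

// sov reports the protobuf varint size of x (7 payload bits per byte),
// matching what the generated code calls sovGenerated.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint writes x so that it ends just before dAtA[offset] and
// returns the index where the varint now starts (buffers fill backwards).
func encodeVarint(dAtA []byte, offset int, x uint64) int {
	offset -= sov(x)
	base := offset
	for x >= 1<<7 {
		dAtA[offset] = uint8(x&0x7f | 0x80) // low 7 bits, continuation bit set
		x >>= 7
		offset++
	}
	dAtA[offset] = uint8(x)
	return base
}

// Ref stands in for a nested message such as a local secret reference.
type Ref struct{ Name string }

func (m *Ref) Size() int { l := len(m.Name); return 1 + l + sov(uint64(l)) }

func (m *Ref) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	i -= len(m.Name)
	copy(dAtA[i:], m.Name)
	i = encodeVarint(dAtA, i, uint64(len(m.Name)))
	i--
	dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
	return len(dAtA) - i, nil
}

// Msg stands in for a two-field message: string Name = 1; Ref SecretRef = 2.
type Msg struct {
	Name      string
	SecretRef *Ref
}

func (m *Msg) Size() (n int) {
	l := len(m.Name)
	n += 1 + l + sov(uint64(l))
	if m.SecretRef != nil {
		l = m.SecretRef.Size()
		n += 1 + l + sov(uint64(l))
	}
	return n
}

func (m *Msg) Marshal() ([]byte, error) {
	dAtA := make([]byte, m.Size())
	i := len(dAtA)
	// Highest field number is written first so the buffer ends up in order.
	if m.SecretRef != nil {
		size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return nil, err
		}
		i -= size
		i = encodeVarint(dAtA, i, uint64(size))
		i--
		dAtA[i] = 0x12 // field 2, wire type 2
	}
	i -= len(m.Name)
	copy(dAtA[i:], m.Name)
	i = encodeVarint(dAtA, i, uint64(len(m.Name)))
	i--
	dAtA[i] = 0xa // field 1, wire type 2
	return dAtA[i:], nil
}

func main() {
	b, _ := (&Msg{Name: "app", SecretRef: &Ref{Name: "creds"}}).Marshal()
	fmt.Printf("% x\n", b) // 0a 03 "app" 12 07 0a 05 "creds"
}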
[The hunk then removes valueToStringGenerated and the generated Unmarshal methods in sequence (AppCluster, AppClusterKubeconfigSecretRef, AppDeploy, AppDeployKapp, AppDeployKappDelete, AppDeployKappInspect, AppFetch, AppFetchGit, AppFetchHTTP, AppFetchHelmChart, ...). Each of them decodes a varint field key, switches on the field number and wire type, and enforces the ErrIntOverflowGenerated and ErrInvalidLengthGenerated bounds checks before reading length-delimited payloads or skipping unknown fields with skipGenerated.]
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Repository == nil { - m.Repository = &AppFetchHelmChartRepo{} - } - if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppFetchHelmChartRepo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppFetchHelmChartRepo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppFetchHelmChartRepo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &AppFetchLocalRef{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppFetchImage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppFetchImage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppFetchImage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &AppFetchLocalRef{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TagSelection", wireType) - } - var msglen int - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TagSelection == nil { - m.TagSelection = &v1alpha1.VersionSelection{} - } - if err := m.TagSelection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppFetchImgpkgBundle) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppFetchImgpkgBundle: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppFetchImgpkgBundle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &AppFetchLocalRef{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TagSelection", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - 
postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TagSelection == nil { - m.TagSelection = &v1alpha1.VersionSelection{} - } - if err := m.TagSelection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppFetchInline) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppFetchInline: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppFetchInline: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Paths == nil { - m.Paths = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + 
intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Paths[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathsFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathsFrom = append(m.PathsFrom, AppFetchInlineSource{}) - if err := m.PathsFrom[len(m.PathsFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppFetchInlineSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppFetchInlineSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppFetchInlineSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &AppFetchInlineSourceRef{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapRef == nil { - m.ConfigMapRef = &AppFetchInlineSourceRef{} - } - if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppFetchInlineSourceRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppFetchInlineSourceRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppFetchInlineSourceRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DirectoryPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DirectoryPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppFetchLocalRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppFetchLocalRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppFetchLocalRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Cluster == nil { - m.Cluster = &AppCluster{} - } - if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fetch", wireType) 
- } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Fetch = append(m.Fetch, AppFetch{}) - if err := m.Fetch[len(m.Fetch)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Template = append(m.Template, AppTemplate{}) - if err := m.Template[len(m.Template)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deploy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Deploy = append(m.Deploy, AppDeploy{}) - if err := m.Deploy[len(m.Deploy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Paused = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Canceled = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SyncPeriod", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SyncPeriod == nil { - m.SyncPeriod = &v1.Duration{} - } - if err := 
m.SyncPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoopDelete", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoopDelete = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultNamespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DefaultNamespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ytt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Ytt == nil { - m.Ytt = &AppTemplateYtt{} - } - if err := m.Ytt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kbld", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kbld == nil { - m.Kbld = 
&AppTemplateKbld{} - } - if err := m.Kbld.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HelmTemplate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HelmTemplate == nil { - m.HelmTemplate = &AppTemplateHelmTemplate{} - } - if err := m.HelmTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kustomize", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kustomize == nil { - m.Kustomize = &AppTemplateKustomize{} - } - if err := m.Kustomize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Jsonnet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Jsonnet == nil { - m.Jsonnet = &AppTemplateJsonnet{} - } - if err := m.Jsonnet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sops", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Sops == nil { - m.Sops = &AppTemplateSops{} - } - if err := m.Sops.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx 
+ msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Cue == nil { - m.Cue = &AppTemplateCue{} - } - if err := m.Cue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateCue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateCue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateCue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValuesFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ValuesFrom = append(m.ValuesFrom, AppTemplateValuesSource{}) - if err := m.ValuesFrom[len(m.ValuesFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InputExpression", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.InputExpression = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field OutputExpression", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OutputExpression = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateHelmTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateHelmTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateHelmTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValuesFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ValuesFrom = append(m.ValuesFrom, AppTemplateValuesSource{}) - if err := m.ValuesFrom[len(m.ValuesFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KubernetesVersion == nil { - m.KubernetesVersion = &Version{} - } - if err := m.KubernetesVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesAPIs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KubernetesAPIs == nil { - m.KubernetesAPIs = &KubernetesAPIs{} - } - if err := m.KubernetesAPIs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateJsonnet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateJsonnet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateJsonnet: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateKbld) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateKbld: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateKbld: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateKustomize) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateKustomize: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateKustomize: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateSops) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - 
} - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateSops: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateSops: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PGP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PGP == nil { - m.PGP = &AppTemplateSopsPGP{} - } - if err := m.PGP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Age", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Age == nil { - m.Age = &AppTemplateSopsAge{} - } - if err := m.Age.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateSopsAge) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateSopsAge: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateSopsAge: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field PrivateKeysSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PrivateKeysSecretRef == nil { - m.PrivateKeysSecretRef = &AppTemplateSopsPrivateKeysSecretRef{} - } - if err := m.PrivateKeysSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateSopsPGP) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateSopsPGP: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateSopsPGP: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrivateKeysSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PrivateKeysSecretRef == nil { - m.PrivateKeysSecretRef = &AppTemplateSopsPrivateKeysSecretRef{} - } - if err := m.PrivateKeysSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateSopsPrivateKeysSecretRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateSopsPrivateKeysSecretRef: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateSopsPrivateKeysSecretRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateValuesDownwardAPI) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateValuesDownwardAPI: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateValuesDownwardAPI: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, AppTemplateValuesDownwardAPIItem{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateValuesDownwardAPIItem) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
AppTemplateValuesDownwardAPIItem: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateValuesDownwardAPIItem: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FieldPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KubernetesVersion == nil { - m.KubernetesVersion = &Version{} - } - if err := m.KubernetesVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KappControllerVersion", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KappControllerVersion == nil { - m.KappControllerVersion = &Version{} - } - if err := m.KappControllerVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesAPIs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen 
< 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KubernetesAPIs == nil { - m.KubernetesAPIs = &KubernetesAPIs{} - } - if err := m.KubernetesAPIs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateValuesSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateValuesSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateValuesSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &AppTemplateValuesSourceRef{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapRef == nil { - m.ConfigMapRef = &AppTemplateValuesSourceRef{} - } - if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DownwardAPI == nil { - m.DownwardAPI = &AppTemplateValuesDownwardAPI{} - } - if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateValuesSourceRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateValuesSourceRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppTemplateValuesSourceRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppTemplateYtt) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppTemplateYtt: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: AppTemplateYtt: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreUnknownComments", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IgnoreUnknownComments = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Strict", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Strict = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inline", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Inline == nil { - m.Inline = &AppFetchInline{} - } - if err := m.Inline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FileMarks", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FileMarks = append(m.FileMarks, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValuesFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ValuesFrom = append(m.ValuesFrom, AppTemplateValuesSource{}) - if err := m.ValuesFrom[len(m.ValuesFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Condition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Condition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Condition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GenericStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenericStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenericStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FriendlyDescription", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FriendlyDescription = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
UsefulErrorMessage", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UsefulErrorMessage = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KubernetesAPIs) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KubernetesAPIs: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KubernetesAPIs: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GroupVersions = append(m.GroupVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Version) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Version: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/generated.proto b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/generated.proto deleted file mode 100644 index 61e8100a9..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/generated.proto +++ /dev/null @@ -1,503 +0,0 @@ - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package github.com.vmware_tanzu.carvel_kapp_controller.pkg.apis.kappctrl.v1alpha1; - -import "carvel.dev/vendir/pkg/vendir/versions/v1alpha1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". 
-option go_package = "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1"; - -// +k8s:openapi-gen=true -message AppCluster { - // Specifies namespace in destination cluster (optional) - // +optional - optional string namespace = 1; - - // Specifies secret containing kubeconfig (required) - // +optional - optional AppClusterKubeconfigSecretRef kubeconfigSecretRef = 2; -} - -// +k8s:openapi-gen=true -message AppClusterKubeconfigSecretRef { - // Specifies secret name within app's namespace (required) - // +optional - optional string name = 1; - - // Specifies key that contains kubeconfig (optional) - // +optional - optional string key = 2; -} - -// +k8s:openapi-gen=true -message AppDeploy { - // Use kapp to deploy resources - optional AppDeployKapp kapp = 1; -} - -// +k8s:openapi-gen=true -message AppDeployKapp { - // Override namespace for all resources (optional) - optional string intoNs = 1; - - // Provide custom namespace override mapping (optional) - repeated string mapNs = 2; - - // Pass through options to kapp deploy (optional) - repeated string rawOptions = 3; - - // Configuration for inspect command (optional) - // as of kapp-controller v0.31.0, inspect is disabled by default - // add rawOptions or use an empty inspect config like `inspect: {}` to enable - optional AppDeployKappInspect inspect = 4; - - // Configuration for delete command (optional) - optional AppDeployKappDelete delete = 5; -} - -// +k8s:openapi-gen=true -message AppDeployKappDelete { - // Pass through options to kapp delete (optional) - repeated string rawOptions = 1; -} - -// +k8s:openapi-gen=true -message AppDeployKappInspect { - // Pass through options to kapp inspect (optional) - repeated string rawOptions = 1; -} - -// +k8s:openapi-gen=true -message AppFetch { - // Pulls content from within this resource; or other resources in the cluster - optional AppFetchInline inline = 1; - - // Pulls content from Docker/OCI registry - optional AppFetchImage image = 2; - - // Uses http library to fetch file - optional AppFetchHTTP http = 3; - - // Uses git to clone repository - optional AppFetchGit git = 4; - - // Uses helm fetch to fetch specified chart - optional AppFetchHelmChart helmChart = 5; - - // Pulls imgpkg bundle from Docker/OCI registry (v0.17.0+) - optional AppFetchImgpkgBundle imgpkgBundle = 6; - - // Relative path to place the fetched artifacts - // +optional - optional string path = 7; -} - -// +k8s:openapi-gen=true -message AppFetchGit { - // http or ssh urls are supported (required) - optional string url = 1; - - // Branch, tag, commit; origin is the name of the remote (optional) - // +optional - optional string ref = 2; - - // Specifies a strategy to resolve to an explicit ref (optional; v0.24.0+) - // +optional - optional carvel.dev.vendir.pkg.vendir.versions.v1alpha1.VersionSelection refSelection = 6; - - // Secret with auth details. 
allowed keys: ssh-privatekey, ssh-knownhosts, username, password (optional) - // (if ssh-knownhosts is not specified, git will not perform strict host checking) - // +optional - optional AppFetchLocalRef secretRef = 3; - - // Grab only portion of repository (optional) - // +optional - optional string subPath = 4; - - // Skip lfs download (optional) - // +optional - optional bool lfsSkipSmudge = 5; - - // Force the usage of HTTP Basic Auth when Basic Auth is provided (optional) - // +optional - optional bool forceHTTPBasicAuth = 7; -} - -// +k8s:openapi-gen=true -message AppFetchHTTP { - // URL can point to one of following formats: text, tgz, zip - // http and https url are supported; - // plain file, tgz and tar types are supported (required) - optional string url = 1; - - // Checksum to verify after download (optional) - // +optional - optional string sha256 = 2; - - // Secret to provide auth details (optional) - // Secret may include one or more keys: username, password - // +optional - optional AppFetchLocalRef secretRef = 3; - - // Grab only portion of download (optional) - // +optional - optional string subPath = 4; -} - -// +k8s:openapi-gen=true -message AppFetchHelmChart { - // Example: stable/redis - optional string name = 1; - - // +optional - optional string version = 2; - - optional AppFetchHelmChartRepo repository = 3; -} - -// +k8s:openapi-gen=true -message AppFetchHelmChartRepo { - // Repository url; - // scheme of oci:// will fetch experimental helm oci chart (v0.19.0+) - // (required) - optional string url = 1; - - // +optional - optional AppFetchLocalRef secretRef = 2; -} - -// +k8s:openapi-gen=true -message AppFetchImage { - // Docker image url; unqualified, tagged, or - // digest references supported (required) - // Example: username/app1-config:v0.1.0 - optional string url = 1; - - // Specifies a strategy to choose a tag (optional; v0.24.0+) - // if specified, do not include a tag in url key - // +optional - optional carvel.dev.vendir.pkg.vendir.versions.v1alpha1.VersionSelection tagSelection = 4; - - // Secret may include one or more keys: username, password, token. - // By default anonymous access is used for authentication. - // +optional - optional AppFetchLocalRef secretRef = 2; - - // Grab only portion of image (optional) - // +optional - optional string subPath = 3; -} - -// +k8s:openapi-gen=true -message AppFetchImgpkgBundle { - // Docker image url; unqualified, tagged, or - // digest references supported (required) - optional string image = 1; - - // Specifies a strategy to choose a tag (optional; v0.24.0+) - // if specified, do not include a tag in url key - // +optional - optional carvel.dev.vendir.pkg.vendir.versions.v1alpha1.VersionSelection tagSelection = 3; - - // Secret may include one or more keys: username, password, token. - // By default anonymous access is used for authentication. 
- // +optional - optional AppFetchLocalRef secretRef = 2; -} - -// +k8s:openapi-gen=true -message AppFetchInline { - // Specifies mapping of paths to their content; - // not recommended for sensitive values as CR is not encrypted (optional) - map<string, string> paths = 1; - - // Specifies content via secrets and config maps; - // data values are recommended to be placed in secrets (optional) - repeated AppFetchInlineSource pathsFrom = 2; -} - -// +k8s:openapi-gen=true -message AppFetchInlineSource { - optional AppFetchInlineSourceRef secretRef = 1; - - optional AppFetchInlineSourceRef configMapRef = 2; -} - -// +k8s:openapi-gen=true -message AppFetchInlineSourceRef { - // Specifies where to place files found in secret (optional) - optional string directoryPath = 2; - - optional string name = 1; -} - -// +k8s:openapi-gen=true -message AppFetchLocalRef { - // Object is expected to be within same namespace - optional string name = 1; -} - -// +k8s:openapi-gen=true -message AppSpec { - // Specifies that app should be deployed authenticated via - // given service account, found in this namespace (optional; v0.6.0+) - // +optional - optional string serviceAccountName = 1; - - // Specifies that app should be deployed to destination cluster; - // by default, cluster is same as where this resource resides (optional; v0.5.0+) - // +optional - optional AppCluster cluster = 2; - - // +optional - repeated AppFetch fetch = 3; - - // +optional - repeated AppTemplate template = 4; - - // +optional - repeated AppDeploy deploy = 5; - - // Pauses _future_ reconciliation; does _not_ affect - // currently running reconciliation (optional; default=false) - // +optional - optional bool paused = 6; - - // Cancels current and future reconciliations (optional; default=false) - // +optional - optional bool canceled = 7; - - // Specifies the length of time to wait, in time + unit - // format, before reconciling. Always >= 30s. If value below - // 30s is specified, 30s will be used. 
(optional; v0.9.0+; default=30s) - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration syncPeriod = 8; - - // Deletion requests for the App will result in the App CR being - // deleted, but its associated resources will not be deleted - // (optional; default=false; v0.18.0+) - // +optional - optional bool noopDelete = 9; - - // Specifies the default namespace to install the App resources, by default this is - // same as the App's namespace (optional; v0.48.0+) - // +optional - optional string defaultNamespace = 10; -} - -// +k8s:openapi-gen=true -message AppTemplate { - // Use ytt to template configuration - optional AppTemplateYtt ytt = 1; - - // Use kbld to resolve image references to use digests - optional AppTemplateKbld kbld = 2; - - // Use helm template command to render helm chart - optional AppTemplateHelmTemplate helmTemplate = 3; - - optional AppTemplateKustomize kustomize = 4; - - optional AppTemplateJsonnet jsonnet = 5; - - // Use sops to decrypt *.sops.yml files (optional; v0.11.0+) - optional AppTemplateSops sops = 6; - - optional AppTemplateCue cue = 7; -} - -// +k8s:openapi-gen=true -message AppTemplateCue { - // Explicit list of files/directories (optional) - repeated string paths = 1; - - // Provide values (optional) - repeated AppTemplateValuesSource valuesFrom = 2; - - // Cue expression for single path component, can be used to unify ValuesFrom into a given field (optional) - optional string inputExpression = 3; - - // Cue expression to output, default will export all visible fields (optional) - optional string outputExpression = 4; -} - -// +k8s:openapi-gen=true -message AppTemplateHelmTemplate { - // Set name explicitly, default is App CR's name (optional; v0.13.0+) - optional string name = 1; - - // Set namespace explicitly, default is App CR's namespace (optional; v0.13.0+) - optional string namespace = 2; - - // Path to chart (optional; v0.13.0+) - optional string path = 3; - - // One or more secrets, config maps, paths that provide values (optional) - repeated AppTemplateValuesSource valuesFrom = 4; - - // Optional: Get Kubernetes version, defaults (empty) to retrieving the version from the cluster. - // Can be manually overridden to a value instead. 
- optional Version kubernetesVersion = 5; - - // Optional: Use kubernetes group/versions resources available in the live cluster - optional KubernetesAPIs kubernetesAPIs = 6; -} - -// TODO implement jsonnet -// +k8s:openapi-gen=true -message AppTemplateJsonnet { -} - -// +k8s:openapi-gen=true -message AppTemplateKbld { - repeated string paths = 1; -} - -// TODO implement kustomize -// +k8s:openapi-gen=true -message AppTemplateKustomize { -} - -// +k8s:openapi-gen=true -message AppTemplateSops { - // Use PGP to decrypt files (required) - optional AppTemplateSopsPGP pgp = 1; - - // Lists paths to decrypt explicitly (optional; v0.13.0+) - repeated string paths = 2; - - optional AppTemplateSopsAge age = 3; -} - -// +k8s:openapi-gen=true -message AppTemplateSopsAge { - // Secret with private armored PGP private keys (required) - optional AppTemplateSopsPrivateKeysSecretRef privateKeysSecretRef = 1; -} - -// +k8s:openapi-gen=true -message AppTemplateSopsPGP { - // Secret with private armored PGP private keys (required) - optional AppTemplateSopsPrivateKeysSecretRef privateKeysSecretRef = 1; -} - -// +k8s:openapi-gen=true -message AppTemplateSopsPrivateKeysSecretRef { - optional string name = 1; -} - -// +k8s:openapi-gen=true -message AppTemplateValuesDownwardAPI { - repeated AppTemplateValuesDownwardAPIItem items = 1; -} - -// +k8s:openapi-gen=true -message AppTemplateValuesDownwardAPIItem { - optional string name = 1; - - // Required: Selects a field of the app: only annotations, labels, uid, name and namespace are supported. - optional string fieldPath = 2; - - // Optional: Get running Kubernetes version from cluster, defaults (empty) to retrieving the version from the cluster. - // Can be manually supplied instead. - optional Version kubernetesVersion = 3; - - // Optional: Get running KappController version, defaults (empty) to retrieving the current running version.. - // Can be manually supplied instead. - optional Version kappControllerVersion = 4; - - // Optional: Get running KubernetesAPIs from cluster, defaults (empty) to retrieving the APIs from the cluster. 
- // Can be manually supplied instead, e.g ["group/version", "group2/version2"] - optional KubernetesAPIs kubernetesAPIs = 5; -} - -// +k8s:openapi-gen=true -message AppTemplateValuesSource { - optional AppTemplateValuesSourceRef secretRef = 1; - - optional AppTemplateValuesSourceRef configMapRef = 2; - - optional string path = 3; - - optional AppTemplateValuesDownwardAPI downwardAPI = 4; -} - -// +k8s:openapi-gen=true -message AppTemplateValuesSourceRef { - optional string name = 1; -} - -// +k8s:openapi-gen=true -message AppTemplateYtt { - // Ignores comments that ytt doesn't recognize - // (optional; default=false) - optional bool ignoreUnknownComments = 1; - - // Forces strict mode https://github.com/k14s/ytt/blob/develop/docs/strict.md - // (optional; default=false) - optional bool strict = 2; - - // Specify additional files, including data values (optional) - optional AppFetchInline inline = 3; - - // Lists paths to provide to ytt explicitly (optional) - repeated string paths = 4; - - // Control metadata about input files passed to ytt (optional; v0.18.0+) - // see https://carvel.dev/ytt/docs/latest/file-marks/ for more details - repeated string fileMarks = 5; - - // Provide values via ytt's --data-values-file (optional; v0.19.0-alpha.9) - repeated AppTemplateValuesSource valuesFrom = 6; -} - -// +k8s:openapi-gen=true -message Condition { - optional string type = 1; - - optional string status = 2; - - // Unique, this should be a short, machine understandable string that gives the reason - // for condition's last transition. If it reports "ResizeStarted" that means the underlying - // persistent volume is being resized. - // +optional - optional string reason = 3; - - // Human-readable message indicating details about last transition. - // +optional - optional string message = 4; -} - -message GenericStatus { - // Populated based on metadata.generation when controller - // observes a change to the resource; if this value is - // out of data, other status fields do not reflect latest state - // +optional - optional int64 observedGeneration = 1; - - // +optional - repeated Condition conditions = 2; - - // +optional - optional string friendlyDescription = 3; - - // +optional - optional string usefulErrorMessage = 4; -} - -// +k8s:openapi-gen=true -message KubernetesAPIs { - repeated string groupVersions = 1; -} - -// +k8s:openapi-gen=true -message Version { - optional string version = 1; -} - diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/register.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/register.go deleted file mode 100644 index b85712092..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/register.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var SchemeGroupVersion = schema.GroupVersion{Group: "kappctrl.k14s.io", Version: "v1alpha1"} - -var ( - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - localSchemeBuilder.Register(func(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, &App{}, &AppList{}) - scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil - }) -} - -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/status.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/status.go deleted file mode 100644 index 71fc4bb2d..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/status.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" -) - -type GenericStatus struct { - // Populated based on metadata.generation when controller - // observes a change to the resource; if this value is - // out of data, other status fields do not reflect latest state - // +optional - ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,1,opt,name=observedGeneration"` - // +optional - Conditions []Condition `json:"conditions" protobuf:"bytes,2,rep,name=conditions"` - // +optional - FriendlyDescription string `json:"friendlyDescription" protobuf:"bytes,3,opt,name=friendlyDescription"` - // +optional - UsefulErrorMessage string `json:"usefulErrorMessage,omitempty" protobuf:"bytes,4,opt,name=usefulErrorMessage"` -} - -// ConditionType represents reconciler state -type ConditionType string - -// Constants representing reconciler state as a condition -const ( - Reconciling ConditionType = "Reconciling" - ReconcileFailed ConditionType = "ReconcileFailed" - ReconcileSucceeded ConditionType = "ReconcileSucceeded" - - Deleting ConditionType = "Deleting" - DeleteFailed ConditionType = "DeleteFailed" -) - -// +k8s:openapi-gen=true -type Condition struct { - Type ConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ConditionType"` - Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // Unique, this should be a short, machine understandable string that gives the reason - // for condition's last transition. If it reports "ResizeStarted" that means the underlying - // persistent volume is being resized. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` - // Human-readable message indicating details about last transition. 
- // +optional - Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` -} diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types.go deleted file mode 100644 index 9c2fb5a60..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -// +kubebuilder:resource:categories={carvel} -// +kubebuilder:printcolumn:name=Description,JSONPath=.status.friendlyDescription,description=Friendly description,type=string -// +kubebuilder:printcolumn:name=Since-Deploy,JSONPath=.status.deploy.startedAt,description=Last time app started being deployed. Does not mean anything was changed.,type=date -// +kubebuilder:printcolumn:name=Age,JSONPath=.metadata.creationTimestamp,description=Time since creation,type=date -// +protobuf=false -// An App is a set of Kubernetes resources. These resources could span any number of namespaces or could be cluster-wide (e.g. CRDs). An App is represented in kapp-controller using a App CR. -// The App CR comprises of three main sections: -// spec.fetch – declare source for fetching configuration and OCI images -// spec.template – declare templating tool and values -// spec.deploy – declare deployment tool and any deploy specific configuration -type App struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec AppSpec `json:"spec"` - // +optional - Status AppStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +protobuf=false -type AppList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - Items []App `json:"items"` -} - -// +k8s:openapi-gen=true -type AppSpec struct { - // Specifies that app should be deployed authenticated via - // given service account, found in this namespace (optional; v0.6.0+) - // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,1,opt,name=serviceAccountName"` - // Specifies that app should be deployed to destination cluster; - // by default, cluster is same as where this resource resides (optional; v0.5.0+) - // +optional - Cluster *AppCluster `json:"cluster,omitempty" protobuf:"bytes,2,opt,name=cluster"` - // +optional - Fetch []AppFetch `json:"fetch,omitempty" protobuf:"bytes,3,rep,name=fetch"` - // +optional - Template []AppTemplate `json:"template,omitempty" protobuf:"bytes,4,rep,name=template"` - // +optional - Deploy []AppDeploy `json:"deploy,omitempty" protobuf:"bytes,5,rep,name=deploy"` - // Pauses _future_ reconciliation; does _not_ affect - // currently running reconciliation (optional; default=false) - // +optional - Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"` - // Cancels current and future reconciliations (optional; default=false) - // +optional - Canceled bool `json:"canceled,omitempty" protobuf:"varint,7,opt,name=canceled"` - // Specifies the length of time to wait, in time + unit - // format, before reconciling. Always >= 30s. If value below - // 30s is specified, 30s will be used. (optional; v0.9.0+; default=30s) - // +optional - SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty" protobuf:"bytes,8,opt,name=syncPeriod"` - // Deletion requests for the App will result in the App CR being - // deleted, but its associated resources will not be deleted - // (optional; default=false; v0.18.0+) - // +optional - NoopDelete bool `json:"noopDelete,omitempty" protobuf:"varint,9,opt,name=noopDelete"` - // Specifies the default namespace to install the App resources, by default this is - // same as the App's namespace (optional; v0.48.0+) - // +optional - DefaultNamespace string `json:"defaultNamespace,omitempty" protobuf:"bytes,10,opt,name=defaultNamespace"` -} - -// +k8s:openapi-gen=true -type AppCluster struct { - // Specifies namespace in destination cluster (optional) - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` - // Specifies secret containing kubeconfig (required) - // +optional - KubeconfigSecretRef *AppClusterKubeconfigSecretRef `json:"kubeconfigSecretRef,omitempty" protobuf:"bytes,2,opt,name=kubeconfigSecretRef"` -} - -// +k8s:openapi-gen=true -type AppClusterKubeconfigSecretRef struct { - // Specifies secret name within app's namespace (required) - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Specifies key that contains kubeconfig (optional) - // +optional - Key string `json:"key,omitempty" protobuf:"bytes,2,opt,name=key"` -} - -// +protobuf=false -type AppStatus struct { - // +optional - ManagedAppName string `json:"managedAppName,omitempty"` - // +optional - Fetch *AppStatusFetch `json:"fetch,omitempty"` - // +optional - Template *AppStatusTemplate `json:"template,omitempty"` - // +optional - Deploy *AppStatusDeploy `json:"deploy,omitempty"` - // +optional - Inspect *AppStatusInspect `json:"inspect,omitempty"` - // +optional - ConsecutiveReconcileSuccesses int 
`json:"consecutiveReconcileSuccesses,omitempty"` - // +optional - ConsecutiveReconcileFailures int `json:"consecutiveReconcileFailures,omitempty"` - // +optional - GenericStatus `json:",inline"` -} - -// +protobuf=false -type AppStatusFetch struct { - // +optional - Stderr string `json:"stderr,omitempty"` - // +optional - Stdout string `json:"stdout,omitempty"` - // +optional - ExitCode int `json:"exitCode"` - // +optional - Error string `json:"error,omitempty"` - // +optional - StartedAt metav1.Time `json:"startedAt,omitempty"` - // +optional - UpdatedAt metav1.Time `json:"updatedAt,omitempty"` -} - -// +protobuf=false -type AppStatusTemplate struct { - // +optional - Stderr string `json:"stderr,omitempty"` - // +optional - ExitCode int `json:"exitCode"` - // +optional - Error string `json:"error,omitempty"` - // +optional - UpdatedAt metav1.Time `json:"updatedAt,omitempty"` -} - -// +protobuf=false -type AppStatusDeploy struct { - // +optional - Stdout string `json:"stdout,omitempty"` - // +optional - Stderr string `json:"stderr,omitempty"` - // +optional - Finished bool `json:"finished"` - // +optional - ExitCode int `json:"exitCode"` - // +optional - Error string `json:"error,omitempty"` - // +optional - StartedAt metav1.Time `json:"startedAt,omitempty"` - // +optional - UpdatedAt metav1.Time `json:"updatedAt,omitempty"` - // +optional - KappDeployStatus *KappDeployStatus `json:"kapp,omitempty"` -} - -// KappDeployStatus contains the associated AppCR deployed resources -// +protobuf=false -type KappDeployStatus struct { - AssociatedResources AssociatedResources `json:"associatedResources,omitempty"` -} - -// AssociatedResources contains the associated App label, namespaces and GKs -// +protobuf=false -type AssociatedResources struct { - Label string `json:"label,omitempty"` - Namespaces []string `json:"namespaces,omitempty"` - GroupKinds []metav1.GroupKind `json:"groupKinds,omitempty"` -} - -// +protobuf=false -type AppStatusInspect struct { - // +optional - Stdout string `json:"stdout,omitempty"` - // +optional - Stderr string `json:"stderr,omitempty"` - // +optional - ExitCode int `json:"exitCode"` - // +optional - Error string `json:"error,omitempty"` - // +optional - UpdatedAt metav1.Time `json:"updatedAt,omitempty"` -} diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types_deploy.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types_deploy.go deleted file mode 100644 index ff52e5f03..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types_deploy.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
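For reference, a minimal sketch of how the App and AppStatus types deleted above are typically populated and read. It assumes the upstream carvel-kapp-controller module still resolves on the Go module path (this change only drops it from vendor/); the object names are illustrative.

```go
package main

import (
	"fmt"
	"time"

	kcv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Build a minimal App: deploy via a service account and reconcile every
	// 2 minutes (per the spec comments, values below 30s are rounded up).
	app := kcv1alpha1.App{
		ObjectMeta: metav1.ObjectMeta{Name: "sample", Namespace: "default"},
		Spec: kcv1alpha1.AppSpec{
			ServiceAccountName: "sample-sa",
			SyncPeriod:         &metav1.Duration{Duration: 2 * time.Minute},
		},
	}

	// Status is filled in by kapp-controller; consumers usually check the
	// deploy sub-status for an error message and exit code.
	if d := app.Status.Deploy; d != nil && d.Error != "" {
		fmt.Printf("deploy failed (exit %d): %s\n", d.ExitCode, d.Error)
	} else {
		fmt.Println("no deploy error recorded")
	}
}
```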
-// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -// +k8s:openapi-gen=true -type AppDeploy struct { - // Use kapp to deploy resources - Kapp *AppDeployKapp `json:"kapp,omitempty" protobuf:"bytes,1,opt,name=kapp"` -} - -// +k8s:openapi-gen=true -type AppDeployKapp struct { - // Override namespace for all resources (optional) - IntoNs string `json:"intoNs,omitempty" protobuf:"bytes,1,opt,name=intoNs"` - // Provide custom namespace override mapping (optional) - MapNs []string `json:"mapNs,omitempty" protobuf:"bytes,2,rep,name=mapNs"` - // Pass through options to kapp deploy (optional) - RawOptions []string `json:"rawOptions,omitempty" protobuf:"bytes,3,rep,name=rawOptions"` - - // Configuration for inspect command (optional) - // as of kapp-controller v0.31.0, inspect is disabled by default - // add rawOptions or use an empty inspect config like `inspect: {}` to enable - Inspect *AppDeployKappInspect `json:"inspect,omitempty" protobuf:"bytes,4,opt,name=inspect"` - // Configuration for delete command (optional) - Delete *AppDeployKappDelete `json:"delete,omitempty" protobuf:"bytes,5,opt,name=delete"` -} - -// +k8s:openapi-gen=true -type AppDeployKappInspect struct { - // Pass through options to kapp inspect (optional) - RawOptions []string `json:"rawOptions,omitempty" protobuf:"bytes,1,rep,name=rawOptions"` -} - -// +k8s:openapi-gen=true -type AppDeployKappDelete struct { - // Pass through options to kapp delete (optional) - RawOptions []string `json:"rawOptions,omitempty" protobuf:"bytes,1,rep,name=rawOptions"` -} diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types_fetch.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types_fetch.go deleted file mode 100644 index f3a221a73..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types_fetch.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
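As the comments in the deleted types_deploy.go note, kapp-controller v0.31.0+ disables the inspect step unless an empty inspect config is supplied. A hedged sketch of a deploy section that does so; the kapp flag passed through rawOptions is only an example.

```go
package main

import (
	"fmt"

	kcv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1"
)

func main() {
	// Deploy with kapp into a fixed namespace, pass an extra kapp option
	// through rawOptions, and re-enable inspect with an empty config block.
	deploy := kcv1alpha1.AppDeploy{
		Kapp: &kcv1alpha1.AppDeployKapp{
			IntoNs:     "sample-ns",
			RawOptions: []string{"--wait-timeout=5m"}, // illustrative kapp flag
			Inspect:    &kcv1alpha1.AppDeployKappInspect{},
		},
	}
	fmt.Printf("deploying into %q with %d extra kapp option(s)\n",
		deploy.Kapp.IntoNs, len(deploy.Kapp.RawOptions))
}
```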
-// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - versions "carvel.dev/vendir/pkg/vendir/versions/v1alpha1" -) - -// +k8s:openapi-gen=true -type AppFetch struct { - // Pulls content from within this resource; or other resources in the cluster - Inline *AppFetchInline `json:"inline,omitempty" protobuf:"bytes,1,opt,name=inline"` - // Pulls content from Docker/OCI registry - Image *AppFetchImage `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` - // Uses http library to fetch file - HTTP *AppFetchHTTP `json:"http,omitempty" protobuf:"bytes,3,opt,name=http"` - // Uses git to clone repository - Git *AppFetchGit `json:"git,omitempty" protobuf:"bytes,4,opt,name=git"` - // Uses helm fetch to fetch specified chart - HelmChart *AppFetchHelmChart `json:"helmChart,omitempty" protobuf:"bytes,5,opt,name=helmChart"` - // Pulls imgpkg bundle from Docker/OCI registry (v0.17.0+) - ImgpkgBundle *AppFetchImgpkgBundle `json:"imgpkgBundle,omitempty" protobuf:"bytes,6,opt,name=imgpkgBundle"` - // Relative path to place the fetched artifacts - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,7,opt,name=path"` -} - -// +k8s:openapi-gen=true -type AppFetchInline struct { - // Specifies mapping of paths to their content; - // not recommended for sensitive values as CR is not encrypted (optional) - Paths map[string]string `json:"paths,omitempty" protobuf:"bytes,1,rep,name=paths"` - // Specifies content via secrets and config maps; - // data values are recommended to be placed in secrets (optional) - PathsFrom []AppFetchInlineSource `json:"pathsFrom,omitempty" protobuf:"bytes,2,rep,name=pathsFrom"` -} - -// +k8s:openapi-gen=true -type AppFetchInlineSource struct { - SecretRef *AppFetchInlineSourceRef `json:"secretRef,omitempty" protobuf:"bytes,1,opt,name=secretRef"` - ConfigMapRef *AppFetchInlineSourceRef `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"` -} - -// +k8s:openapi-gen=true -type AppFetchInlineSourceRef struct { - // Specifies where to place files found in secret (optional) - DirectoryPath string `json:"directoryPath,omitempty" protobuf:"bytes,2,opt,name=directoryPath"` - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} - -// +k8s:openapi-gen=true -type AppFetchImage struct { - // Docker image url; unqualified, tagged, or - // digest references supported (required) - // Example: username/app1-config:v0.1.0 - URL string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` - // Specifies a strategy to choose a tag (optional; v0.24.0+) - // if specified, do not include a tag in url key - // +optional - TagSelection *versions.VersionSelection `json:"tagSelection,omitempty" protobuf:"bytes,4,opt,name=tagSelection"` - // Secret may include one or more keys: username, password, token. - // By default anonymous access is used for authentication. 
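The inline fetch source removed above carries small files directly on the CR and can also pull content from Secrets or ConfigMaps, with Secrets recommended for anything sensitive. A minimal sketch, with illustrative file contents and Secret name:

```go
package main

import (
	"fmt"

	kcv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1"
)

func main() {
	// Inline fetch: one file embedded in the CR plus one sourced from a Secret.
	fetch := kcv1alpha1.AppFetch{
		Inline: &kcv1alpha1.AppFetchInline{
			Paths: map[string]string{
				"values.yml": "replicas: 2\n",
			},
			PathsFrom: []kcv1alpha1.AppFetchInlineSource{
				{SecretRef: &kcv1alpha1.AppFetchInlineSourceRef{Name: "extra-values"}},
			},
		},
	}
	fmt.Printf("inline paths: %d, secret sources: %d\n",
		len(fetch.Inline.Paths), len(fetch.Inline.PathsFrom))
}
```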
- // +optional - SecretRef *AppFetchLocalRef `json:"secretRef,omitempty" protobuf:"bytes,2,opt,name=secretRef"` - // Grab only portion of image (optional) - // +optional - SubPath string `json:"subPath,omitempty" protobuf:"bytes,3,opt,name=subPath"` -} - -// +k8s:openapi-gen=true -type AppFetchHTTP struct { - // URL can point to one of following formats: text, tgz, zip - // http and https url are supported; - // plain file, tgz and tar types are supported (required) - URL string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` - // Checksum to verify after download (optional) - // +optional - SHA256 string `json:"sha256,omitempty" protobuf:"bytes,2,opt,name=sha256"` - // Secret to provide auth details (optional) - // Secret may include one or more keys: username, password - // +optional - SecretRef *AppFetchLocalRef `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` - // Grab only portion of download (optional) - // +optional - SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"` -} - -// +k8s:openapi-gen=true -type AppFetchGit struct { - // http or ssh urls are supported (required) - URL string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` - // Branch, tag, commit; origin is the name of the remote (optional) - // +optional - Ref string `json:"ref,omitempty" protobuf:"bytes,2,opt,name=ref"` - // Specifies a strategy to resolve to an explicit ref (optional; v0.24.0+) - // +optional - RefSelection *versions.VersionSelection `json:"refSelection,omitempty" protobuf:"bytes,6,opt,name=refSelection"` - // Secret with auth details. allowed keys: ssh-privatekey, ssh-knownhosts, username, password (optional) - // (if ssh-knownhosts is not specified, git will not perform strict host checking) - // +optional - SecretRef *AppFetchLocalRef `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` - // Grab only portion of repository (optional) - // +optional - SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"` - // Skip lfs download (optional) - // +optional - LFSSkipSmudge bool `json:"lfsSkipSmudge,omitempty" protobuf:"varint,5,opt,name=lfsSkipSmudge"` - // Force the usage of HTTP Basic Auth when Basic Auth is provided (optional) - // +optional - ForceHTTPBasicAuth bool `json:"forceHTTPBasicAuth,omitempty" protobuf:"varint,7,opt,name=forceHTTPBasicAuth"` -} - -// +k8s:openapi-gen=true -type AppFetchHelmChart struct { - // Example: stable/redis - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // +optional - Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` - Repository *AppFetchHelmChartRepo `json:"repository,omitempty" protobuf:"bytes,3,opt,name=repository"` -} - -// +k8s:openapi-gen=true -type AppFetchHelmChartRepo struct { - // Repository url; - // scheme of oci:// will fetch experimental helm oci chart (v0.19.0+) - // (required) - URL string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` - // +optional - SecretRef *AppFetchLocalRef `json:"secretRef,omitempty" protobuf:"bytes,2,opt,name=secretRef"` -} - -// +k8s:openapi-gen=true -type AppFetchLocalRef struct { - // Object is expected to be within same namespace - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} - -// +k8s:openapi-gen=true -type AppFetchImgpkgBundle struct { - // Docker image url; unqualified, tagged, or - // digest references supported (required) - Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` - // Specifies a strategy to choose a tag 
(optional; v0.24.0+) - // if specified, do not include a tag in url key - // +optional - TagSelection *versions.VersionSelection `json:"tagSelection,omitempty" protobuf:"bytes,3,opt,name=tagSelection"` - // Secret may include one or more keys: username, password, token. - // By default anonymous access is used for authentication. - // +optional - SecretRef *AppFetchLocalRef `json:"secretRef,omitempty" protobuf:"bytes,2,opt,name=secretRef"` -} diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types_template.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types_template.go deleted file mode 100644 index 09c7a0055..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/types_template.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -//nolint:revive // we're unlikely to write descriptive godoc comments in this file. -package v1alpha1 - -// +k8s:openapi-gen=true -type AppTemplate struct { - // Use ytt to template configuration - Ytt *AppTemplateYtt `json:"ytt,omitempty" protobuf:"bytes,1,opt,name=ytt"` - // Use kbld to resolve image references to use digests - Kbld *AppTemplateKbld `json:"kbld,omitempty" protobuf:"bytes,2,opt,name=kbld"` - // Use helm template command to render helm chart - HelmTemplate *AppTemplateHelmTemplate `json:"helmTemplate,omitempty" protobuf:"bytes,3,opt,name=helmTemplate"` - Kustomize *AppTemplateKustomize `json:"kustomize,omitempty" protobuf:"bytes,4,opt,name=kustomize"` - Jsonnet *AppTemplateJsonnet `json:"jsonnet,omitempty" protobuf:"bytes,5,opt,name=jsonnet"` - // Use sops to decrypt *.sops.yml files (optional; v0.11.0+) - Sops *AppTemplateSops `json:"sops,omitempty" protobuf:"bytes,6,opt,name=sops"` - Cue *AppTemplateCue `json:"cue,omitempty" protobuf:"bytes,7,opt,name=cue"` -} - -// +k8s:openapi-gen=true -type AppTemplateYtt struct { - // Ignores comments that ytt doesn't recognize - // (optional; default=false) - IgnoreUnknownComments bool `json:"ignoreUnknownComments,omitempty" protobuf:"varint,1,opt,name=ignoreUnknownComments"` - // Forces strict mode https://github.com/k14s/ytt/blob/develop/docs/strict.md - // (optional; default=false) - Strict bool `json:"strict,omitempty" protobuf:"varint,2,opt,name=strict"` - // Specify additional files, including data values (optional) - Inline *AppFetchInline `json:"inline,omitempty" protobuf:"bytes,3,opt,name=inline"` - // Lists paths to provide to ytt explicitly (optional) - Paths []string `json:"paths,omitempty" protobuf:"bytes,4,rep,name=paths"` - // Control metadata about input files passed to ytt (optional; v0.18.0+) - // see https://carvel.dev/ytt/docs/latest/file-marks/ for more details - FileMarks []string `json:"fileMarks,omitempty" protobuf:"bytes,5,rep,name=fileMarks"` - // Provide values via ytt's --data-values-file (optional; v0.19.0-alpha.9) - ValuesFrom []AppTemplateValuesSource `json:"valuesFrom,omitempty" protobuf:"bytes,6,rep,name=valuesFrom"` -} - -// +k8s:openapi-gen=true -type AppTemplateKbld struct { - Paths []string `json:"paths,omitempty" protobuf:"bytes,1,rep,name=paths"` -} - -// +k8s:openapi-gen=true -type AppTemplateHelmTemplate struct { - // Set name explicitly, default is App CR's name (optional; v0.13.0+) - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Set namespace explicitly, default is App CR's namespace (optional; v0.13.0+) - Namespace string `json:"namespace,omitempty" 
protobuf:"bytes,2,opt,name=namespace"` - // Path to chart (optional; v0.13.0+) - Path string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` - // One or more secrets, config maps, paths that provide values (optional) - ValuesFrom []AppTemplateValuesSource `json:"valuesFrom,omitempty" protobuf:"bytes,4,rep,name=valuesFrom"` - // Optional: Get Kubernetes version, defaults (empty) to retrieving the version from the cluster. - // Can be manually overridden to a value instead. - KubernetesVersion *Version `json:"kubernetesVersion,omitempty" protobuf:"bytes,5,opt,name=kubernetesVersion"` - // Optional: Use kubernetes group/versions resources available in the live cluster - KubernetesAPIs *KubernetesAPIs `json:"kubernetesAPIs,omitempty" protobuf:"bytes,6,opt,name=kubernetesAPIs"` -} - -// +k8s:openapi-gen=true -type AppTemplateValuesSource struct { - SecretRef *AppTemplateValuesSourceRef `json:"secretRef,omitempty" protobuf:"bytes,1,opt,name=secretRef"` - ConfigMapRef *AppTemplateValuesSourceRef `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"` - Path string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` - DownwardAPI *AppTemplateValuesDownwardAPI `json:"downwardAPI,omitempty" protobuf:"bytes,4,opt,name=downwardAPI"` -} - -// +k8s:openapi-gen=true -type AppTemplateValuesSourceRef struct { - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} - -// +k8s:openapi-gen=true -type AppTemplateValuesDownwardAPI struct { - Items []AppTemplateValuesDownwardAPIItem `json:"items,omitempty" protobuf:"bytes,1,opt,name=items"` -} - -// +k8s:openapi-gen=true -type AppTemplateValuesDownwardAPIItem struct { - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Required: Selects a field of the app: only annotations, labels, uid, name and namespace are supported. - FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,2,opt,name=fieldPath"` - // Optional: Get running Kubernetes version from cluster, defaults (empty) to retrieving the version from the cluster. - // Can be manually supplied instead. - KubernetesVersion *Version `json:"kubernetesVersion,omitempty" protobuf:"bytes,3,opt,name=kubernetesVersion"` - // Optional: Get running KappController version, defaults (empty) to retrieving the current running version.. - // Can be manually supplied instead. - KappControllerVersion *Version `json:"kappControllerVersion,omitempty" protobuf:"bytes,4,opt,name=kappControllerVersion"` - // Optional: Get running KubernetesAPIs from cluster, defaults (empty) to retrieving the APIs from the cluster. 
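The templating types removed here wire ytt (and the other tools) to data values supplied via valuesFrom. A short sketch of a ytt template step, assuming an illustrative config path and Secret name:

```go
package main

import (
	"fmt"

	kcv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1"
)

func main() {
	// Template with ytt over a fixed path, feeding data values from a Secret
	// via ytt's --data-values-file mechanism described in the comments above.
	tpl := kcv1alpha1.AppTemplate{
		Ytt: &kcv1alpha1.AppTemplateYtt{
			IgnoreUnknownComments: true,
			Paths:                 []string{"config/"},
			ValuesFrom: []kcv1alpha1.AppTemplateValuesSource{
				{SecretRef: &kcv1alpha1.AppTemplateValuesSourceRef{Name: "app-values"}},
			},
		},
	}
	fmt.Printf("ytt paths: %v\n", tpl.Ytt.Paths)
}
```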
- // Can be manually supplied instead, e.g ["group/version", "group2/version2"] - KubernetesAPIs *KubernetesAPIs `json:"kubernetesAPIs,omitempty" protobuf:"bytes,5,opt,name=kubernetesAPIs"` -} - -// +k8s:openapi-gen=true -type Version struct { - Version string `json:"version,omitempty" protobuf:"bytes,1,opt,name=version"` -} - -// +k8s:openapi-gen=true -type KubernetesAPIs struct { - GroupVersions []string `json:"groupVersions,omitempty" protobuf:"bytes,1,opt,name=groupVersions"` -} - -// TODO implement kustomize -// +k8s:openapi-gen=true -type AppTemplateKustomize struct{} - -// TODO implement jsonnet -// +k8s:openapi-gen=true -type AppTemplateJsonnet struct{} - -// +k8s:openapi-gen=true -type AppTemplateSops struct { - // Use PGP to decrypt files (required) - PGP *AppTemplateSopsPGP `json:"pgp,omitempty" protobuf:"bytes,1,opt,name=pgp"` - // Lists paths to decrypt explicitly (optional; v0.13.0+) - Paths []string `json:"paths,omitempty" protobuf:"bytes,2,rep,name=paths"` - Age *AppTemplateSopsAge `json:"age,omitempty" protobuf:"bytes,3,opt,name=age"` -} - -// +k8s:openapi-gen=true -type AppTemplateSopsPGP struct { - // Secret with private armored PGP private keys (required) - PrivateKeysSecretRef *AppTemplateSopsPrivateKeysSecretRef `json:"privateKeysSecretRef,omitempty" protobuf:"bytes,1,opt,name=privateKeysSecretRef"` -} - -// +k8s:openapi-gen=true -type AppTemplateSopsAge struct { - // Secret with private armored PGP private keys (required) - PrivateKeysSecretRef *AppTemplateSopsPrivateKeysSecretRef `json:"privateKeysSecretRef,omitempty" protobuf:"bytes,1,opt,name=privateKeysSecretRef"` -} - -// +k8s:openapi-gen=true -type AppTemplateSopsPrivateKeysSecretRef struct { - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} - -// +k8s:openapi-gen=true -type AppTemplateCue struct { - // Explicit list of files/directories (optional) - Paths []string `json:"paths,omitempty" protobuf:"bytes,1,rep,name=paths"` - // Provide values (optional) - ValuesFrom []AppTemplateValuesSource `json:"valuesFrom,omitempty" protobuf:"bytes,2,rep,name=valuesFrom"` - // Cue expression for single path component, can be used to unify ValuesFrom into a given field (optional) - InputExpression string `json:"inputExpression,omitempty" protobuf:"bytes,3,opt,name=inputExpression"` - // Cue expression to output, default will export all visible fields (optional) - OutputExpression string `json:"outputExpression,omitempty" protobuf:"bytes,4,opt,name=outputExpression"` -} diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 478450469..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,1153 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by main. DO NOT EDIT. - -package v1alpha1 - -import ( - versionsv1alpha1 "carvel.dev/vendir/pkg/vendir/versions/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *App) DeepCopyInto(out *App) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new App. -func (in *App) DeepCopy() *App { - if in == nil { - return nil - } - out := new(App) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *App) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppCluster) DeepCopyInto(out *AppCluster) { - *out = *in - if in.KubeconfigSecretRef != nil { - in, out := &in.KubeconfigSecretRef, &out.KubeconfigSecretRef - *out = new(AppClusterKubeconfigSecretRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppCluster. -func (in *AppCluster) DeepCopy() *AppCluster { - if in == nil { - return nil - } - out := new(AppCluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppClusterKubeconfigSecretRef) DeepCopyInto(out *AppClusterKubeconfigSecretRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppClusterKubeconfigSecretRef. -func (in *AppClusterKubeconfigSecretRef) DeepCopy() *AppClusterKubeconfigSecretRef { - if in == nil { - return nil - } - out := new(AppClusterKubeconfigSecretRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppDeploy) DeepCopyInto(out *AppDeploy) { - *out = *in - if in.Kapp != nil { - in, out := &in.Kapp, &out.Kapp - *out = new(AppDeployKapp) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppDeploy. -func (in *AppDeploy) DeepCopy() *AppDeploy { - if in == nil { - return nil - } - out := new(AppDeploy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppDeployKapp) DeepCopyInto(out *AppDeployKapp) { - *out = *in - if in.MapNs != nil { - in, out := &in.MapNs, &out.MapNs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.RawOptions != nil { - in, out := &in.RawOptions, &out.RawOptions - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Inspect != nil { - in, out := &in.Inspect, &out.Inspect - *out = new(AppDeployKappInspect) - (*in).DeepCopyInto(*out) - } - if in.Delete != nil { - in, out := &in.Delete, &out.Delete - *out = new(AppDeployKappDelete) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppDeployKapp. -func (in *AppDeployKapp) DeepCopy() *AppDeployKapp { - if in == nil { - return nil - } - out := new(AppDeployKapp) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AppDeployKappDelete) DeepCopyInto(out *AppDeployKappDelete) { - *out = *in - if in.RawOptions != nil { - in, out := &in.RawOptions, &out.RawOptions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppDeployKappDelete. -func (in *AppDeployKappDelete) DeepCopy() *AppDeployKappDelete { - if in == nil { - return nil - } - out := new(AppDeployKappDelete) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppDeployKappInspect) DeepCopyInto(out *AppDeployKappInspect) { - *out = *in - if in.RawOptions != nil { - in, out := &in.RawOptions, &out.RawOptions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppDeployKappInspect. -func (in *AppDeployKappInspect) DeepCopy() *AppDeployKappInspect { - if in == nil { - return nil - } - out := new(AppDeployKappInspect) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppFetch) DeepCopyInto(out *AppFetch) { - *out = *in - if in.Inline != nil { - in, out := &in.Inline, &out.Inline - *out = new(AppFetchInline) - (*in).DeepCopyInto(*out) - } - if in.Image != nil { - in, out := &in.Image, &out.Image - *out = new(AppFetchImage) - (*in).DeepCopyInto(*out) - } - if in.HTTP != nil { - in, out := &in.HTTP, &out.HTTP - *out = new(AppFetchHTTP) - (*in).DeepCopyInto(*out) - } - if in.Git != nil { - in, out := &in.Git, &out.Git - *out = new(AppFetchGit) - (*in).DeepCopyInto(*out) - } - if in.HelmChart != nil { - in, out := &in.HelmChart, &out.HelmChart - *out = new(AppFetchHelmChart) - (*in).DeepCopyInto(*out) - } - if in.ImgpkgBundle != nil { - in, out := &in.ImgpkgBundle, &out.ImgpkgBundle - *out = new(AppFetchImgpkgBundle) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFetch. -func (in *AppFetch) DeepCopy() *AppFetch { - if in == nil { - return nil - } - out := new(AppFetch) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppFetchGit) DeepCopyInto(out *AppFetchGit) { - *out = *in - if in.RefSelection != nil { - in, out := &in.RefSelection, &out.RefSelection - *out = new(versionsv1alpha1.VersionSelection) - (*in).DeepCopyInto(*out) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(AppFetchLocalRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFetchGit. -func (in *AppFetchGit) DeepCopy() *AppFetchGit { - if in == nil { - return nil - } - out := new(AppFetchGit) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppFetchHTTP) DeepCopyInto(out *AppFetchHTTP) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(AppFetchLocalRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFetchHTTP. 
-func (in *AppFetchHTTP) DeepCopy() *AppFetchHTTP { - if in == nil { - return nil - } - out := new(AppFetchHTTP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppFetchHelmChart) DeepCopyInto(out *AppFetchHelmChart) { - *out = *in - if in.Repository != nil { - in, out := &in.Repository, &out.Repository - *out = new(AppFetchHelmChartRepo) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFetchHelmChart. -func (in *AppFetchHelmChart) DeepCopy() *AppFetchHelmChart { - if in == nil { - return nil - } - out := new(AppFetchHelmChart) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppFetchHelmChartRepo) DeepCopyInto(out *AppFetchHelmChartRepo) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(AppFetchLocalRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFetchHelmChartRepo. -func (in *AppFetchHelmChartRepo) DeepCopy() *AppFetchHelmChartRepo { - if in == nil { - return nil - } - out := new(AppFetchHelmChartRepo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppFetchImage) DeepCopyInto(out *AppFetchImage) { - *out = *in - if in.TagSelection != nil { - in, out := &in.TagSelection, &out.TagSelection - *out = new(versionsv1alpha1.VersionSelection) - (*in).DeepCopyInto(*out) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(AppFetchLocalRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFetchImage. -func (in *AppFetchImage) DeepCopy() *AppFetchImage { - if in == nil { - return nil - } - out := new(AppFetchImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppFetchImgpkgBundle) DeepCopyInto(out *AppFetchImgpkgBundle) { - *out = *in - if in.TagSelection != nil { - in, out := &in.TagSelection, &out.TagSelection - *out = new(versionsv1alpha1.VersionSelection) - (*in).DeepCopyInto(*out) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(AppFetchLocalRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFetchImgpkgBundle. -func (in *AppFetchImgpkgBundle) DeepCopy() *AppFetchImgpkgBundle { - if in == nil { - return nil - } - out := new(AppFetchImgpkgBundle) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AppFetchInline) DeepCopyInto(out *AppFetchInline) { - *out = *in - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.PathsFrom != nil { - in, out := &in.PathsFrom, &out.PathsFrom - *out = make([]AppFetchInlineSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFetchInline. -func (in *AppFetchInline) DeepCopy() *AppFetchInline { - if in == nil { - return nil - } - out := new(AppFetchInline) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppFetchInlineSource) DeepCopyInto(out *AppFetchInlineSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(AppFetchInlineSourceRef) - **out = **in - } - if in.ConfigMapRef != nil { - in, out := &in.ConfigMapRef, &out.ConfigMapRef - *out = new(AppFetchInlineSourceRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFetchInlineSource. -func (in *AppFetchInlineSource) DeepCopy() *AppFetchInlineSource { - if in == nil { - return nil - } - out := new(AppFetchInlineSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppFetchInlineSourceRef) DeepCopyInto(out *AppFetchInlineSourceRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFetchInlineSourceRef. -func (in *AppFetchInlineSourceRef) DeepCopy() *AppFetchInlineSourceRef { - if in == nil { - return nil - } - out := new(AppFetchInlineSourceRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppFetchLocalRef) DeepCopyInto(out *AppFetchLocalRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFetchLocalRef. -func (in *AppFetchLocalRef) DeepCopy() *AppFetchLocalRef { - if in == nil { - return nil - } - out := new(AppFetchLocalRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppList) DeepCopyInto(out *AppList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]App, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppList. -func (in *AppList) DeepCopy() *AppList { - if in == nil { - return nil - } - out := new(AppList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AppList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AppSpec) DeepCopyInto(out *AppSpec) { - *out = *in - if in.Cluster != nil { - in, out := &in.Cluster, &out.Cluster - *out = new(AppCluster) - (*in).DeepCopyInto(*out) - } - if in.Fetch != nil { - in, out := &in.Fetch, &out.Fetch - *out = make([]AppFetch, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = make([]AppTemplate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Deploy != nil { - in, out := &in.Deploy, &out.Deploy - *out = make([]AppDeploy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SyncPeriod != nil { - in, out := &in.SyncPeriod, &out.SyncPeriod - *out = new(v1.Duration) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppSpec. -func (in *AppSpec) DeepCopy() *AppSpec { - if in == nil { - return nil - } - out := new(AppSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppStatus) DeepCopyInto(out *AppStatus) { - *out = *in - if in.Fetch != nil { - in, out := &in.Fetch, &out.Fetch - *out = new(AppStatusFetch) - (*in).DeepCopyInto(*out) - } - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(AppStatusTemplate) - (*in).DeepCopyInto(*out) - } - if in.Deploy != nil { - in, out := &in.Deploy, &out.Deploy - *out = new(AppStatusDeploy) - (*in).DeepCopyInto(*out) - } - if in.Inspect != nil { - in, out := &in.Inspect, &out.Inspect - *out = new(AppStatusInspect) - (*in).DeepCopyInto(*out) - } - in.GenericStatus.DeepCopyInto(&out.GenericStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppStatus. -func (in *AppStatus) DeepCopy() *AppStatus { - if in == nil { - return nil - } - out := new(AppStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppStatusDeploy) DeepCopyInto(out *AppStatusDeploy) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) - in.UpdatedAt.DeepCopyInto(&out.UpdatedAt) - if in.KappDeployStatus != nil { - in, out := &in.KappDeployStatus, &out.KappDeployStatus - *out = new(KappDeployStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppStatusDeploy. -func (in *AppStatusDeploy) DeepCopy() *AppStatusDeploy { - if in == nil { - return nil - } - out := new(AppStatusDeploy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppStatusFetch) DeepCopyInto(out *AppStatusFetch) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) - in.UpdatedAt.DeepCopyInto(&out.UpdatedAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppStatusFetch. -func (in *AppStatusFetch) DeepCopy() *AppStatusFetch { - if in == nil { - return nil - } - out := new(AppStatusFetch) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AppStatusInspect) DeepCopyInto(out *AppStatusInspect) { - *out = *in - in.UpdatedAt.DeepCopyInto(&out.UpdatedAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppStatusInspect. -func (in *AppStatusInspect) DeepCopy() *AppStatusInspect { - if in == nil { - return nil - } - out := new(AppStatusInspect) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppStatusTemplate) DeepCopyInto(out *AppStatusTemplate) { - *out = *in - in.UpdatedAt.DeepCopyInto(&out.UpdatedAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppStatusTemplate. -func (in *AppStatusTemplate) DeepCopy() *AppStatusTemplate { - if in == nil { - return nil - } - out := new(AppStatusTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplate) DeepCopyInto(out *AppTemplate) { - *out = *in - if in.Ytt != nil { - in, out := &in.Ytt, &out.Ytt - *out = new(AppTemplateYtt) - (*in).DeepCopyInto(*out) - } - if in.Kbld != nil { - in, out := &in.Kbld, &out.Kbld - *out = new(AppTemplateKbld) - (*in).DeepCopyInto(*out) - } - if in.HelmTemplate != nil { - in, out := &in.HelmTemplate, &out.HelmTemplate - *out = new(AppTemplateHelmTemplate) - (*in).DeepCopyInto(*out) - } - if in.Kustomize != nil { - in, out := &in.Kustomize, &out.Kustomize - *out = new(AppTemplateKustomize) - **out = **in - } - if in.Jsonnet != nil { - in, out := &in.Jsonnet, &out.Jsonnet - *out = new(AppTemplateJsonnet) - **out = **in - } - if in.Sops != nil { - in, out := &in.Sops, &out.Sops - *out = new(AppTemplateSops) - (*in).DeepCopyInto(*out) - } - if in.Cue != nil { - in, out := &in.Cue, &out.Cue - *out = new(AppTemplateCue) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplate. -func (in *AppTemplate) DeepCopy() *AppTemplate { - if in == nil { - return nil - } - out := new(AppTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateCue) DeepCopyInto(out *AppTemplateCue) { - *out = *in - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ValuesFrom != nil { - in, out := &in.ValuesFrom, &out.ValuesFrom - *out = make([]AppTemplateValuesSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateCue. -func (in *AppTemplateCue) DeepCopy() *AppTemplateCue { - if in == nil { - return nil - } - out := new(AppTemplateCue) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AppTemplateHelmTemplate) DeepCopyInto(out *AppTemplateHelmTemplate) { - *out = *in - if in.ValuesFrom != nil { - in, out := &in.ValuesFrom, &out.ValuesFrom - *out = make([]AppTemplateValuesSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.KubernetesVersion != nil { - in, out := &in.KubernetesVersion, &out.KubernetesVersion - *out = new(Version) - **out = **in - } - if in.KubernetesAPIs != nil { - in, out := &in.KubernetesAPIs, &out.KubernetesAPIs - *out = new(KubernetesAPIs) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateHelmTemplate. -func (in *AppTemplateHelmTemplate) DeepCopy() *AppTemplateHelmTemplate { - if in == nil { - return nil - } - out := new(AppTemplateHelmTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateJsonnet) DeepCopyInto(out *AppTemplateJsonnet) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateJsonnet. -func (in *AppTemplateJsonnet) DeepCopy() *AppTemplateJsonnet { - if in == nil { - return nil - } - out := new(AppTemplateJsonnet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateKbld) DeepCopyInto(out *AppTemplateKbld) { - *out = *in - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateKbld. -func (in *AppTemplateKbld) DeepCopy() *AppTemplateKbld { - if in == nil { - return nil - } - out := new(AppTemplateKbld) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateKustomize) DeepCopyInto(out *AppTemplateKustomize) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateKustomize. -func (in *AppTemplateKustomize) DeepCopy() *AppTemplateKustomize { - if in == nil { - return nil - } - out := new(AppTemplateKustomize) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateSops) DeepCopyInto(out *AppTemplateSops) { - *out = *in - if in.PGP != nil { - in, out := &in.PGP, &out.PGP - *out = new(AppTemplateSopsPGP) - (*in).DeepCopyInto(*out) - } - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Age != nil { - in, out := &in.Age, &out.Age - *out = new(AppTemplateSopsAge) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateSops. -func (in *AppTemplateSops) DeepCopy() *AppTemplateSops { - if in == nil { - return nil - } - out := new(AppTemplateSops) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AppTemplateSopsAge) DeepCopyInto(out *AppTemplateSopsAge) { - *out = *in - if in.PrivateKeysSecretRef != nil { - in, out := &in.PrivateKeysSecretRef, &out.PrivateKeysSecretRef - *out = new(AppTemplateSopsPrivateKeysSecretRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateSopsAge. -func (in *AppTemplateSopsAge) DeepCopy() *AppTemplateSopsAge { - if in == nil { - return nil - } - out := new(AppTemplateSopsAge) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateSopsPGP) DeepCopyInto(out *AppTemplateSopsPGP) { - *out = *in - if in.PrivateKeysSecretRef != nil { - in, out := &in.PrivateKeysSecretRef, &out.PrivateKeysSecretRef - *out = new(AppTemplateSopsPrivateKeysSecretRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateSopsPGP. -func (in *AppTemplateSopsPGP) DeepCopy() *AppTemplateSopsPGP { - if in == nil { - return nil - } - out := new(AppTemplateSopsPGP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateSopsPrivateKeysSecretRef) DeepCopyInto(out *AppTemplateSopsPrivateKeysSecretRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateSopsPrivateKeysSecretRef. -func (in *AppTemplateSopsPrivateKeysSecretRef) DeepCopy() *AppTemplateSopsPrivateKeysSecretRef { - if in == nil { - return nil - } - out := new(AppTemplateSopsPrivateKeysSecretRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateValuesDownwardAPI) DeepCopyInto(out *AppTemplateValuesDownwardAPI) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AppTemplateValuesDownwardAPIItem, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateValuesDownwardAPI. -func (in *AppTemplateValuesDownwardAPI) DeepCopy() *AppTemplateValuesDownwardAPI { - if in == nil { - return nil - } - out := new(AppTemplateValuesDownwardAPI) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateValuesDownwardAPIItem) DeepCopyInto(out *AppTemplateValuesDownwardAPIItem) { - *out = *in - if in.KubernetesVersion != nil { - in, out := &in.KubernetesVersion, &out.KubernetesVersion - *out = new(Version) - **out = **in - } - if in.KappControllerVersion != nil { - in, out := &in.KappControllerVersion, &out.KappControllerVersion - *out = new(Version) - **out = **in - } - if in.KubernetesAPIs != nil { - in, out := &in.KubernetesAPIs, &out.KubernetesAPIs - *out = new(KubernetesAPIs) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateValuesDownwardAPIItem. 
-func (in *AppTemplateValuesDownwardAPIItem) DeepCopy() *AppTemplateValuesDownwardAPIItem { - if in == nil { - return nil - } - out := new(AppTemplateValuesDownwardAPIItem) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateValuesSource) DeepCopyInto(out *AppTemplateValuesSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(AppTemplateValuesSourceRef) - **out = **in - } - if in.ConfigMapRef != nil { - in, out := &in.ConfigMapRef, &out.ConfigMapRef - *out = new(AppTemplateValuesSourceRef) - **out = **in - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(AppTemplateValuesDownwardAPI) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateValuesSource. -func (in *AppTemplateValuesSource) DeepCopy() *AppTemplateValuesSource { - if in == nil { - return nil - } - out := new(AppTemplateValuesSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateValuesSourceRef) DeepCopyInto(out *AppTemplateValuesSourceRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateValuesSourceRef. -func (in *AppTemplateValuesSourceRef) DeepCopy() *AppTemplateValuesSourceRef { - if in == nil { - return nil - } - out := new(AppTemplateValuesSourceRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppTemplateYtt) DeepCopyInto(out *AppTemplateYtt) { - *out = *in - if in.Inline != nil { - in, out := &in.Inline, &out.Inline - *out = new(AppFetchInline) - (*in).DeepCopyInto(*out) - } - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.FileMarks != nil { - in, out := &in.FileMarks, &out.FileMarks - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ValuesFrom != nil { - in, out := &in.ValuesFrom, &out.ValuesFrom - *out = make([]AppTemplateValuesSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplateYtt. -func (in *AppTemplateYtt) DeepCopy() *AppTemplateYtt { - if in == nil { - return nil - } - out := new(AppTemplateYtt) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AssociatedResources) DeepCopyInto(out *AssociatedResources) { - *out = *in - if in.Namespaces != nil { - in, out := &in.Namespaces, &out.Namespaces - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.GroupKinds != nil { - in, out := &in.GroupKinds, &out.GroupKinds - *out = make([]v1.GroupKind, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssociatedResources. 
-func (in *AssociatedResources) DeepCopy() *AssociatedResources { - if in == nil { - return nil - } - out := new(AssociatedResources) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Condition) DeepCopyInto(out *Condition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. -func (in *Condition) DeepCopy() *Condition { - if in == nil { - return nil - } - out := new(Condition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericStatus) DeepCopyInto(out *GenericStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]Condition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericStatus. -func (in *GenericStatus) DeepCopy() *GenericStatus { - if in == nil { - return nil - } - out := new(GenericStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KappDeployStatus) DeepCopyInto(out *KappDeployStatus) { - *out = *in - in.AssociatedResources.DeepCopyInto(&out.AssociatedResources) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KappDeployStatus. -func (in *KappDeployStatus) DeepCopy() *KappDeployStatus { - if in == nil { - return nil - } - out := new(KappDeployStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubernetesAPIs) DeepCopyInto(out *KubernetesAPIs) { - *out = *in - if in.GroupVersions != nil { - in, out := &in.GroupVersions, &out.GroupVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesAPIs. -func (in *KubernetesAPIs) DeepCopy() *KubernetesAPIs { - if in == nil { - return nil - } - out := new(KubernetesAPIs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Version) DeepCopyInto(out *Version) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Version. -func (in *Version) DeepCopy() *Version { - if in == nil { - return nil - } - out := new(Version) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/doc.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/doc.go deleted file mode 100644 index 746048840..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -// +k8s:deepcopy-gen=package -// +k8s:defaulter-gen=TypeMeta -// +groupName=packaging.carvel.dev -package v1alpha1 diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/package_install.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/package_install.go deleted file mode 100644 index d27d8edbe..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/package_install.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - versions "carvel.dev/vendir/pkg/vendir/versions/v1alpha1" - "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=pkgi,categories={carvel} -// +kubebuilder:printcolumn:name=Package name,JSONPath=.spec.packageRef.refName,description=PackageMetadata name,type=string -// +kubebuilder:printcolumn:name=Package version,JSONPath=.status.version,description=PackageMetadata version,type=string -// +kubebuilder:printcolumn:name=Description,JSONPath=.status.friendlyDescription,description=Friendly description,type=string -// +kubebuilder:printcolumn:name=Age,JSONPath=.metadata.creationTimestamp,description=Time since creation,type=date -// A Package Install is an actual installation of a package and its underlying resources on a Kubernetes cluster. -// It is represented in kapp-controller by a PackageInstall CR. -// A PackageInstall CR must reference a Package CR. -type PackageInstall struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec PackageInstallSpec `json:"spec"` - // +optional - Status PackageInstallStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PackageInstallList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - Items []PackageInstall `json:"items"` -} - -type PackageInstallSpec struct { - // Specifies service account that will be used to install underlying package contents - // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty"` - // Specifies that Package should be deployed to destination cluster; - // by default, cluster is same as where this resource resides (optional) - // +optional - Cluster *v1alpha1.AppCluster `json:"cluster,omitempty"` - // Specifies the name of the package to install (required) - // +optional - PackageRef *PackageRef `json:"packageRef,omitempty"` - // Values to be included in package's templating step - // (currently only included in the first templating step) (optional) - // +optional - Values []PackageInstallValues `json:"values,omitempty"` - // Paused when set to true will ignore all pending changes, - // once it set back to false, pending changes will be applied - // +optional - Paused bool `json:"paused,omitempty"` - // Canceled when set to true will stop all active changes - // +optional - Canceled bool `json:"canceled,omitempty"` - // Controls frequency of App reconciliation in time + unit - // format. Always >= 30s. If value below 30s is specified, - // 30s will be used. - // +optional - SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"` - // When NoopDelete set to true, PackageInstall deletion - // should delete PackageInstall/App CR but preserve App's - // associated resources. - // +optional - NoopDelete bool `json:"noopDelete,omitempty"` - // Specifies the default namespace to install the Package resources, by default this is - // same as the PackageInstall namespace (optional; v0.48.0+) - // +optional - DefaultNamespace string `json:"defaultNamespace,omitempty"` -} - -type PackageRef struct { - // +optional - RefName string `json:"refName,omitempty"` - // +optional - VersionSelection *versions.VersionSelectionSemver `json:"versionSelection,omitempty"` -} - -type PackageInstallValues struct { - // +optional - SecretRef *PackageInstallValuesSecretRef `json:"secretRef,omitempty"` -} - -type PackageInstallValuesSecretRef struct { - // +optional - Name string `json:"name,omitempty"` - // +optional - Key string `json:"key,omitempty"` -} - -type PackageInstallStatus struct { - // +optional - v1alpha1.GenericStatus `json:",inline"` - // TODO this is desired resolved version (not actually deployed) - // +optional - Version string `json:"version,omitempty"` - // LastAttemptedVersion specifies what version was last attempted to be installed. - // It does _not_ indicate it was successfully installed. - // +optional - LastAttemptedVersion string `json:"lastAttemptedVersion,omitempty"` -} diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/package_repository.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/package_repository.go deleted file mode 100644 index 0ef821c06..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/package_repository.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2024 The Carvel Authors. 
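For reference, a sketch of a PackageInstall built from the types deleted above, constraining the version through the vendir semver selection type it imports. The package name, Secret, and the Constraints field usage are assumptions, not taken from this diff.

```go
package main

import (
	"fmt"

	versions "carvel.dev/vendir/pkg/vendir/versions/v1alpha1"
	pkgv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Install a package by reference, pin an acceptable version range, and
	// layer values from a Secret key into the package's templating step.
	pi := pkgv1alpha1.PackageInstall{
		ObjectMeta: metav1.ObjectMeta{Name: "sample", Namespace: "default"},
		Spec: pkgv1alpha1.PackageInstallSpec{
			ServiceAccountName: "sample-sa",
			PackageRef: &pkgv1alpha1.PackageRef{
				RefName: "sample.example.com",
				VersionSelection: &versions.VersionSelectionSemver{
					Constraints: ">=1.0.0 <2.0.0", // assumed field of the vendir versions package
				},
			},
			Values: []pkgv1alpha1.PackageInstallValues{
				{SecretRef: &pkgv1alpha1.PackageInstallValuesSecretRef{Name: "sample-values", Key: "values.yml"}},
			},
		},
	}
	fmt.Printf("installing %s with constraints %q\n",
		pi.Spec.PackageRef.RefName, pi.Spec.PackageRef.VersionSelection.Constraints)
}
```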
-// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=pkgr,categories={carvel} -// +kubebuilder:printcolumn:name=Age,JSONPath=.metadata.creationTimestamp,description=Time since creation,type=date -// +kubebuilder:printcolumn:name=Description,JSONPath=.status.friendlyDescription,description=Friendly description,type=string -// A package repository is a collection of packages and their metadata. -// Similar to a maven repository or a rpm repository, adding a package repository to a cluster gives users of that cluster the ability to install any of the packages from that repository. -type PackageRepository struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec PackageRepositorySpec `json:"spec"` - // +optional - Status PackageRepositoryStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PackageRepositoryList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []PackageRepository `json:"items"` -} - -type PackageRepositorySpec struct { - // Paused when set to true will ignore all pending changes, - // once it set back to false, pending changes will be applied - // +optional - Paused bool `json:"paused,omitempty"` - // Controls frequency of PackageRepository reconciliation - // +optional - SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"` - - Fetch *PackageRepositoryFetch `json:"fetch"` -} - -type PackageRepositoryFetch struct { - // Image url; unqualified, tagged, or - // digest references supported (required) - // +optional - Image *v1alpha1.AppFetchImage `json:"image,omitempty"` - // Uses http library to fetch file containing packages - // +optional - HTTP *v1alpha1.AppFetchHTTP `json:"http,omitempty"` - // Uses git to clone repository containing package list - // +optional - Git *v1alpha1.AppFetchGit `json:"git,omitempty"` - // Pulls imgpkg bundle from Docker/OCI registry - // +optional - ImgpkgBundle *v1alpha1.AppFetchImgpkgBundle `json:"imgpkgBundle,omitempty"` - // Pull content from within this resource; or other resources in the cluster - // +optional - Inline *v1alpha1.AppFetchInline `json:"inline,omitempty"` -} - -type PackageRepositoryStatus struct { - // +optional - Fetch *v1alpha1.AppStatusFetch `json:"fetch,omitempty"` - // +optional - Template *v1alpha1.AppStatusTemplate `json:"template,omitempty"` - // +optional - Deploy *v1alpha1.AppStatusDeploy `json:"deploy,omitempty"` - // +optional - ConsecutiveReconcileSuccesses int `json:"consecutiveReconcileSuccesses,omitempty"` - // +optional - ConsecutiveReconcileFailures int `json:"consecutiveReconcileFailures,omitempty"` - // +optional - v1alpha1.GenericStatus `json:",inline"` -} diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/register.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/register.go deleted file mode 100644 
index d718b8db5..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/register.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2024 The Carvel Authors. -// SPDX-License-Identifier: Apache-2.0 - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var SchemeGroupVersion = schema.GroupVersion{Group: "packaging.carvel.dev", Version: "v1alpha1"} - -var ( - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - localSchemeBuilder.Register(func(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, &PackageRepository{}, &PackageRepositoryList{}) - scheme.AddKnownTypes(SchemeGroupVersion, &PackageInstall{}, &PackageInstallList{}) - scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil - }) -} - -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} diff --git a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 405ddc200..000000000 --- a/vendor/github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,347 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by main. DO NOT EDIT. - -package v1alpha1 - -import ( - versionsv1alpha1 "carvel.dev/vendir/pkg/vendir/versions/v1alpha1" - kappctrlv1alpha1 "github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageInstall) DeepCopyInto(out *PackageInstall) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageInstall. -func (in *PackageInstall) DeepCopy() *PackageInstall { - if in == nil { - return nil - } - out := new(PackageInstall) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PackageInstall) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageInstallList) DeepCopyInto(out *PackageInstallList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PackageInstall, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageInstallList. 
-func (in *PackageInstallList) DeepCopy() *PackageInstallList { - if in == nil { - return nil - } - out := new(PackageInstallList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PackageInstallList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageInstallSpec) DeepCopyInto(out *PackageInstallSpec) { - *out = *in - if in.Cluster != nil { - in, out := &in.Cluster, &out.Cluster - *out = new(kappctrlv1alpha1.AppCluster) - (*in).DeepCopyInto(*out) - } - if in.PackageRef != nil { - in, out := &in.PackageRef, &out.PackageRef - *out = new(PackageRef) - (*in).DeepCopyInto(*out) - } - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]PackageInstallValues, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SyncPeriod != nil { - in, out := &in.SyncPeriod, &out.SyncPeriod - *out = new(v1.Duration) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageInstallSpec. -func (in *PackageInstallSpec) DeepCopy() *PackageInstallSpec { - if in == nil { - return nil - } - out := new(PackageInstallSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageInstallStatus) DeepCopyInto(out *PackageInstallStatus) { - *out = *in - in.GenericStatus.DeepCopyInto(&out.GenericStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageInstallStatus. -func (in *PackageInstallStatus) DeepCopy() *PackageInstallStatus { - if in == nil { - return nil - } - out := new(PackageInstallStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageInstallValues) DeepCopyInto(out *PackageInstallValues) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(PackageInstallValuesSecretRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageInstallValues. -func (in *PackageInstallValues) DeepCopy() *PackageInstallValues { - if in == nil { - return nil - } - out := new(PackageInstallValues) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageInstallValuesSecretRef) DeepCopyInto(out *PackageInstallValuesSecretRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageInstallValuesSecretRef. -func (in *PackageInstallValuesSecretRef) DeepCopy() *PackageInstallValuesSecretRef { - if in == nil { - return nil - } - out := new(PackageInstallValuesSecretRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PackageRef) DeepCopyInto(out *PackageRef) { - *out = *in - if in.VersionSelection != nil { - in, out := &in.VersionSelection, &out.VersionSelection - *out = new(versionsv1alpha1.VersionSelectionSemver) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRef. -func (in *PackageRef) DeepCopy() *PackageRef { - if in == nil { - return nil - } - out := new(PackageRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageRepository) DeepCopyInto(out *PackageRepository) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRepository. -func (in *PackageRepository) DeepCopy() *PackageRepository { - if in == nil { - return nil - } - out := new(PackageRepository) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PackageRepository) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageRepositoryFetch) DeepCopyInto(out *PackageRepositoryFetch) { - *out = *in - if in.Image != nil { - in, out := &in.Image, &out.Image - *out = new(kappctrlv1alpha1.AppFetchImage) - (*in).DeepCopyInto(*out) - } - if in.HTTP != nil { - in, out := &in.HTTP, &out.HTTP - *out = new(kappctrlv1alpha1.AppFetchHTTP) - (*in).DeepCopyInto(*out) - } - if in.Git != nil { - in, out := &in.Git, &out.Git - *out = new(kappctrlv1alpha1.AppFetchGit) - (*in).DeepCopyInto(*out) - } - if in.ImgpkgBundle != nil { - in, out := &in.ImgpkgBundle, &out.ImgpkgBundle - *out = new(kappctrlv1alpha1.AppFetchImgpkgBundle) - (*in).DeepCopyInto(*out) - } - if in.Inline != nil { - in, out := &in.Inline, &out.Inline - *out = new(kappctrlv1alpha1.AppFetchInline) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRepositoryFetch. -func (in *PackageRepositoryFetch) DeepCopy() *PackageRepositoryFetch { - if in == nil { - return nil - } - out := new(PackageRepositoryFetch) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageRepositoryList) DeepCopyInto(out *PackageRepositoryList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PackageRepository, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRepositoryList. -func (in *PackageRepositoryList) DeepCopy() *PackageRepositoryList { - if in == nil { - return nil - } - out := new(PackageRepositoryList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PackageRepositoryList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageRepositorySpec) DeepCopyInto(out *PackageRepositorySpec) { - *out = *in - if in.SyncPeriod != nil { - in, out := &in.SyncPeriod, &out.SyncPeriod - *out = new(v1.Duration) - **out = **in - } - if in.Fetch != nil { - in, out := &in.Fetch, &out.Fetch - *out = new(PackageRepositoryFetch) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRepositorySpec. -func (in *PackageRepositorySpec) DeepCopy() *PackageRepositorySpec { - if in == nil { - return nil - } - out := new(PackageRepositorySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageRepositoryStatus) DeepCopyInto(out *PackageRepositoryStatus) { - *out = *in - if in.Fetch != nil { - in, out := &in.Fetch, &out.Fetch - *out = new(kappctrlv1alpha1.AppStatusFetch) - (*in).DeepCopyInto(*out) - } - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(kappctrlv1alpha1.AppStatusTemplate) - (*in).DeepCopyInto(*out) - } - if in.Deploy != nil { - in, out := &in.Deploy, &out.Deploy - *out = new(kappctrlv1alpha1.AppStatusDeploy) - (*in).DeepCopyInto(*out) - } - in.GenericStatus.DeepCopyInto(&out.GenericStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRepositoryStatus. -func (in *PackageRepositoryStatus) DeepCopy() *PackageRepositoryStatus { - if in == nil { - return nil - } - out := new(PackageRepositoryStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index cf66309c4..db1c95fab 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -3,29 +3,31 @@ // license that can be found in the LICENSE file. // Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries +// cancellation signals, and other request-scoped values across API boundaries // and between processes. // As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. +// name [context], and migrating to it can be done automatically with [go fix]. // -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// Incoming requests to a server should create a [Context], and outgoing +// calls to servers should accept a Context. The chain of function +// calls between them must propagate the Context, optionally replacing +// it with a derived Context created using [WithCancel], [WithDeadline], +// [WithTimeout], or [WithValue]. 
// // Programs that use Contexts should follow these rules to keep interfaces // consistent across packages and enable static analysis tools to check context // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. This is discussed further in +// https://go.dev/blog/context-and-structs. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // -// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and @@ -34,9 +36,30 @@ // The same Context may be passed to functions running in different goroutines; // Contexts are safe for simultaneous use by multiple goroutines. // -// See http://blog.golang.org/context for example code for a server that uses +// See https://go.dev/blog/context for example code for a server that uses // Contexts. -package context // import "golang.org/x/net/context" +// +// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// Canceled is the error returned by [Context.Err] when the context is canceled +// for some reason other than its deadline passing. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled +// due to its deadline passing. +var DeadlineExceeded = context.DeadlineExceeded // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, @@ -49,8 +72,73 @@ func Background() Context { // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. +// parameter). func TODO() Context { return todo } + +var ( + background = context.Background() + todo = context.TODO() +) + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// A CancelFunc may be called by multiple goroutines simultaneously. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc + +// WithCancel returns a derived context that points to the parent context +// but has a new Done channel. The returned context's Done channel is closed +// when the returned cancel function is called or when the parent context's +// Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. 
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + return context.WithCancel(parent) +} + +// WithDeadline returns a derived context that points to the parent context +// but has the deadline adjusted to be no later than d. If the parent's +// deadline is already earlier than d, WithDeadline(parent, d) is semantically +// equivalent to parent. The returned [Context.Done] channel is closed when +// the deadline expires, when the returned cancel function is called, +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { + return context.WithDeadline(parent, d) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return context.WithTimeout(parent, timeout) +} + +// WithValue returns a derived context that points to the parent Context. +// In the derived context, the value associated with key is val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The provided key must be comparable and should not be of type +// string or any other built-in type to avoid collisions between +// packages using context. Users of WithValue should define their own +// types for keys. To avoid allocating when assigning to an +// interface{}, context keys often have concrete type +// struct{}. Alternatively, exported context key variables' static +// type should be a pointer or interface. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b86793..000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a90..000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa..000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. 
It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. 
- - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a63803..000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. 
- // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc func() diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 7434b8784..b640deb0e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -2233,25 +2233,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - protocol: f.PseudoValue("protocol"), + rp := httpcommon.ServerRequestParam{ + Method: f.PseudoValue("method"), + Scheme: f.PseudoValue("scheme"), + Authority: f.PseudoValue("authority"), + Path: f.PseudoValue("path"), + Protocol: f.PseudoValue("protocol"), } // extended connect is disabled, so we should not see :protocol - if disableExtendedConnectProtocol && rp.protocol != "" { + if disableExtendedConnectProtocol && rp.Protocol != "" { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - isConnect := rp.method == "CONNECT" + isConnect := rp.Method == "CONNECT" if isConnect { - if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { + if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -2265,15 +2265,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - rp.header = make(http.Header) + header := make(http.Header) + rp.Header = header for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") + if rp.Authority == "" { + rp.Authority = header.Get("Host") } - if rp.protocol != "" { - rp.header.Set(":protocol", rp.protocol) + if rp.Protocol != "" { + header.Set(":protocol", rp.Protocol) } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) @@ -2282,7 +2283,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } bodyOpen := !f.StreamEnded() if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := rp.Header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { req.ContentLength = int64(cl) } else { @@ -2298,84 +2299,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -type requestParam struct { - method string - scheme, authority, path string - protocol string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if rp.Scheme == 
"https" { tlsState = sc.tlsState } - needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(textproto.TrimString(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" && rp.protocol == "" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) - } - requestURI = rp.path + res := httpcommon.NewServerRequest(rp) + if res.InvalidReason != "" { + return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol)) } body := &requestBody{ conn: sc, stream: st, - needsContinue: needsContinue, + needsContinue: res.NeedsContinue, } - req := &http.Request{ - Method: rp.method, - URL: url_, + req := (&http.Request{ + Method: rp.Method, + URL: res.URL, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, + Header: rp.Header, + RequestURI: res.RequestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: rp.authority, + Host: rp.Authority, Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - + Trailer: res.Trailer, + }).WithContext(st.ctx) rw := sc.newResponseWriter(st, req) return rw, req, nil } @@ -3270,12 +3225,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{ + Method: msg.method, + Scheme: msg.url.Scheme, + Authority: msg.url.Host, + Path: msg.url.RequestURI(), + Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f2c166b61..f26356b9c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1286,6 +1286,19 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. 
+func actualContentLength(req *http.Request) int64 { + if req.Body == nil || req.Body == http.NoBody { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + func (cc *ClientConn) decrStreamReservations() { cc.mu.Lock() defer cc.mu.Unlock() @@ -1310,7 +1323,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) reqCancel: req.Cancel, isHead: req.Method == "HEAD", reqBody: req.Body, - reqBodyContentLength: httpcommon.ActualContentLength(req), + reqBodyContentLength: actualContentLength(req), trace: httptrace.ContextClientTrace(ctx), peerClosed: make(chan struct{}), abort: make(chan struct{}), @@ -1318,7 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) donec: make(chan struct{}), } - cs.requestedGzip = httpcommon.IsRequestGzip(req, cc.t.disableCompression()) + cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression()) go cs.doRequest(req, streamf) @@ -1349,7 +1362,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) } res.Request = req res.TLS = cc.tlsState - if res.Body == noBody && httpcommon.ActualContentLength(req) == 0 { + if res.Body == noBody && actualContentLength(req) == 0 { // If there isn't a request or response body still being // written, then wait for the stream to be closed before // RoundTrip returns. @@ -1596,12 +1609,7 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) cc.hbuf.Reset() - res, err := httpcommon.EncodeHeaders(httpcommon.EncodeHeadersParam{ - Request: req, - AddGzipHeader: cs.requestedGzip, - PeerMaxHeaderListSize: cc.peerMaxHeaderListSize, - DefaultUserAgent: defaultUserAgent, - }, func(name, value string) { + res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) { cc.writeHeader(name, value) }) if err != nil { @@ -1617,6 +1625,22 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { return err } +func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) { + return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{ + Request: httpcommon.Request{ + Header: req.Header, + Trailer: req.Trailer, + URL: req.URL, + Host: req.Host, + Method: req.Method, + ActualContentLength: actualContentLength(req), + }, + AddGzipHeader: addGzipHeader, + PeerMaxHeaderListSize: peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, headerf) +} + // cleanupWriteRequest performs post-request tasks. // // If err (the result of writeRequest) is non-nil and the stream is not closed, @@ -2186,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() { } cc.cond.Broadcast() cc.mu.Unlock() + + if !cc.seenSettings { + // If we have a pending request that wants extended CONNECT, + // let it continue and fail with the connection error. 
+ cc.extendedConnectAllowed = true + close(cc.seenSettingsChan) + } } // countReadFrameError calls Transport.CountError with a string @@ -2278,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } - if !cc.seenSettings { - close(cc.seenSettingsChan) - } return err } } diff --git a/vendor/golang.org/x/net/internal/httpcommon/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go index ad3fbacd6..92483d8e4 100644 --- a/vendor/golang.org/x/net/internal/httpcommon/headermap.go +++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -5,7 +5,7 @@ package httpcommon import ( - "net/http" + "net/textproto" "sync" ) @@ -82,7 +82,7 @@ func buildCommonHeaderMaps() { commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) for _, v := range common { - chk := http.CanonicalHeaderKey(v) + chk := textproto.CanonicalMIMEHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } @@ -104,7 +104,7 @@ func CanonicalHeader(v string) string { if s, ok := commonCanonHeader[v]; ok { return s } - return http.CanonicalHeaderKey(v) + return textproto.CanonicalMIMEHeaderKey(v) } // CachedCanonicalHeader returns the canonical form of a well-known header name. diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go index 343914773..4b7055317 100644 --- a/vendor/golang.org/x/net/internal/httpcommon/request.go +++ b/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -5,10 +5,12 @@ package httpcommon import ( + "context" "errors" "fmt" - "net/http" "net/http/httptrace" + "net/textproto" + "net/url" "sort" "strconv" "strings" @@ -21,9 +23,21 @@ var ( ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit") ) +// Request is a subset of http.Request. +// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http +// without creating a dependency cycle. +type Request struct { + URL *url.URL + Method string + Host string + Header map[string][]string + Trailer map[string][]string + ActualContentLength int64 // 0 means 0, -1 means unknown +} + // EncodeHeadersParam is parameters to EncodeHeaders. type EncodeHeadersParam struct { - Request *http.Request + Request Request // AddGzipHeader indicates that an "accept-encoding: gzip" header should be // added to the request. @@ -47,11 +61,11 @@ type EncodeHeadersResult struct { // It validates a request and calls headerf with each pseudo-header and header // for the request. // The headerf function is called with the validated, canonicalized header name. -func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { +func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { req := param.Request // Check for invalid connection-level headers. - if err := checkConnHeaders(req); err != nil { + if err := checkConnHeaders(req.Header); err != nil { return res, err } @@ -73,7 +87,10 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( // isNormalConnect is true if this is a non-extended CONNECT request. 
isNormalConnect := false - protocol := req.Header.Get(":protocol") + var protocol string + if vv := req.Header[":protocol"]; len(vv) > 0 { + protocol = vv[0] + } if req.Method == "CONNECT" && protocol == "" { isNormalConnect = true } else if protocol != "" && req.Method != "CONNECT" { @@ -107,9 +124,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( return res, fmt.Errorf("invalid HTTP trailer %s", err) } - contentLength := ActualContentLength(req) - - trailers, err := commaSeparatedTrailers(req) + trailers, err := commaSeparatedTrailers(req.Trailer) if err != nil { return res, err } @@ -123,7 +138,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( f(":authority", host) m := req.Method if m == "" { - m = http.MethodGet + m = "GET" } f(":method", m) if !isNormalConnect { @@ -198,8 +213,8 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( f(k, v) } } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) + if shouldSendReqContentLength(req.Method, req.ActualContentLength) { + f("content-length", strconv.FormatInt(req.ActualContentLength, 10)) } if param.AddGzipHeader { f("accept-encoding", "gzip") @@ -225,7 +240,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( } } - trace := httptrace.ContextClientTrace(req.Context()) + trace := httptrace.ContextClientTrace(ctx) // Header list size is ok. Write the headers. enumerateHeaders(func(name, value string) { @@ -243,19 +258,19 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( } }) - res.HasBody = contentLength != 0 + res.HasBody = req.ActualContentLength != 0 res.HasTrailers = trailers != "" return res, nil } // IsRequestGzip reports whether we should add an Accept-Encoding: gzip header // for a request. -func IsRequestGzip(req *http.Request, disableCompression bool) bool { +func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool { // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? if !disableCompression && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - req.Method != "HEAD" { + len(header["Accept-Encoding"]) == 0 && + len(header["Range"]) == 0 && + method != "HEAD" { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. // See: https://zlib.net/zlib_faq.html#faq39 @@ -280,22 +295,22 @@ func IsRequestGzip(req *http.Request, disableCompression bool) bool { // // Certain headers are special-cased as okay but not transmitted later. // For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding. 
-func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("invalid Upgrade request header: %q", req.Header["Upgrade"]) +func checkConnHeaders(h map[string][]string) error { + if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Upgrade request header: %q", vv) } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv) } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { + if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { return fmt.Errorf("invalid Connection request header: %q", vv) } return nil } -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { +func commaSeparatedTrailers(trailer map[string][]string) (string, error) { + keys := make([]string, 0, len(trailer)) + for k := range trailer { k = CanonicalHeader(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": @@ -310,19 +325,6 @@ func commaSeparatedTrailers(req *http.Request) (string, error) { return "", nil } -// ActualContentLength returns a sanitized version of -// req.ContentLength, where 0 actually means zero (not unknown) and -1 -// means unknown. -func ActualContentLength(req *http.Request) int64 { - if req.Body == nil || req.Body == http.NoBody { - return 0 - } - if req.ContentLength != 0 { - return req.ContentLength - } - return -1 -} - // validPseudoPath reports whether v is a valid :path pseudo-header // value. It must be either: // @@ -340,7 +342,7 @@ func validPseudoPath(v string) bool { return (len(v) > 0 && v[0] == '/') || v == "*" } -func validateHeaders(hdrs http.Header) string { +func validateHeaders(hdrs map[string][]string) string { for k, vv := range hdrs { if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) @@ -377,3 +379,89 @@ func shouldSendReqContentLength(method string, contentLength int64) bool { return false } } + +// ServerRequestParam is parameters to NewServerRequest. +type ServerRequestParam struct { + Method string + Scheme, Authority, Path string + Protocol string + Header map[string][]string +} + +// ServerRequestResult is the result of NewServerRequest. +type ServerRequestResult struct { + // Various http.Request fields. + URL *url.URL + RequestURI string + Trailer map[string][]string + + NeedsContinue bool // client provided an "Expect: 100-continue" header + + // If the request should be rejected, this is a short string suitable for passing + // to the http2 package's CountError function. + // It might be a bit odd to return errors this way rather than returing an error, + // but this ensures we don't forget to include a CountError reason. + InvalidReason string +} + +func NewServerRequest(rp ServerRequestParam) ServerRequestResult { + needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue") + if needsContinue { + delete(rp.Header, "Expect") + } + // Merge Cookie headers into one "; "-delimited value. 
+ if cookies := rp.Header["Cookie"]; len(cookies) > 1 { + rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")} + } + + // Setup Trailers + var trailer map[string][]string + for _, v := range rp.Header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(map[string][]string) + } + trailer[key] = nil + } + } + } + delete(rp.Header, "Trailer") + + // "':authority' MUST NOT include the deprecated userinfo subcomponent + // for "http" or "https" schemed URIs." + // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8 + if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") { + return ServerRequestResult{ + InvalidReason: "userinfo_in_authority", + } + } + + var url_ *url.URL + var requestURI string + if rp.Method == "CONNECT" && rp.Protocol == "" { + url_ = &url.URL{Host: rp.Authority} + requestURI = rp.Authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.Path) + if err != nil { + return ServerRequestResult{ + InvalidReason: "bad_path", + } + } + requestURI = rp.Path + } + + return ServerRequestResult{ + URL: url_, + NeedsContinue: needsContinue, + RequestURI: requestURI, + Trailer: trailer, + } +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go index d7d4b8b6e..32bdf435e 100644 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -7,6 +7,7 @@ package proxy import ( "context" "net" + "net/netip" "strings" ) @@ -57,7 +58,8 @@ func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net. } func (p *PerHost) dialerForRequest(host string) Dialer { - if ip := net.ParseIP(host); ip != nil { + if nip, err := netip.ParseAddr(host); err == nil { + ip := net.IP(nip.AsSlice()) for _, net := range p.bypassNetworks { if net.Contains(ip) { return p.bypass @@ -108,8 +110,8 @@ func (p *PerHost) AddFromString(s string) { } continue } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) + if nip, err := netip.ParseAddr(host); err == nil { + p.AddIP(net.IP(nip.AsSlice())) continue } if strings.HasPrefix(host, "*.") { diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index b8322598a..a4ea5d14f 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -46,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b6..000000000 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce33434..000000000 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/vendor/golang.org/x/text/internal/number/format.go b/vendor/golang.org/x/text/internal/number/format.go index cd94c5dc4..1aadcf407 100644 --- a/vendor/golang.org/x/text/internal/number/format.go +++ b/vendor/golang.org/x/text/internal/number/format.go @@ -394,9 +394,7 @@ func appendScientific(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, p exp := n.Exp - int32(n.Comma) exponential := f.Symbol(SymExponential) if exponential == "E" { - dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE dst = append(dst, f.Symbol(SymSuperscriptingExponent)...) - dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE dst = f.AppendDigit(dst, 1) dst = f.AppendDigit(dst, 0) switch { diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go index 4d57222e7..053336e28 100644 --- a/vendor/golang.org/x/text/language/parse.go +++ b/vendor/golang.org/x/text/language/parse.go @@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) { if changed { tt.RemakeString() } - return makeTag(tt), err + return makeTag(tt), nil } // Compose creates a Tag from individual parts, which may be of type Tag, Base, diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go index 3a73084a5..a7df4d1fe 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysis.go +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -45,7 +45,7 @@ type Analyzer struct { // To pass analysis results between packages (and thus // potentially between address spaces), use Facts, which are // serializable. - Run func(*Pass) (interface{}, error) + Run func(*Pass) (any, error) // RunDespiteErrors allows the driver to invoke // the Run method of this analyzer even on a @@ -112,7 +112,7 @@ type Pass struct { // The map keys are the elements of Analysis.Required, // and the type of each corresponding value is the required // analysis's ResultType. - ResultOf map[*Analyzer]interface{} + ResultOf map[*Analyzer]any // ReadFile returns the contents of the named file. // @@ -186,7 +186,7 @@ type ObjectFact struct { // Reportf is a helper function that reports a Diagnostic using the // specified position and formatted error message. -func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) { +func (pass *Pass) Reportf(pos token.Pos, format string, args ...any) { msg := fmt.Sprintf(format, args...) pass.Report(Diagnostic{Pos: pos, Message: msg}) } @@ -201,7 +201,7 @@ type Range interface { // ReportRangef is a helper function that reports a Diagnostic using the // range provided. ast.Node values can be passed in as the range because // they satisfy the Range interface. 
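// Illustrative sketch, not part of the vendored change: inside an analyzer's
// Run function, an ast.Node (here a hypothetical *ast.CallExpr named call)
// satisfies Range and can be passed directly as the range:
//
//	pass.ReportRangef(call, "result of %s call is unused", "errors.New")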
-func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) { +func (pass *Pass) ReportRangef(rng Range, format string, args ...any) { msg := fmt.Sprintf(format, args...) pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg}) } diff --git a/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go b/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go index 775fd2009..143b42603 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go +++ b/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go @@ -12,6 +12,7 @@ import ( "go/token" "go/types" "log" + "maps" "os" "path/filepath" "regexp" @@ -75,19 +76,27 @@ var TestData = func() string { // Testing is an abstraction of a *testing.T. type Testing interface { - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) } -// RunWithSuggestedFixes behaves like Run, but additionally verifies suggested fixes. -// It uses golden files placed alongside the source code under analysis: -// suggested fixes for code in example.go will be compared against example.go.golden. +// RunWithSuggestedFixes behaves like Run, but additionally applies +// suggested fixes and verifies their output. // -// Golden files can be formatted in one of two ways: as plain Go source code, or as txtar archives. -// In the first case, all suggested fixes will be applied to the original source, which will then be compared against the golden file. -// In the second case, suggested fixes will be grouped by their messages, and each set of fixes will be applied and tested separately. -// Each section in the archive corresponds to a single message. +// It uses golden files, placed alongside each source file, to express +// the desired output: the expected transformation of file example.go +// is specified in file example.go.golden. // -// A golden file using txtar may look like this: +// Golden files may be of two forms: a plain Go source file, or a +// txtar archive. +// +// A plain Go source file indicates the expected result of applying +// all suggested fixes to the original file. +// +// A txtar archive specifies, in each section, the expected result of +// applying all suggested fixes of a given message to the original +// file; the name of the archive section is the fix's message. In this +// way, the various alternative fixes offered by a single diagnostic +// can be tested independently. Here's an example: // // -- turn into single negation -- // package pkg @@ -109,41 +118,28 @@ type Testing interface { // // # Conflicts // -// A single analysis pass may offer two or more suggested fixes that -// (1) conflict but are nonetheless logically composable, (e.g. -// because both update the import declaration), or (2) are -// fundamentally incompatible (e.g. alternative fixes to the same -// statement). +// Regardless of the form of the golden file, it is possible for +// multiple fixes to conflict, either because they overlap, or are +// close enough together that the particular diff algorithm cannot +// separate them. // -// It is up to the driver to decide how to apply such fixes. A -// sophisticated driver could attempt to resolve conflicts of the -// first kind, but this test driver simply reports the fact of the -// conflict with the expectation that the user will split their tests -// into nonconflicting parts. +// RunWithSuggestedFixes uses a simple three-way merge to accumulate +// fixes, similar to a git merge. 
The merge algorithm may be able to +// coalesce identical edits, for example duplicate imports of the same +// package. (Bear in mind that this is an editorial decision. In +// general, coalescing identical edits may not be correct: consider +// two statements that increment the same counter.) // -// Conflicts of the second kind can be avoided by giving the -// alternative fixes different names (SuggestedFix.Message) and -// defining the .golden file as a multi-section txtar file with a -// named section for each alternative fix, as shown above. +// If there are conflicts, the test fails. In any case, the +// non-conflicting edits will be compared against the expected output. +// In this situation, we recommend that you increase the textual +// separation between conflicting parts or, if that fails, split +// your tests into smaller parts. // -// Analyzers that compute fixes from a textual diff of the -// before/after file contents (instead of directly from syntax tree -// positions) may produce fixes that, although logically -// non-conflicting, nonetheless conflict due to the particulars of the -// diff algorithm. In such cases it may suffice to introduce -// sufficient separation of the statements in the test input so that -// the computed diffs do not overlap. If that fails, break the test -// into smaller parts. -// -// TODO(adonovan): the behavior of RunWithSuggestedFixes as documented -// above is impractical for tests that report multiple diagnostics and -// offer multiple alternative fixes for the same diagnostic, and it is -// inconsistent with the interpretation of multiple diagnostics -// described at Diagnostic.SuggestedFixes. -// We need to rethink the analyzer testing API to better support such -// cases. In the meantime, users of RunWithSuggestedFixes testing -// analyzers that offer alternative fixes are advised to put each fix -// in a separate .go file in the testdata. +// If a diagnostic offers multiple fixes for the same problem, they +// are almost certain to conflict, so in this case you should define +// the expected output using a multi-section txtar file as described +// above. func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns ...string) []*Result { results := Run(t, dir, a, patterns...) @@ -173,133 +169,165 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns for _, result := range results { act := result.Action - // file -> message -> edits - // TODO(adonovan): this mapping assumes fix.Messages are unique across analyzers, - // whereas they are only unique within a given Diagnostic. - fileEdits := make(map[*token.File]map[string][]diff.Edit) - - // We may assume that fixes are validated upon creation in Pass.Report. - // Group fixes by file and message. + // For each fix, split its edits by file and convert to diff form. + var ( + // fixEdits: message -> fixes -> filename -> edits + // + // TODO(adonovan): this mapping assumes fix.Messages + // are unique across analyzers, whereas they are only + // unique within a given Diagnostic. + fixEdits = make(map[string][]map[string][]diff.Edit) + allFilenames = make(map[string]bool) + ) for _, diag := range act.Diagnostics { + // Fixes are validated upon creation in Pass.Report. for _, fix := range diag.SuggestedFixes { // Assert that lazy fixes have a Category (#65578, #65087). 
if inTools && len(fix.TextEdits) == 0 && diag.Category == "" { t.Errorf("missing Diagnostic.Category for SuggestedFix without TextEdits (gopls requires the category for the name of the fix command") } + // Convert edits to diff form. + // Group fixes by message and file. + edits := make(map[string][]diff.Edit) for _, edit := range fix.TextEdits { file := act.Package.Fset.File(edit.Pos) - if _, ok := fileEdits[file]; !ok { - fileEdits[file] = make(map[string][]diff.Edit) - } - fileEdits[file][fix.Message] = append(fileEdits[file][fix.Message], diff.Edit{ + allFilenames[file.Name()] = true + edits[file.Name()] = append(edits[file.Name()], diff.Edit{ Start: file.Offset(edit.Pos), End: file.Offset(edit.End), New: string(edit.NewText), }) } + fixEdits[fix.Message] = append(fixEdits[fix.Message], edits) + } + } + + merge := func(file, message string, x, y []diff.Edit) []diff.Edit { + z, ok := diff.Merge(x, y) + if !ok { + t.Errorf("in file %s, conflict applying fix %q", file, message) + return x // discard y } + return z } - for file, fixes := range fileEdits { - // Get the original file contents. - // TODO(adonovan): plumb pass.ReadFile. - orig, err := os.ReadFile(file.Name()) + // Because the checking is driven by original + // filenames, there is no way to express that a fix + // (e.g. extract declaration) creates a new file. + for _, filename := range slices.Sorted(maps.Keys(allFilenames)) { + // Read the original file. + content, err := os.ReadFile(filename) if err != nil { - t.Errorf("error reading %s: %v", file.Name(), err) + t.Errorf("error reading %s: %v", filename, err) continue } - // Get the golden file and read the contents. - ar, err := txtar.ParseFile(file.Name() + ".golden") + // check checks that the accumulated edits applied + // to the original content yield the wanted content. + check := func(prefix string, accumulated []diff.Edit, want []byte) { + if err := applyDiffsAndCompare(filename, content, want, accumulated); err != nil { + t.Errorf("%s: %s", prefix, err) + } + } + + // Read the golden file. It may have one of two forms: + // (1) A txtar archive with one section per fix title, + // including all fixes of just that title. + // (2) The expected output for file.Name after all (?) fixes are applied. + // This form requires that no diagnostic has multiple fixes. + ar, err := txtar.ParseFile(filename + ".golden") if err != nil { - t.Errorf("error reading %s.golden: %v", file.Name(), err) + t.Errorf("error reading %s.golden: %v", filename, err) continue } - if len(ar.Files) > 0 { - // one virtual file per kind of suggested fix - - if len(ar.Comment) != 0 { - // we allow either just the comment, or just virtual - // files, not both. it is not clear how "both" should - // behave. - t.Errorf("%s.golden has leading comment; we don't know what to do with it", file.Name()) + // Form #1: one archive section per kind of suggested fix. + if len(ar.Comment) > 0 { + // Disallow the combination of comment and archive sections. + t.Errorf("%s.golden has leading comment; we don't know what to do with it", filename) continue } - // Sort map keys for determinism in tests. - // TODO(jba): replace with slices.Sorted(maps.Keys(fixes)) when go.mod >= 1.23. 
- var keys []string - for k := range fixes { - keys = append(keys, k) - } - slices.Sort(keys) - for _, sf := range keys { - edits := fixes[sf] - found := false - for _, vf := range ar.Files { - if vf.Name == sf { - found = true - // the file may contain multiple trailing - // newlines if the user places empty lines - // between files in the archive. normalize - // this to a single newline. - golden := append(bytes.TrimRight(vf.Data, "\n"), '\n') - - if err := applyDiffsAndCompare(orig, golden, edits, file.Name()); err != nil { - t.Errorf("%s", err) - } - break - } - } - if !found { - t.Errorf("no section for suggested fix %q in %s.golden", sf, file.Name()) + + // Each archive section is named for a fix.Message. + // Accumulate the parts of the fix that apply to the current file, + // using a simple three-way merge, discarding conflicts, + // then apply the merged edits and compare to the archive section. + for _, section := range ar.Files { + message, want := section.Name, section.Data + var accumulated []diff.Edit + for _, fix := range fixEdits[message] { + accumulated = merge(filename, message, accumulated, fix[filename]) } - } - } else { - // all suggested fixes are represented by a single file - // TODO(adonovan): fix: this makes no sense if len(fixes) > 1. - // Sort map keys for determinism in tests. - // TODO(jba): replace with slices.Sorted(maps.Keys(fixes)) when go.mod >= 1.23. - var keys []string - for k := range fixes { - keys = append(keys, k) - } - slices.Sort(keys) - var catchallEdits []diff.Edit - for _, k := range keys { - catchallEdits = append(catchallEdits, fixes[k]...) + check(fmt.Sprintf("all fixes of message %q", message), accumulated, want) } - if err := applyDiffsAndCompare(orig, ar.Comment, catchallEdits, file.Name()); err != nil { - t.Errorf("%s", err) + } else { + // Form #2: all suggested fixes are represented by a single file. + want := ar.Comment + var accumulated []diff.Edit + for _, message := range slices.Sorted(maps.Keys(fixEdits)) { + for _, fix := range fixEdits[message] { + accumulated = merge(filename, message, accumulated, fix[filename]) + } } + check("all fixes", accumulated, want) } } } + return results } -// applyDiffsAndCompare applies edits to src and compares the results against -// golden after formatting both. fileName is use solely for error reporting. -func applyDiffsAndCompare(src, golden []byte, edits []diff.Edit, fileName string) error { - out, err := diff.ApplyBytes(src, edits) +// applyDiffsAndCompare applies edits to original and compares the results against +// want after formatting both. fileName is use solely for error reporting. +func applyDiffsAndCompare(filename string, original, want []byte, edits []diff.Edit) error { + // Relativize filename, for tidier errors. 
+ if cwd, err := os.Getwd(); err == nil { + if rel, err := filepath.Rel(cwd, filename); err == nil { + filename = rel + } + } + + if len(edits) == 0 { + return fmt.Errorf("%s: no edits", filename) + } + fixedBytes, err := diff.ApplyBytes(original, edits) if err != nil { - return fmt.Errorf("%s: error applying fixes: %v (see possible explanations at RunWithSuggestedFixes)", fileName, err) + return fmt.Errorf("%s: error applying fixes: %v (see possible explanations at RunWithSuggestedFixes)", filename, err) } - wantRaw, err := format.Source(golden) + fixed, err := format.Source(fixedBytes) if err != nil { - return fmt.Errorf("%s.golden: error formatting golden file: %v\n%s", fileName, err, out) + return fmt.Errorf("%s: error formatting resulting source: %v\n%s", filename, err, fixed) } - want := string(wantRaw) - formatted, err := format.Source(out) + want, err = format.Source(want) if err != nil { - return fmt.Errorf("%s: error formatting resulting source: %v\n%s", fileName, err, out) - } - if got := string(formatted); got != want { - unified := diff.Unified(fileName+".golden", "actual", want, got) - return fmt.Errorf("suggested fixes failed for %s:\n%s", fileName, unified) + return fmt.Errorf("%s.golden: error formatting golden file: %v\n%s", filename, err, fixed) + } + + // Keep error reporting logic below consistent with + // TestScript in ../internal/checker/fix_test.go! + + unified := func(xlabel, ylabel string, x, y []byte) string { + x = append(slices.Clip(bytes.TrimSpace(x)), '\n') + y = append(slices.Clip(bytes.TrimSpace(y)), '\n') + return diff.Unified(xlabel, ylabel, string(x), string(y)) + } + + if diff := unified(filename+" (fixed)", filename+" (want)", fixed, want); diff != "" { + return fmt.Errorf("unexpected %s content:\n"+ + "-- original --\n%s\n"+ + "-- fixed --\n%s\n"+ + "-- want --\n%s\n"+ + "-- diff original fixed --\n%s\n"+ + "-- diff fixed want --\n%s", + filename, + original, + fixed, + want, + unified(filename+" (original)", filename+" (fixed)", original, fixed), + diff) } return nil } diff --git a/vendor/golang.org/x/tools/go/analysis/checker/checker.go b/vendor/golang.org/x/tools/go/analysis/checker/checker.go index 502ec9221..94808733b 100644 --- a/vendor/golang.org/x/tools/go/analysis/checker/checker.go +++ b/vendor/golang.org/x/tools/go/analysis/checker/checker.go @@ -594,7 +594,7 @@ func (act *Action) exportPackageFact(fact analysis.Fact) { func factType(fact analysis.Fact) reflect.Type { t := reflect.TypeOf(fact) - if t.Kind() != reflect.Ptr { + if t.Kind() != reflect.Pointer { log.Fatalf("invalid Fact type: got %T, want pointer", fact) } return t diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go index c2445575c..6aefef258 100644 --- a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go @@ -201,7 +201,7 @@ func addVersionFlag() { type versionFlag struct{} func (versionFlag) IsBoolFlag() bool { return true } -func (versionFlag) Get() interface{} { return nil } +func (versionFlag) Get() any { return nil } func (versionFlag) String() string { return "" } func (versionFlag) Set(s string) error { if s != "full" { @@ -252,7 +252,7 @@ const ( // triState implements flag.Value, flag.Getter, and flag.boolFlag. 
// They work like boolean flags: we can say vet -printf as well as vet -printf=true -func (ts *triState) Get() interface{} { +func (ts *triState) Get() any { return *ts == setTrue } @@ -340,7 +340,7 @@ func PrintPlain(out io.Writer, fset *token.FileSet, contextLines int, diag analy // A JSONTree is a mapping from package ID to analysis name to result. // Each result is either a jsonError or a list of JSONDiagnostic. -type JSONTree map[string]map[string]interface{} +type JSONTree map[string]map[string]any // A TextEdit describes the replacement of a portion of a file. // Start and End are zero-based half-open indices into the original byte @@ -383,7 +383,7 @@ type JSONRelatedInformation struct { // Add adds the result of analysis 'name' on package 'id'. // The result is either a list of diagnostics or an error. func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) { - var v interface{} + var v any if err != nil { type jsonError struct { Err string `json:"error"` @@ -429,7 +429,7 @@ func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis. if v != nil { m, ok := tree[id] if !ok { - m = make(map[string]interface{}) + m = make(map[string]any) tree[id] = m } m[name] = v diff --git a/vendor/golang.org/x/tools/go/analysis/internal/internal.go b/vendor/golang.org/x/tools/go/analysis/internal/internal.go index e7c8247fd..327c4b505 100644 --- a/vendor/golang.org/x/tools/go/analysis/internal/internal.go +++ b/vendor/golang.org/x/tools/go/analysis/internal/internal.go @@ -9,4 +9,4 @@ import "golang.org/x/tools/go/analysis" // This function is set by the checker package to provide // backdoor access to the private Pass field // of the checker.Action type, for use by analysistest. -var Pass func(interface{}) *analysis.Pass +var Pass func(any) *analysis.Pass diff --git a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go index 82c3db6a3..a1ee80388 100644 --- a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go +++ b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -287,7 +287,7 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re // Also build a map to hold working state and result. type action struct { once sync.Once - result interface{} + result any err error usesFacts bool // (transitively uses) diagnostics []analysis.Diagnostic @@ -337,7 +337,7 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re // The inputs to this analysis are the // results of its prerequisites. 
- inputs := make(map[*analysis.Analyzer]interface{}) + inputs := make(map[*analysis.Analyzer]any) var failed []string for _, req := range a.Requires { reqact := exec(req) diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go index 4f2c40456..145393921 100644 --- a/vendor/golang.org/x/tools/go/analysis/validate.go +++ b/vendor/golang.org/x/tools/go/analysis/validate.go @@ -63,7 +63,7 @@ func Validate(analyzers []*Analyzer) error { return fmt.Errorf("fact type %s registered by two analyzers: %v, %v", t, a, prev) } - if t.Kind() != reflect.Ptr { + if t.Kind() != reflect.Pointer { return fmt.Errorf("%s: fact type %s is not a pointer", a, t) } factTypes[t] = a diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index c3a59b8eb..6665a04c1 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -141,6 +141,8 @@ const ( LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + // + //go:fix inline NeedExportsFile = NeedExportFile ) @@ -161,7 +163,7 @@ type Config struct { // If the user provides a logger, debug logging is enabled. // If the GOPACKAGESDEBUG environment variable is set to true, // but the logger is nil, default to log.Printf. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Dir is the directory in which to run the build system's query tool // that provides information about the packages. @@ -564,13 +566,13 @@ type ModuleError struct { } func init() { - packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.SetModFile = func(config interface{}, value string) { + packagesinternal.SetModFile = func(config any, value string) { config.(*Config).modFile = value } - packagesinternal.SetModFlag = func(config interface{}, value string) { + packagesinternal.SetModFlag = func(config any, value string) { config.(*Config).modFlag = value } packagesinternal.TypecheckCgo = int(typecheckCgo) @@ -739,7 +741,7 @@ func newLoader(cfg *Config) *loader { if debug { ld.Config.Logf = log.Printf } else { - ld.Config.Logf = func(format string, args ...interface{}) {} + ld.Config.Logf = func(format string, args ...any) {} } } if ld.Config.Mode == 0 { diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index 43261147c..b6d542c64 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -389,8 +389,13 @@ func (hasher) hashTypeName(tname *types.TypeName) uint32 { // path, and whether or not it is a package-level typename. It // is rare for a package to define multiple local types with // the same name.) 
- hash := uintptr(unsafe.Pointer(tname)) - return uint32(hash ^ (hash >> 32)) + ptr := uintptr(unsafe.Pointer(tname)) + if unsafe.Sizeof(ptr) == 8 { + hash := uint64(ptr) + return uint32(hash ^ (hash >> 32)) + } else { + return uint32(ptr) + } } // shallowHash computes a hash of t without looking at any of its diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index abf708111..5eb7ac5a9 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -23,6 +23,8 @@ import ( "golang.org/x/tools/internal/typesinternal" ) +// Deprecated: this heuristic is ill-defined. +// TODO(adonovan): move to sole use in gopls/internal/cache. func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { // Get the end position for the type error. file := fset.File(start) @@ -255,16 +257,16 @@ func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member newName = fmt.Sprintf("%s%d", preferredName, i) } - // For now, keep it real simple: create a new import - // declaration before the first existing declaration (which - // must exist), including its comments, and let goimports tidy it up. + // Create a new import declaration either before the first existing + // declaration (which must exist), including its comments; or + // inside the declaration, if it is an import group. // // Use a renaming import whenever the preferred name is not // available, or the chosen name does not match the last // segment of its path. - newText := fmt.Sprintf("import %q\n\n", pkgpath) + newText := fmt.Sprintf("%q", pkgpath) if newName != preferredName || newName != pathpkg.Base(pkgpath) { - newText = fmt.Sprintf("import %s %q\n\n", newName, pkgpath) + newText = fmt.Sprintf("%s %q", newName, pkgpath) } decl0 := file.Decls[0] var before ast.Node = decl0 @@ -278,9 +280,17 @@ func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member before = decl0.Doc } } + // If the first decl is an import group, add this new import at the end. 
+ if gd, ok := before.(*ast.GenDecl); ok && gd.Tok == token.IMPORT && gd.Rparen.IsValid() { + pos = gd.Rparen + newText = "\t" + newText + "\n" + } else { + pos = before.Pos() + newText = "import " + newText + "\n\n" + } return newName, newName + ".", []analysis.TextEdit{{ - Pos: before.Pos(), - End: before.Pos(), + Pos: pos, + End: pos, NewText: []byte(newText), }} } @@ -409,18 +419,19 @@ func validateFix(fset *token.FileSet, fix *analysis.SuggestedFix) error { start := edit.Pos file := fset.File(start) if file == nil { - return fmt.Errorf("missing file info for pos (%v)", edit.Pos) + return fmt.Errorf("no token.File for TextEdit.Pos (%v)", edit.Pos) } if end := edit.End; end.IsValid() { if end < start { - return fmt.Errorf("pos (%v) > end (%v)", edit.Pos, edit.End) + return fmt.Errorf("TextEdit.Pos (%v) > TextEdit.End (%v)", edit.Pos, edit.End) } endFile := fset.File(end) if endFile == nil { - return fmt.Errorf("malformed end position %v", end) + return fmt.Errorf("no token.File for TextEdit.End (%v; File(start).FileEnd is %d)", end, file.Base()+file.Size()) } if endFile != file { - return fmt.Errorf("edit spans files %v and %v", file.Name(), endFile.Name()) + return fmt.Errorf("edit #%d spans files (%v and %v)", + i, file.Position(edit.Pos), endFile.Position(edit.End)) } } else { edit.End = start // update the SuggestedFix @@ -449,3 +460,30 @@ func validateFix(fset *token.FileSet, fix *analysis.SuggestedFix) error { return nil } + +// CanImport reports whether one package is allowed to import another. +// +// TODO(adonovan): allow customization of the accessibility relation +// (e.g. for Bazel). +func CanImport(from, to string) bool { + // TODO(adonovan): better segment hygiene. + if to == "internal" || strings.HasPrefix(to, "internal/") { + // Special case: only std packages may import internal/... + // We can't reliably know whether we're in std, so we + // use a heuristic on the first segment. + first, _, _ := strings.Cut(from, "/") + if strings.Contains(first, ".") { + return false // example.com/foo ∉ std + } + if first == "testdata" { + return false // testdata/foo ∉ std + } + } + if strings.HasSuffix(to, "/internal") { + return strings.HasPrefix(from, to[:len(to)-len("/internal")]) + } + if i := strings.LastIndex(to, "/internal/"); i >= 0 { + return strings.HasPrefix(from, to[:i]) + } + return true +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go index a02206e30..4cfa51b61 100644 --- a/vendor/golang.org/x/tools/internal/event/keys/keys.go +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -32,7 +32,7 @@ func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { } // Get can be used to get a label for the key from a label.Map. -func (k *Value) Get(lm label.Map) interface{} { +func (k *Value) Get(lm label.Map) any { if t := lm.Find(k); t.Valid() { return k.From(t) } @@ -40,10 +40,10 @@ func (k *Value) Get(lm label.Map) interface{} { } // From can be used to get a value from a Label. -func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } +func (k *Value) From(t label.Label) any { return t.UnpackValue() } // Of creates a new Label with this key and the supplied value. -func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } +func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) } // Tag represents a key for tagging labels that have no value. 
// These are used when the existence of the label is the entire information it diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go index 0f526e1f9..7c00ca2a6 100644 --- a/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -32,7 +32,7 @@ type Key interface { type Label struct { key Key packed uint64 - untyped interface{} + untyped any } // Map is the interface to a collection of Labels indexed by key. @@ -76,13 +76,13 @@ type mapChain struct { // OfValue creates a new label from the key and value. // This method is for implementing new key types, label creation should // normally be done with the Of method of the key. -func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } +func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} } // UnpackValue assumes the label was built using LabelOfValue and returns the value // that was passed to that constructor. // This method is for implementing new key types, for type safety normal // access should be done with the From method of the key. -func (t Label) UnpackValue() interface{} { return t.untyped } +func (t Label) UnpackValue() any { return t.untyped } // Of64 creates a new label from a key and a uint64. This is often // used for non uint64 values that can be packed into a uint64. diff --git a/vendor/golang.org/x/tools/internal/facts/facts.go b/vendor/golang.org/x/tools/internal/facts/facts.go index e1c18d373..8e2997e6d 100644 --- a/vendor/golang.org/x/tools/internal/facts/facts.go +++ b/vendor/golang.org/x/tools/internal/facts/facts.go @@ -209,7 +209,7 @@ func (d *Decoder) Decode(read func(pkgPath string) ([]byte, error)) (*Set, error // Facts may describe indirectly imported packages, or their objects. m := make(map[key]analysis.Fact) // one big bucket for _, imp := range d.pkg.Imports() { - logf := func(format string, args ...interface{}) { + logf := func(format string, args ...any) { if debug { prefix := fmt.Sprintf("in %s, importing %s: ", d.pkg.Path(), imp.Path()) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d79a605ed..734c46198 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -14,7 +14,7 @@ import ( "sync" ) -func errorf(format string, args ...interface{}) { +func errorf(format string, args ...any) { panic(fmt.Sprintf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 7dfc31a37..253d6493c 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -310,7 +310,7 @@ func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byt } // ReportFunc is the type of a function used to report formatted bugs. -type ReportFunc = func(string, ...interface{}) +type ReportFunc = func(string, ...any) // Current bundled export format version. Increase with each format change. 
// 0: initial implementation @@ -597,7 +597,7 @@ type filePositions struct { needed []uint64 // unordered list of needed file offsets } -func (p *iexporter) trace(format string, args ...interface{}) { +func (p *iexporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. @@ -1583,6 +1583,6 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) } // "internalErrorf" as the former is used for bugs, whose cause is // internal inconsistency, whereas the latter is used for ordinary // situations like bad input, whose cause is external. -func internalErrorf(format string, args ...interface{}) error { +func internalErrorf(format string, args ...any) error { return internalError(fmt.Sprintf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 129439271..bc6c9741e 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -400,7 +400,7 @@ type iimporter struct { indent int // for tracing support } -func (p *iimporter) trace(format string, args ...interface{}) { +func (p *iimporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 522287d18..37b4a39e9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -574,7 +574,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) typesinternal.SetVarKind(recv, typesinternal.RecvVar) - methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic())) } embeds := make([]types.Type, iface.NumEmbeddeds()) diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 784605914..25ebab663 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -17,4 +17,4 @@ var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors var SetModFlag = func(config any, value string) {} -var SetModFile = func(config interface{}, value string) {} +var SetModFile = func(config any, value string) {} diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go new file mode 100644 index 000000000..7cca431cd --- /dev/null +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -0,0 +1,359 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate.go. DO NOT EDIT. 
+ +package stdlib + +type pkginfo struct { + name string + deps string // list of indices of dependencies, as varint-encoded deltas +} + +var deps = [...]pkginfo{ + {"archive/tar", "\x03k\x03E5\x01\v\x01#\x01\x01\x02\x05\t\x02\x01\x02\x02\v"}, + {"archive/zip", "\x02\x04a\a\x16\x0205\x01+\x05\x01\x10\x03\x02\r\x04"}, + {"bufio", "\x03k}E\x13"}, + {"bytes", "n+R\x03\fG\x02\x02"}, + {"cmp", ""}, + {"compress/bzip2", "\x02\x02\xe7\x01B"}, + {"compress/flate", "\x02l\x03z\r\x024\x01\x03"}, + {"compress/gzip", "\x02\x04a\a\x03\x15eT"}, + {"compress/lzw", "\x02l\x03z"}, + {"compress/zlib", "\x02\x04a\a\x03\x13\x01f"}, + {"container/heap", "\xae\x02"}, + {"container/list", ""}, + {"container/ring", ""}, + {"context", "n\\h\x01\f"}, + {"crypto", "\x84\x01gD"}, + {"crypto/aes", "\x10\n\a\x8e\x02"}, + {"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1d,Q"}, + {"crypto/des", "\x10\x13\x1d.,\x95\x01\x03"}, + {"crypto/dsa", "@\x04*}\x0e"}, + {"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1d}"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1d}\x0e\x04K\x01"}, + {"crypto/ed25519", "\x0e\x1c\x16\n\a\x1d}D"}, + {"crypto/elliptic", "0>}\x0e9"}, + {"crypto/fips140", " \x05\x91\x01"}, + {"crypto/hkdf", "-\x12\x01.\x16"}, + {"crypto/hmac", "\x1a\x14\x11\x01\x113"}, + {"crypto/internal/boring", "\x0e\x02\rg"}, + {"crypto/internal/boring/bbig", "\x1a\xdf\x01L"}, + {"crypto/internal/boring/bcache", "\xb3\x02\x12"}, + {"crypto/internal/boring/sig", ""}, + {"crypto/internal/cryptotest", "\x03\r\n)\x0e\x1a\x06\x13\x12#\a\t\x11\x11\x11\x1b\x01\f\f\x05\n"}, + {"crypto/internal/entropy", "E"}, + {"crypto/internal/fips140", ">0}9\f\x15"}, + {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05+\x8c\x015"}, + {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06+\x8a\x01"}, + {"crypto/internal/fips140/alias", "\xc5\x02"}, + {"crypto/internal/fips140/bigmod", "%\x17\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/check", " \x0e\x06\b\x02\xad\x01Z"}, + {"crypto/internal/fips140/check/checktest", "%\xff\x01!"}, + {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01)}\x0f8"}, + {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f2}\x0f8"}, + {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068}G"}, + {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc1\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "%\a\f\x042\x8c\x018"}, + {"crypto/internal/fips140/edwards25519/field", "%\x13\x042\x8c\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:"}, + {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018"}, + {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x042"}, + {"crypto/internal/fips140/nistec", "%\f\a\x042\x8c\x01*\x0e\x13"}, + {"crypto/internal/fips140/nistec/fiat", "%\x136\x8c\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:"}, + {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026}G"}, + {"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x011\x8c\x01K"}, + {"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/ssh", " \x05"}, + {"crypto/internal/fips140/subtle", "#\x19\xbe\x01"}, + {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028"}, + {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b2"}, + {"crypto/internal/fips140deps", ""}, + {"crypto/internal/fips140deps/byteorder", "\x9a\x01"}, + {"crypto/internal/fips140deps/cpu", "\xae\x01\a"}, + 
{"crypto/internal/fips140deps/godebug", "\xb6\x01"}, + {"crypto/internal/fips140hash", "5\x1a5\xc1\x01"}, + {"crypto/internal/fips140only", "'\r\x01\x01N25"}, + {"crypto/internal/fips140test", ""}, + {"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d$,`M"}, + {"crypto/internal/impl", "\xb0\x02"}, + {"crypto/internal/randutil", "\xeb\x01\x12"}, + {"crypto/internal/sysrand", "\xd7\x01@\x1b\x01\f\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "n"}, + {"crypto/md5", "\x0e2.\x16\x16`"}, + {"crypto/mlkem", "/"}, + {"crypto/pbkdf2", "2\r\x01.\x16"}, + {"crypto/rand", "\x1a\x06\a\x19\x04\x01)}\x0eL"}, + {"crypto/rc4", "#\x1d.\xc1\x01"}, + {"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1d\x03\x1325\r\x01"}, + {"crypto/sha1", "\x0e\f&.\x16\x16\x14L"}, + {"crypto/sha256", "\x0e\f\x1aP"}, + {"crypto/sha3", "\x0e'O\xc1\x01"}, + {"crypto/sha512", "\x0e\f\x1cN"}, + {"crypto/subtle", "8\x98\x01T"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x18\x02\x03\x13\x16\x14\b5\x16\x16\r\t\x01\x01\x01\x02\x01\f\x06\x02\x01"}, + {"crypto/tls/internal/fips140tls", " \x93\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x01\x0e\x06\x02\x02\x03E5\x03\t\x01\x01\x01\a\x10\x05\t\x05\v\x01\x02\r\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/internal/macos", "\x03k'\x8f\x01\v\x10\x06"}, + {"crypto/x509/pkix", "d\x06\a\x88\x01F"}, + {"database/sql", "\x03\nK\x16\x03z\f\x06\"\x05\t\x02\x03\x01\f\x02\x02\x02"}, + {"database/sql/driver", "\ra\x03\xae\x01\x10\x10"}, + {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03`\x18\x02\x01+\x10\x1e"}, + {"debug/dwarf", "\x03d\a\x03z1\x12\x01\x01"}, + {"debug/elf", "\x03\x06Q\r\a\x03`\x19\x01,\x18\x01\x15"}, + {"debug/gosym", "\x03d\n\xbd\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06Q\r\n`\x1a,\x18\x01"}, + {"debug/pe", "\x03\x06Q\r\a\x03`\x1a,\x18\x01\x15"}, + {"debug/plan9obj", "g\a\x03`\x1a,"}, + {"embed", "n+:\x18\x01S"}, + {"embed/internal/embedtest", ""}, + {"encoding", ""}, + {"encoding/ascii85", "\xeb\x01D"}, + {"encoding/asn1", "\x03k\x03\x87\x01\x01&\x0e\x02\x01\x0f\x03\x01"}, + {"encoding/base32", "\xeb\x01B\x02"}, + {"encoding/base64", "\x9a\x01QB\x02"}, + {"encoding/binary", "n}\r'\x0e\x05"}, + {"encoding/csv", "\x02\x01k\x03zE\x11\x02"}, + {"encoding/gob", "\x02`\x05\a\x03`\x1a\f\x01\x02\x1d\b\x13\x01\x0e\x02"}, + {"encoding/hex", "n\x03zB\x03"}, + {"encoding/json", "\x03\x01^\x04\b\x03z\r'\x0e\x02\x01\x02\x0f\x01\x01\x02"}, + {"encoding/pem", "\x03c\b}B\x03"}, + {"encoding/xml", "\x02\x01_\f\x03z4\x05\v\x01\x02\x0f\x02"}, + {"errors", "\xca\x01{"}, + {"expvar", "kK9\t\n\x15\r\t\x02\x03\x01\x10"}, + {"flag", "b\f\x03z,\b\x05\t\x02\x01\x0f"}, + {"fmt", "nE8\r\x1f\b\x0e\x02\x03\x11"}, + {"go/ast", "\x03\x01m\x0f\x01j\x03)\b\x0e\x02\x01"}, + {"go/ast/internal/tests", ""}, + {"go/build", "\x02\x01k\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x12\x01+\x01\x04\x01\a\t\x02\x01\x11\x02\x02"}, + {"go/build/constraint", "n\xc1\x01\x01\x11\x02"}, + {"go/constant", "q\x10w\x01\x015\x01\x02\x11"}, + {"go/doc", "\x04m\x01\x06\t=-1\x11\x02\x01\x11\x02"}, + {"go/doc/comment", "\x03n\xbc\x01\x01\x01\x01\x11\x02"}, + {"go/format", "\x03n\x01\f\x01\x02jE"}, + {"go/importer", "t\a\x01\x01\x04\x01i9"}, + {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x05\v\x01g\x02,\x01\x05\x12\x01\v\b"}, + {"go/internal/gcimporter", "\x02o\x10\x01/\x05\x0e',\x16\x03\x02"}, + {"go/internal/srcimporter", "q\x01\x02\n\x03\x01i,\x01\x05\x13\x02\x13"}, + {"go/parser", 
"\x03k\x03\x01\x03\v\x01j\x01+\x06\x13"}, + {"go/printer", "q\x01\x03\x03\tj\r\x1f\x16\x02\x01\x02\n\x05\x02"}, + {"go/scanner", "\x03n\x10j2\x11\x01\x12\x02"}, + {"go/token", "\x04m\xbc\x01\x02\x03\x01\x0e\x02"}, + {"go/types", "\x03\x01\x06d\x03\x01\x04\b\x03\x02\x15\x1e\x06+\x04\x03\n%\a\t\x01\x01\x01\x02\x01\x0e\x02\x02"}, + {"go/version", "\xbb\x01u"}, + {"hash", "\xeb\x01"}, + {"hash/adler32", "n\x16\x16"}, + {"hash/crc32", "n\x16\x16\x14\x84\x01\x01"}, + {"hash/crc64", "n\x16\x16\x98\x01"}, + {"hash/fnv", "n\x16\x16`"}, + {"hash/maphash", "\x95\x01\x05\x1b\x03@M"}, + {"html", "\xb0\x02\x02\x11"}, + {"html/template", "\x03h\x06\x19,5\x01\v \x05\x01\x02\x03\r\x01\x02\v\x01\x03\x02"}, + {"image", "\x02l\x1f^\x0f5\x03\x01"}, + {"image/color", ""}, + {"image/color/palette", "\x8d\x01"}, + {"image/draw", "\x8c\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05f\x03\x1b\x01\x01\x01\vQ"}, + {"image/internal/imageutil", "\x8c\x01"}, + {"image/jpeg", "\x02l\x1e\x01\x04Z"}, + {"image/png", "\x02\a^\n\x13\x02\x06\x01^D"}, + {"index/suffixarray", "\x03d\a}\r*\v\x01"}, + {"internal/abi", "\xb5\x01\x90\x01"}, + {"internal/asan", "\xc5\x02"}, + {"internal/bisect", "\xa4\x02\x0e\x01"}, + {"internal/buildcfg", "qG_\x06\x02\x05\v\x01"}, + {"internal/bytealg", "\xae\x01\x97\x01"}, + {"internal/byteorder", ""}, + {"internal/cfg", ""}, + {"internal/chacha8rand", "\x9a\x01\x1b\x90\x01"}, + {"internal/copyright", ""}, + {"internal/coverage", ""}, + {"internal/coverage/calloc", ""}, + {"internal/coverage/cfile", "k\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01$\x01\x1e,\x06\a\v\x01\x03\f\x06"}, + {"internal/coverage/cformat", "\x04m-\x04I\f6\x01\x02\f"}, + {"internal/coverage/cmerge", "q-Z"}, + {"internal/coverage/decodecounter", "g\n-\v\x02@,\x18\x16"}, + {"internal/coverage/decodemeta", "\x02e\n\x17\x16\v\x02@,"}, + {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02>\f \x16"}, + {"internal/coverage/encodemeta", "\x02\x01d\n\x13\x04\x16\r\x02>,."}, + {"internal/coverage/pods", "\x04m-y\x06\x05\v\x02\x01"}, + {"internal/coverage/rtcov", "\xc5\x02"}, + {"internal/coverage/slicereader", "g\nzZ"}, + {"internal/coverage/slicewriter", "qz"}, + {"internal/coverage/stringtab", "q8\x04>"}, + {"internal/coverage/test", ""}, + {"internal/coverage/uleb128", ""}, + {"internal/cpu", "\xc5\x02"}, + {"internal/dag", "\x04m\xbc\x01\x03"}, + {"internal/diff", "\x03n\xbd\x01\x02"}, + {"internal/exportdata", "\x02\x01k\x03\x03]\x1a,\x01\x05\x12\x01\x02"}, + {"internal/filepathlite", "n+:\x19A"}, + {"internal/fmtsort", "\x04\x9b\x02\x0e"}, + {"internal/fuzz", "\x03\nA\x19\x04\x03\x03\x01\f\x0355\r\x02\x1d\x01\x05\x02\x05\v\x01\x02\x01\x01\v\x04\x02"}, + {"internal/goarch", ""}, + {"internal/godebug", "\x97\x01 {\x01\x12"}, + {"internal/godebugs", ""}, + {"internal/goexperiment", ""}, + {"internal/goos", ""}, + {"internal/goroot", "\x97\x02\x01\x05\x13\x02"}, + {"internal/gover", "\x04"}, + {"internal/goversion", ""}, + {"internal/itoa", ""}, + {"internal/lazyregexp", "\x97\x02\v\x0e\x02"}, + {"internal/lazytemplate", "\xeb\x01,\x19\x02\v"}, + {"internal/msan", "\xc5\x02"}, + {"internal/nettrace", ""}, + {"internal/obscuretestdata", "f\x85\x01,"}, + {"internal/oserror", "n"}, + {"internal/pkgbits", "\x03K\x19\a\x03\x05\vj\x0e\x1e\r\v\x01"}, + {"internal/platform", ""}, + {"internal/poll", "nO\x1a\x149\x0e\x01\x01\v\x06"}, + {"internal/profile", "\x03\x04g\x03z7\f\x01\x01\x0f"}, + {"internal/profilerecord", ""}, + {"internal/race", "\x95\x01\xb0\x01"}, + {"internal/reflectlite", "\x95\x01 3\x01P\x0e\x13\x12"}, + {"unsafe", ""}, 
+ {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x8c\x01*&"}, + {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xd8\x01\x04\x01"}, + {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x88\x01& \n"}, + {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""}, + {"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "Q\x16\x93\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "n"}, + {"vendor/golang.org/x/net/http/httpguts", "\x81\x02\x14\x1b\x13\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x90\x01\x15\x01\x19\x13\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03zG"}, + {"vendor/golang.org/x/net/idna", "q\x87\x018\x13\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03d\a\x03z\x11\x05\x16\x01\f\v\x01\x02\x02\x01\n"}, + {"vendor/golang.org/x/sys/cpu", "\x97\x02\r\v\x01\x15"}, + {"vendor/golang.org/x/text/secure/bidirule", "n\xd5\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03k}X"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf~?\x15"}, + {"vendor/golang.org/x/text/unicode/norm", "g\nzG\x11\x11"}, + {"weak", "\x95\x01\x8f\x01!"}, +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go new file mode 100644 index 000000000..f6909878a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/stdlib/import.go @@ -0,0 +1,89 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stdlib + +// This file provides the API for the import graph of the standard library. +// +// Be aware that the compiler-generated code for every package +// implicitly depends on package "runtime" and a handful of others +// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go). + +import ( + "encoding/binary" + "iter" + "slices" + "strings" +) + +// Imports returns the sequence of packages directly imported by the +// named standard packages, in name order. +// The imports of an unknown package are the empty set. +// +// The graph is built into the application and may differ from the +// graph in the Go source tree being analyzed by the application. +func Imports(pkgs ...string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, pkg := range pkgs { + if i, ok := find(pkg); ok { + var depIndex uint64 + for data := []byte(deps[i].deps); len(data) > 0; { + delta, n := binary.Uvarint(data) + depIndex += delta + if !yield(deps[depIndex].name) { + return + } + data = data[n:] + } + } + } + } +} + +// Dependencies returns the set of all dependencies of the named +// standard packages, including the initial package, +// in a deterministic topological order. +// The dependencies of an unknown package are the empty set. +// +// The graph is built into the application and may differ from the +// graph in the Go source tree being analyzed by the application. 
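// Illustrative sketch, not part of the generated file: Imports and
// Dependencies both return an iter.Seq[string], so callers can range over
// them directly (the package path "net/http" is just an example input):
//
//	for pkg := range stdlib.Dependencies("net/http") {
//		fmt.Println(pkg)
//	}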
+func Dependencies(pkgs ...string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, pkg := range pkgs { + if i, ok := find(pkg); ok { + var seen [1 + len(deps)/8]byte // bit set of seen packages + var visit func(i int) bool + visit = func(i int) bool { + bit := byte(1) << (i % 8) + if seen[i/8]&bit == 0 { + seen[i/8] |= bit + var depIndex uint64 + for data := []byte(deps[i].deps); len(data) > 0; { + delta, n := binary.Uvarint(data) + depIndex += delta + if !visit(int(depIndex)) { + return false + } + data = data[n:] + } + if !yield(deps[i].name) { + return false + } + } + return true + } + if !visit(i) { + return + } + } + } + } +} + +// find returns the index of pkg in the deps table. +func find(pkg string) (int, bool) { + return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int { + return strings.Compare(p.name, n) + }) +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index 9f0b871ff..00776a31b 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -1,4 +1,4 @@ -// Copyright 2024 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -2151,6 +2151,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Type).String", Method, 0}, {"(Version).GoString", Method, 0}, {"(Version).String", Method, 0}, + {"(VersionIndex).Index", Method, 24}, + {"(VersionIndex).IsHidden", Method, 24}, {"ARM_MAGIC_TRAMP_NUMBER", Const, 0}, {"COMPRESS_HIOS", Const, 6}, {"COMPRESS_HIPROC", Const, 6}, @@ -3834,6 +3836,7 @@ var PackageSymbols = map[string][]Symbol{ {"SymType", Type, 0}, {"SymVis", Type, 0}, {"Symbol", Type, 0}, + {"Symbol.HasVersion", Field, 24}, {"Symbol.Info", Field, 0}, {"Symbol.Library", Field, 13}, {"Symbol.Name", Field, 0}, @@ -3843,18 +3846,12 @@ var PackageSymbols = map[string][]Symbol{ {"Symbol.Value", Field, 0}, {"Symbol.Version", Field, 13}, {"Symbol.VersionIndex", Field, 24}, - {"Symbol.VersionScope", Field, 24}, - {"SymbolVersionScope", Type, 24}, {"Type", Type, 0}, {"VER_FLG_BASE", Const, 24}, {"VER_FLG_INFO", Const, 24}, {"VER_FLG_WEAK", Const, 24}, {"Version", Type, 0}, - {"VersionScopeGlobal", Const, 24}, - {"VersionScopeHidden", Const, 24}, - {"VersionScopeLocal", Const, 24}, - {"VersionScopeNone", Const, 24}, - {"VersionScopeSpecific", Const, 24}, + {"VersionIndex", Type, 24}, }, "debug/gosym": { {"(*DecodingError).Error", Method, 0}, @@ -7122,6 +7119,7 @@ var PackageSymbols = map[string][]Symbol{ {"FormatFileInfo", Func, 21}, {"Glob", Func, 16}, {"GlobFS", Type, 16}, + {"Lstat", Func, 25}, {"ModeAppend", Const, 16}, {"ModeCharDevice", Const, 16}, {"ModeDevice", Const, 16}, @@ -7146,6 +7144,8 @@ var PackageSymbols = map[string][]Symbol{ {"ReadDirFile", Type, 16}, {"ReadFile", Func, 16}, {"ReadFileFS", Type, 16}, + {"ReadLink", Func, 25}, + {"ReadLinkFS", Type, 25}, {"SkipAll", Var, 20}, {"SkipDir", Var, 16}, {"Stat", Func, 16}, @@ -9149,6 +9149,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*ProcessState).SysUsage", Method, 0}, {"(*ProcessState).SystemTime", Method, 0}, {"(*ProcessState).UserTime", Method, 0}, + {"(*Root).Chmod", Method, 25}, + {"(*Root).Chown", Method, 25}, {"(*Root).Close", Method, 24}, {"(*Root).Create", Method, 24}, {"(*Root).FS", Method, 24}, @@ -16757,9 +16759,11 @@ var PackageSymbols = map[string][]Symbol{ }, "testing/fstest": { 
{"(MapFS).Glob", Method, 16}, + {"(MapFS).Lstat", Method, 25}, {"(MapFS).Open", Method, 16}, {"(MapFS).ReadDir", Method, 16}, {"(MapFS).ReadFile", Method, 16}, + {"(MapFS).ReadLink", Method, 25}, {"(MapFS).Stat", Method, 16}, {"(MapFS).Sub", Method, 16}, {"MapFS", Type, 16}, diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go index 98904017f..3d96d3bf6 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go +++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -6,7 +6,7 @@ // Package stdlib provides a table of all exported symbols in the // standard library, along with the version at which they first -// appeared. +// appeared. It also provides the import graph of std packages. package stdlib import ( diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv.go b/vendor/golang.org/x/tools/internal/testenv/testenv.go index 144f4f8fd..5c541b7b1 100644 --- a/vendor/golang.org/x/tools/internal/testenv/testenv.go +++ b/vendor/golang.org/x/tools/internal/testenv/testenv.go @@ -278,6 +278,16 @@ func NeedsGoBuild(t testing.TB) { NeedsTool(t, "go") } +// NeedsDefaultImporter skips t if the test uses the default importer, +// returned by [go/importer.Default]. +func NeedsDefaultImporter(t testing.TB) { + t.Helper() + // The default importer may call `go list` + // (in src/internal/exportdata/exportdata.go:lookupGorootExport), + // so check for the go tool. + NeedsTool(t, "go") +} + // ExitIfSmallMachine emits a helpful diagnostic and calls os.Exit(0) if the // current machine is a builder known to have scarce resources. // diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go b/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go index e9ce0d364..85b3820e3 100644 --- a/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go +++ b/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !(unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) -// +build !unix,!aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package testenv diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go b/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go index bc6af1ff8..d635b96b3 100644 --- a/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go +++ b/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build unix aix darwin dragonfly freebsd linux netbsd openbsd solaris package testenv diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 93c80fdc9..f49802b8e 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -120,7 +120,7 @@ type termSet struct { terms termlist } -func indentf(depth int, format string, args ...interface{}) { +func indentf(depth int, format string, args ...any) { fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) 
} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 345348796..edf0347ec 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -32,12 +32,14 @@ func SetUsesCgo(conf *types.Config) bool { return true } -// ReadGo116ErrorData extracts additional information from types.Error values +// ErrorCodeStartEnd extracts additional information from types.Error values // generated by Go version 1.16 and later: the error code, start position, and // end position. If all positions are valid, start <= err.Pos <= end. // // If the data could not be read, the final result parameter will be false. -func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { +// +// TODO(adonovan): eliminate start/end when proposal #71803 is accepted. +func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { var data [3]int // By coincidence all of these fields are ints, which simplifies things. v := reflect.ValueOf(err) diff --git a/vendor/modules.txt b/vendor/modules.txt index f9d7a0ccc..1d017c7fe 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,20 +1,3 @@ -# carvel.dev/kapp v0.64.0 -## explicit; go 1.23.3 -carvel.dev/kapp/pkg/kapp/cmd/core -carvel.dev/kapp/pkg/kapp/config -carvel.dev/kapp/pkg/kapp/crdupgradesafety -carvel.dev/kapp/pkg/kapp/diffgraph -carvel.dev/kapp/pkg/kapp/logger -carvel.dev/kapp/pkg/kapp/matcher -carvel.dev/kapp/pkg/kapp/preflight -carvel.dev/kapp/pkg/kapp/resources -carvel.dev/kapp/pkg/kapp/resourcesmisc -carvel.dev/kapp/pkg/kapp/util -carvel.dev/kapp/pkg/kapp/version -carvel.dev/kapp/pkg/kapp/yttresmod -# carvel.dev/vendir v0.40.0 -## explicit; go 1.21 -carvel.dev/vendir/pkg/vendir/versions/v1alpha1 # cel.dev/expr v0.18.0 ## explicit; go 1.21.1 cel.dev/expr @@ -290,16 +273,6 @@ github.com/containers/storage/pkg/reexec github.com/containers/storage/pkg/regexp github.com/containers/storage/pkg/system github.com/containers/storage/pkg/unshare -# github.com/cppforlife/cobrautil v0.0.0-20221130162803-acdfead391ef -## explicit -github.com/cppforlife/cobrautil -# github.com/cppforlife/color v1.9.1-0.20200716202919-6706ac40b835 -## explicit; go 1.13 -github.com/cppforlife/color -# github.com/cppforlife/go-cli-ui v0.0.0-20220425131040-94f26b16bc14 -## explicit; go 1.13 -github.com/cppforlife/go-cli-ui/ui -github.com/cppforlife/go-cli-ui/ui/table # github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f ## explicit github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer @@ -589,9 +562,6 @@ github.com/hashicorp/errwrap # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-version v1.6.0 -## explicit -github.com/hashicorp/go-version # github.com/huandu/xstrings v1.5.0 ## explicit; go 1.12 github.com/huandu/xstrings @@ -614,33 +584,6 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/k14s/starlark-go v0.0.0-20200720175618-3a5c849cc368 -## explicit; go 1.13 -github.com/k14s/starlark-go/internal/compile -github.com/k14s/starlark-go/internal/spell -github.com/k14s/starlark-go/resolve -github.com/k14s/starlark-go/starlark -github.com/k14s/starlark-go/starlarkstruct -github.com/k14s/starlark-go/syntax -# github.com/k14s/ytt v0.36.0 -## explicit; go 1.16 
-github.com/k14s/ytt/pkg/cmd/template -github.com/k14s/ytt/pkg/cmd/ui -github.com/k14s/ytt/pkg/filepos -github.com/k14s/ytt/pkg/files -github.com/k14s/ytt/pkg/orderedmap -github.com/k14s/ytt/pkg/schema -github.com/k14s/ytt/pkg/template -github.com/k14s/ytt/pkg/template/core -github.com/k14s/ytt/pkg/texttemplate -github.com/k14s/ytt/pkg/version -github.com/k14s/ytt/pkg/workspace -github.com/k14s/ytt/pkg/workspace/ref -github.com/k14s/ytt/pkg/yamlmeta -github.com/k14s/ytt/pkg/yamlmeta/internal/yaml.v2 -github.com/k14s/ytt/pkg/yamltemplate -github.com/k14s/ytt/pkg/yttlibrary -github.com/k14s/ytt/pkg/yttlibrary/overlay # github.com/klauspost/compress v1.18.0 ## explicit; go 1.22 github.com/klauspost/compress @@ -928,13 +871,6 @@ github.com/vbauerster/mpb/v8 github.com/vbauerster/mpb/v8/cwriter github.com/vbauerster/mpb/v8/decor github.com/vbauerster/mpb/v8/internal -# github.com/vito/go-interact v1.0.1 -## explicit; go 1.12 -github.com/vito/go-interact/interact -# github.com/vmware-tanzu/carvel-kapp-controller v0.51.0 -## explicit; go 1.21 -github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/kappctrl/v1alpha1 -github.com/vmware-tanzu/carvel-kapp-controller/pkg/apis/packaging/v1alpha1 # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 @@ -1032,8 +968,8 @@ go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 -# golang.org/x/crypto v0.33.0 -## explicit; go 1.20 +# golang.org/x/crypto v0.36.0 +## explicit; go 1.23.0 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 @@ -1056,14 +992,14 @@ golang.org/x/crypto/sha3 ## explicit; go 1.23.0 golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.23.0 -## explicit; go 1.22.0 +# golang.org/x/mod v0.24.0 +## explicit; go 1.23.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.35.0 -## explicit; go 1.18 +# golang.org/x/net v0.37.0 +## explicit; go 1.23.0 golang.org/x/net/context golang.org/x/net/html golang.org/x/net/html/atom @@ -1081,23 +1017,23 @@ golang.org/x/net/websocket ## explicit; go 1.23.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.11.0 -## explicit; go 1.18 +# golang.org/x/sync v0.12.0 +## explicit; go 1.23.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.30.0 -## explicit; go 1.18 +# golang.org/x/sys v0.31.0 +## explicit; go 1.23.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.29.0 -## explicit; go 1.18 +# golang.org/x/term v0.30.0 +## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.22.0 -## explicit; go 1.18 +# golang.org/x/text v0.23.0 +## explicit; go 1.23.0 golang.org/x/text/cases golang.org/x/text/encoding golang.org/x/text/encoding/internal @@ -1124,8 +1060,8 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.10.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.30.0 -## explicit; go 1.22.0 +# golang.org/x/tools v0.31.0 +## explicit; go 1.23.0 golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/analysistest golang.org/x/tools/go/analysis/checker @@ -1291,7 +1227,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.17.1 +# helm.sh/helm/v3 v3.17.2 ## explicit; go 1.23.0 
helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/resolver @@ -1934,7 +1870,7 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/kubectl v0.32.1 +# k8s.io/kubectl v0.32.2 ## explicit; go 1.23.0 k8s.io/kubectl/pkg/cmd/util k8s.io/kubectl/pkg/scheme
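
Note on the new vendored import graph: the added vendor/golang.org/x/tools/internal/stdlib/import.go stores each standard-library package's imports as a string of uvarint-encoded deltas between sorted indices into the deps table, and decodes them on the fly into an iter.Seq[string]. Since that package is internal to x/tools and cannot be imported from this repository, the following is only a minimal, self-contained sketch of the same delta-varint scheme and iterator pattern; the pkginfo type, encodeDeltas and imports helpers, and the three-package table are illustrative assumptions, not the vendored data.

package main

import (
	"encoding/binary"
	"fmt"
	"iter"
)

// pkginfo mirrors the shape of the vendored deps table: a package name plus a
// string of uvarint-encoded deltas between sorted indices of its imports.
type pkginfo struct {
	name string
	deps string
}

// encodeDeltas packs sorted table indices into uvarint deltas, matching the
// format the decoder below expects (illustrative encoder, not the upstream one).
func encodeDeltas(indices []int) string {
	var buf []byte
	prev := 0
	for _, i := range indices {
		buf = binary.AppendUvarint(buf, uint64(i-prev))
		prev = i
	}
	return string(buf)
}

// imports decodes one package's delta-encoded dependency string and yields the
// referenced package names, stopping early if the consumer breaks out of the loop.
func imports(table []pkginfo, i int) iter.Seq[string] {
	return func(yield func(string) bool) {
		var idx uint64
		for data := []byte(table[i].deps); len(data) > 0; {
			delta, n := binary.Uvarint(data)
			idx += delta
			if !yield(table[idx].name) {
				return
			}
			data = data[n:]
		}
	}
}

func main() {
	// A made-up three-package table; indices are 0-based positions in the slice.
	table := []pkginfo{
		{name: "errors"},                               // index 0, no imports
		{name: "fmt", deps: encodeDeltas([]int{0, 2})}, // imports errors and io
		{name: "io", deps: encodeDeltas([]int{0})},     // imports errors
	}
	for dep := range imports(table, 1) {
		fmt.Println("fmt imports", dep) // prints errors, then io
	}
}

The design keeps the embedded graph small: because the encoded indices are sorted, successive deltas are usually tiny and most fit in a single varint byte, which is why the deps column in the vendored table is a short opaque string per package.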