diff --git a/cmd/init.go b/cmd/init.go index aea903b88c..45ecaecbfc 100644 --- a/cmd/init.go +++ b/cmd/init.go @@ -9,22 +9,22 @@ import ( "regexp" "strconv" "strings" - + "github.com/loft-sh/devspace/pkg/util/ptr" "mvdan.cc/sh/v3/expand" - + "github.com/loft-sh/devspace/pkg/devspace/compose" "github.com/loft-sh/devspace/pkg/devspace/config/localcache" "github.com/sirupsen/logrus" - + "github.com/loft-sh/devspace/cmd/flags" "github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath" yaml "gopkg.in/yaml.v3" - + "github.com/loft-sh/devspace/pkg/devspace/hook" - + "github.com/loft-sh/devspace/pkg/devspace/plugin" - + "github.com/loft-sh/devspace/pkg/devspace/build/builder/helper" "github.com/loft-sh/devspace/pkg/devspace/config/constants" "github.com/loft-sh/devspace/pkg/devspace/config/loader" @@ -61,7 +61,7 @@ const ( // InitCmd is a struct that defines a command call for "init" type InitCmd struct { *flags.GlobalFlags - + // Flags Reconfigure bool Dockerfile string @@ -76,7 +76,7 @@ func NewInitCmd(f factory.Factory) *cobra.Command { log: f.GetLog(), GlobalFlags: globalFlags, } - + initCmd := &cobra.Command{ Use: "init", Short: "Initializes DevSpace in the current folder", @@ -94,12 +94,12 @@ folder. Creates a devspace.yaml as a starting point. 
return cmd.Run(f) }, } - + initCmd.Flags().BoolVarP(&cmd.Reconfigure, "reconfigure", "r", false, "Change existing configuration") initCmd.Flags().StringVar(&cmd.Context, "context", "", "Context path to use for intialization") initCmd.Flags().StringVar(&cmd.Dockerfile, "dockerfile", helper.DefaultDockerfilePath, "Dockerfile to use for initialization") initCmd.Flags().StringVar(&cmd.Provider, "provider", "", "The cloud provider to use") - + return initCmd } @@ -123,39 +123,39 @@ func (cmd *InitCmd) Run(f factory.Factory) error { if err != nil { return err } - + if response == optionNo { return nil } } - + // Delete config & overwrite config os.RemoveAll(".devspace") - + // Delete configs path os.Remove(constants.DefaultConfigsPath) - + // Delete config & overwrite config os.Remove(constants.DefaultConfigPath) - + // Delete config & overwrite config os.Remove(constants.DefaultVarsPath) - + // Execute plugin hook err = hook.ExecuteHooks(nil, nil, "init") if err != nil { return err } - + // Print DevSpace logo log.PrintLogo() - + // Determine if we're initializing from scratch, or using docker-compose.yaml dockerComposePath, generateFromDockerCompose, err := cmd.shouldGenerateFromDockerCompose() if err != nil { return err } - + if generateFromDockerCompose { err = cmd.initDockerCompose(f, dockerComposePath) } else { @@ -164,12 +164,12 @@ func (cmd *InitCmd) Run(f factory.Factory) error { if err != nil { return err } - + cmd.log.WriteString(logrus.InfoLevel, "\n") cmd.log.Done("Project successfully initialized") cmd.log.Info("Configuration saved in devspace.yaml - you can make adjustments as needed") cmd.log.Infof("\r \nYou can now run:\n1. %s - to pick which Kubernetes namespace to work in\n2. 
%s - to start developing your project in Kubernetes\n\nRun `%s` or `%s` to see a list of available commands and flags\n", ansi.Color("devspace use namespace", "blue+b"), ansi.Color("devspace dev", "blue+b"), ansi.Color("devspace -h", "blue+b"), ansi.Color("devspace [command] -h", "blue+b")) - + return nil } @@ -179,17 +179,17 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo if err != nil { return err } - + err = languageHandler.CopyTemplates(".", false) if err != nil { return err } - + startScriptAbsPath, err := filepath.Abs(startScriptName) if err != nil { return err } - + _, err = os.Stat(startScriptAbsPath) if err == nil { // Ensure file is executable @@ -198,9 +198,9 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo return err } } - + var config *latest.Config - + // create kubectl client client, err := f.NewKubeClientFromContext(cmd.GlobalFlags.KubeContext, cmd.GlobalFlags.Namespace) if err == nil { @@ -209,12 +209,12 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo config = configInterface.Config() } } - + localCache, err := localcache.NewCacheLoader().Load(constants.DefaultConfigPath) if err != nil { return err } - + if config == nil { // Create config config = latest.New().(*latest.Config) @@ -222,22 +222,22 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo return err } } - + // Create ConfigureManager configureManager := f.NewConfigureManager(config, localCache, cmd.log) - + // Determine name for this devspace project projectName, projectNamespace, err := getProjectName() if err != nil { return err } - + config.Name = projectName - + imageName := "app" selectedDeploymentOption := "" mustAddComponentChart := false - + for { selectedDeploymentOption, err = cmd.log.Question(&survey.QuestionOptions{ Question: "How do you want to deploy this project?", @@ -250,13 +250,13 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, 
configLoader loader.ConfigLo if err != nil { return err } - + isQuickstart := strings.HasPrefix(projectName, "devspace-quickstart-") - + if selectedDeploymentOption != DeployOptionHelm && isQuickstart { cmd.log.WriteString(logrus.InfoLevel, "\n") cmd.log.Warn("If this is a DevSpace quickstart project, you should use Helm!") - + useHelm := "Yes" helmAnswer, err := cmd.log.Question(&survey.QuestionOptions{ Question: "Do you want to switch to using Helm as suggested?", @@ -268,12 +268,12 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo if err != nil { return err } - + if helmAnswer == useHelm { selectedDeploymentOption = DeployOptionHelm } } - + if selectedDeploymentOption == DeployOptionHelm { if isQuickstart { quickstartYes := "Yes" @@ -287,12 +287,12 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo if err != nil { return err } - + if quickstartAnswer == quickstartYes { mustAddComponentChart = true } } - + if !mustAddComponentChart { hasOwnHelmChart := "Yes" helmChartAnswer, err := cmd.log.Question(&survey.QuestionOptions{ @@ -305,7 +305,7 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo if err != nil { return err } - + if helmChartAnswer == hasOwnHelmChart { err = configureManager.AddHelmDeployment(imageName) if err != nil { @@ -313,7 +313,7 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo cmd.log.WriteString(logrus.InfoLevel, "\n") cmd.log.Errorf("Error: %s", err.Error()) } - + // Retry questions on error continue } @@ -328,14 +328,14 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo cmd.log.WriteString(logrus.InfoLevel, "\n") cmd.log.Errorf("Error: %s", err.Error()) } - + // Retry questions on error continue } } break } - + developProject := "I want to develop this project and my current working dir contains the source code" deployProject := "I just want to deploy this project" 
defaultProjectAction := deployProject @@ -350,7 +350,7 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo if err != nil { return err } - + image := "" if developOrDeployProject == developProject { for { @@ -359,16 +359,16 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo if err != nil { return errors.Wrap(err, "error rendering deployment") } - + images, err := parseImages(manifests) if err != nil { return errors.Wrap(err, "error parsing images") } - + imageManual := "Manually enter the image I want to work on" imageSkip := "Skip (do not add dev configuration for any images)" imageAnswer := "" - + if len(images) > 0 { imageAnswer, err = cmd.log.Question(&survey.QuestionOptions{ Question: "Which image do you want to develop with DevSpace?", @@ -387,24 +387,24 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo return err } } - + if imageAnswer == imageSkip { break } else if imageAnswer == imageManual { imageQuestion := "What is the main container image of this project?" - + if selectedDeploymentOption == DeployOptionHelm { imageQuestion = "What is the main container image of this project which is deployed by this Helm chart? (e.g. ecr.io/project/image)" } - + if selectedDeploymentOption == DeployOptionKubectl { imageQuestion = "What is the main container image of this project which is deployed by these manifests? (e.g. ecr.io/project/image)" } - + if selectedDeploymentOption == DeployOptionKustomize { imageQuestion = "What is the main container image of this project which is deployed by this Kustomization? (e.g. ecr.io/project/image)" } - + image, err = cmd.log.Question(&survey.QuestionOptions{ Question: imageQuestion, ValidationMessage: "Please enter a valid container image from a Kubernetes pod (e.g. 
myregistry.tld/project/image)", @@ -420,7 +420,7 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo image = imageAnswer } } - + err = configureManager.AddImage(imageName, image, projectNamespace+"/"+projectName, cmd.Dockerfile) if err != nil { if err.Error() != "" { @@ -431,13 +431,13 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo } } } - + // Determine app port portString := "" - + if len(config.Images) > 0 { image = config.Images[imageName].Image - + // Try to get ports from dockerfile ports, err := dockerfile.GetPorts(config.Images[imageName].Dockerfile) if err == nil { @@ -451,14 +451,14 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo if err != nil { return err } - + if portString == "" { portString = strconv.Itoa(ports[0]) } } } } - + if portString == "" { portString, err = cmd.log.Question(&survey.QuestionOptions{ Question: "Which port is your application listening on? (Enter to skip)", @@ -468,7 +468,7 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo return err } } - + port := 0 if portString != "" { port, err = strconv.Atoi(portString) @@ -476,7 +476,7 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo return errors.Wrap(err, "error parsing port") } } - + // Add component deployment if selected if mustAddComponentChart { err = configureManager.AddComponentDeployment(imageName, image, port) @@ -484,26 +484,26 @@ func (cmd *InitCmd) initDevspace(f factory.Factory, configLoader loader.ConfigLo return err } } - + // Add the development configuration err = cmd.addDevConfig(config, imageName, image, port, languageHandler) if err != nil { return err } - + if config.Commands == nil { config.Commands = map[string]*latest.CommandConfig{} - + config.Commands["migrate-db"] = &latest.CommandConfig{ Command: `echo 'This is a cross-platform, shared command that can be used to codify any kind of dev 
task.' echo 'Anyone using this project can invoke it via "devspace run migrate-db"'`, } } - + if config.Pipelines == nil { config.Pipelines = map[string]*latest.Pipeline{} } - + // Add pipeline: dev config.Pipelines["dev"] = &latest.Pipeline{ Run: `run_dependencies --all # 1. Deploy any projects this project needs (see "dependencies") @@ -511,7 +511,7 @@ ensure_pull_secrets --all # 2. Ensure pull secrets create_deployments --all # 3. Deploy Helm charts and manifests specfied as "deployments" start_dev ` + imageName + ` # 4. Start dev mode "` + imageName + `" (see "dev" section)`, } - + // Add pipeline: dev config.Pipelines["deploy"] = &latest.Pipeline{ Run: `run_dependencies --all # 1. Deploy any projects this project needs (see "dependencies") @@ -519,31 +519,31 @@ ensure_pull_secrets --all # 2. Ensure pull secrets build_images --all -t $(git describe --always) # 3. Build, tag (git commit hash) and push all images (see "images") create_deployments --all # 4. Deploy Helm charts and manifests specfied as "deployments"`, } - + // Save config err = loader.Save(constants.DefaultConfigPath, config) if err != nil { return err } - + // Save generated err = localCache.Save() if err != nil { return errors.Errorf("Error saving generated file: %v", err) } - + // Add .devspace/ to .gitignore err = appendToIgnoreFile(gitIgnoreFile, devspaceFolderGitignore) if err != nil { cmd.log.Warn(err) } - + configPath := loader.ConfigPath("") err = annotateConfig(configPath) if err != nil { return err } - + return nil } @@ -552,71 +552,71 @@ func (cmd *InitCmd) initDockerCompose(f factory.Factory, composePath string) err if err != nil { return err } - + projectName, _, err := getProjectName() if err != nil { return err } - + project.Name = projectName - + // Prompt user for entrypoints for each container with sync folders. 
for idx, service := range project.Services { localPaths := compose.GetServiceSyncPaths(project, service) noEntryPoint := len(service.Entrypoint) == 0 hasSyncEndpoints := len(localPaths) > 0 - + if noEntryPoint && hasSyncEndpoints { entrypointStr, err := cmd.log.Question(&survey.QuestionOptions{ - Question: "How is this container started? (e.g. npm start, gradle run, go run main.go)", + Question: fmt.Sprintf(`How is this container "%s" started? (e.g. npm start, gradle run, go run main.go)`, service.Name), }) if err != nil { return err } - + entrypoint := strings.Split(entrypointStr, " ") project.Services[idx].Entrypoint = entrypoint } } - + // Generate DevSpace configuration composeManager := compose.NewComposeManager(project) err = composeManager.Load(cmd.log) if err != nil { return err } - + // Save each configuration file for path, config := range composeManager.Configs() { localCache, err := localcache.NewCacheLoader().Load(path) if err != nil { return err } - + // Save config err = loader.Save(path, config) if err != nil { return err } - + // Save generated err = localCache.Save() if err != nil { return errors.Errorf("Error saving generated file: %v", err) } - + // Add .devspace/ to .gitignore err = appendToIgnoreFile(gitIgnoreFile, devspaceFolderGitignore) if err != nil { cmd.log.Warn(err) } - + err = annotateConfig(path) if err != nil { return err } } - + return nil } @@ -625,11 +625,11 @@ func annotateConfig(configPath string) error { if err != nil { panic(err) } - + annotatedConfig = regexp.MustCompile("(?m)(\n\\s{2,6}name:.*)").ReplaceAll(annotatedConfig, []byte("")) annotatedConfig = regexp.MustCompile("(?s)(\n deploy:.*)(\n dev:.*)(\nimages:)").ReplaceAll(annotatedConfig, []byte("$2$1$3")) annotatedConfig = regexp.MustCompile("(?s)(\n imageSelector:.*?)(\n.*)(\n devImage:.*?)(\n)").ReplaceAll(annotatedConfig, []byte("$1$3$2$4")) - + configAnnotations := map[string]string{ "(?m)^(pipelines:)": "\n# This is a list of `pipelines` that DevSpace can execute 
(you can define your own)\n$1", "(?m)^( )(deploy:)": "$1# You can run this pipeline via `devspace deploy` (or `devspace run-pipeline deploy`)\n$1$2", @@ -651,11 +651,11 @@ func annotateConfig(configPath string) error { "(?m)^( )(proxyCommands:)": "$1# Make the following commands from my local machine available inside the dev container\n$1$2", "(?m)^(commands:)": "\n# Use the `commands` section to define repeatable dev workflows for this project \n$1", } - + for expr, replacement := range configAnnotations { annotatedConfig = regexp.MustCompile(expr).ReplaceAll(annotatedConfig, []byte(replacement)) } - + annotatedConfig = append(annotatedConfig, []byte(` # Define dependencies to other projects with a devspace.yaml # dependencies: @@ -665,12 +665,12 @@ func annotateConfig(configPath string) error { # ui: # path: ./ui # Path-based dependencies (for monorepos) `)...) - + err = os.WriteFile(configPath, annotatedConfig, os.ModePerm) if err != nil { return err } - + return nil } @@ -678,21 +678,21 @@ func (cmd *InitCmd) addDevConfig(config *latest.Config, imageName, image string, if config.Dev == nil { config.Dev = map[string]*latest.DevPod{} } - + devConfig, ok := config.Dev[imageName] if !ok { devConfig = &latest.DevPod{} config.Dev[imageName] = devConfig } - + devConfig.ImageSelector = image - + if port > 0 { localPort := port if localPort < 1024 { cmd.log.WriteString(logrus.InfoLevel, "\n") cmd.log.Warn("Your application listens on a system port [0-1024]. 
Choose a forwarding-port to access your application via localhost.") - + portString, err := cmd.log.Question(&survey.QuestionOptions{ Question: "Which forwarding port [1024-49151] do you want to use to access your application?", DefaultValue: strconv.Itoa(localPort + 8000), @@ -700,13 +700,13 @@ func (cmd *InitCmd) addDevConfig(config *latest.Config, imageName, image string, if err != nil { return err } - + localPort, err = strconv.Atoi(portString) if err != nil { return errors.Errorf("Error parsing port '%s'", portString) } } - + // Add dev.ports portMapping := latest.PortMapping{ Port: fmt.Sprintf("%d", port), @@ -716,12 +716,12 @@ func (cmd *InitCmd) addDevConfig(config *latest.Config, imageName, image string, Port: fmt.Sprintf("%d:%d", localPort, port), } } - + if devConfig.Ports == nil { devConfig.Ports = []*latest.PortMapping{} } devConfig.Ports = append(devConfig.Ports, &portMapping) - + if devConfig.Open == nil { devConfig.Open = []*latest.OpenConfig{} } @@ -729,44 +729,44 @@ func (cmd *InitCmd) addDevConfig(config *latest.Config, imageName, image string, URL: "http://localhost:" + strconv.Itoa(localPort), }) } - + if devConfig.Sync == nil { devConfig.Sync = []*latest.SyncConfig{} } - + syncConfig := &latest.SyncConfig{ Path: "./", } - + if _, err := os.Stat("node_modules"); err == nil { syncConfig.UploadExcludePaths = append(syncConfig.UploadExcludePaths, "node_modules") } - + if _, err := os.Stat(".dockerignore"); err == nil { syncConfig.UploadExcludeFile = ".dockerignore" } - + devConfig.Sync = append(devConfig.Sync, syncConfig) - + devConfig.Terminal = &latest.Terminal{ Command: "./" + startScriptName, } - + devImage, err := languageHandler.GetDevImage() if err != nil { return err } - + devConfig.DevImage = devImage - + devConfig.SSH = &latest.SSH{ Enabled: ptr.Bool(true), } - + if devConfig.ProxyCommands == nil { devConfig.ProxyCommands = []*latest.ProxyCommand{} } - + devConfig.ProxyCommands = append(devConfig.ProxyCommands, []*latest.ProxyCommand{ { 
Command: "devspace", @@ -781,7 +781,7 @@ func (cmd *InitCmd) addDevConfig(config *latest.Config, imageName, image string, GitCredentials: true, }, }...) - + return nil } @@ -793,7 +793,7 @@ func (cmd *InitCmd) render(f factory.Factory, config *latest.Config) (string, er if err != nil { return "", errors.Wrap(err, "temp render.yaml") } - + silent := true if cmd.Debug { silent = false @@ -816,7 +816,7 @@ func (cmd *InitCmd) render(f factory.Factory, config *latest.Config) (string, er if err != nil { return "", errors.Wrap(err, "devspace render") } - + return writer.String(), nil } @@ -834,7 +834,7 @@ func (cmd *InitCmd) shouldGenerateFromDockerCompose() (string, bool, error) { if err != nil { return "", false, err } - + return dockerComposePath, selectedDockerComposeOption == DockerComposeDevSpaceConfigOption, nil } return "", false, nil @@ -850,14 +850,14 @@ func appendToIgnoreFile(ignoreFile, content string) error { if err != nil { return errors.Errorf("Error reading file %s: %v", ignoreFile, err) } - + // append only if not found in file content if !strings.Contains(string(fileContent), content) { file, err := os.OpenFile(ignoreFile, os.O_APPEND|os.O_WRONLY, 0600) if err != nil { return errors.Errorf("Error writing file %s: %v", ignoreFile, err) } - + defer file.Close() if _, err = file.WriteString(content); err != nil { return errors.Errorf("Error writing file %s: %v", ignoreFile, err) @@ -880,7 +880,7 @@ func getProjectName() (string, string, error) { projectName = projectParts[partsLen-1] } } - + if projectName == "" { absPath, err := filepath.Abs(".") if err != nil { @@ -888,22 +888,22 @@ func getProjectName() (string, string, error) { } projectName = filepath.Base(absPath) } - + projectName = strings.ToLower(projectName) projectName = regexp.MustCompile("[^a-zA-Z0-9- ]+").ReplaceAllString(projectName, "") projectName = regexp.MustCompile("[^a-zA-Z0-9-]+").ReplaceAllString(projectName, "-") projectName = strings.Trim(projectName, "-") - + if 
!SpaceNameValidationRegEx.MatchString(projectName) || len(projectName) > 42 { projectName = "devspace" } - + return projectName, projectNamespace, nil } func parseImages(manifests string) ([]string, error) { images := []string{} - + var doc yaml.Node dec := yaml.NewDecoder(bytes.NewReader([]byte(manifests))) for dec.Decode(&doc) == nil { @@ -911,16 +911,16 @@ func parseImages(manifests string) ([]string, error) { if err != nil { return nil, err } - + matches, err := path.Find(&doc) if err != nil { return nil, err } - + for _, match := range matches { images = append(images, match.Value) } } - + return images, nil } diff --git a/e2e/tests/init/init.go b/e2e/tests/init/init.go index d906a0905d..f75f792052 100644 --- a/e2e/tests/init/init.go +++ b/e2e/tests/init/init.go @@ -8,9 +8,9 @@ import ( "os" "path/filepath" "strings" - + "github.com/loft-sh/devspace/pkg/devspace/config/loader/variable" - + "github.com/loft-sh/devspace/cmd" "github.com/loft-sh/devspace/cmd/flags" "github.com/loft-sh/devspace/e2e/framework" @@ -25,55 +25,55 @@ var _ = DevSpaceDescribe("init", func() { if err != nil { panic(err) } - + // create a new factory var ( f *framework.DefaultFactory kubeClient *kube.KubeHelper ) - + ginkgo.BeforeEach(func() { f = framework.NewDefaultFactory() - + kubeClient, err = kube.NewKubeHelper() framework.ExpectNoError(err) }) - + ginkgo.It("should create devspace.yml without registry details", func() { tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") framework.ExpectNoError(err) defer framework.CleanupTempDir(initialDir, tempDir) - + // set the question answer func here f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { if strings.Contains(params.Question, "How do you want to deploy this project?") { return cmd.DeployOptionHelm, nil } - + if strings.Contains(params.Question, "If you were to push any images, which container registry would you want to push to?") { return "Skip Registry", nil } - + if 
strings.Contains(params.Question, "How should DevSpace build the container image for this project?") { return "Skip / I don't know", nil } - + return params.DefaultValue, nil }) - + initCmd := &cmd.InitCmd{GlobalFlags: &flags.GlobalFlags{}} err = initCmd.Run(f) framework.ExpectNoError(err) - + config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) framework.ExpectNoError(err) - + framework.ExpectEqual(len(config.Variables()), len(variable.AlwaysResolvePredefinedVars)) - + ns, err := kubeClient.CreateNamespace("init") framework.ExpectNoError(err) defer framework.ExpectDeleteNamespace(kubeClient, ns) - + done := make(chan error) go func() { devCmd := &cmd.RunPipelineCmd{ @@ -85,46 +85,46 @@ var _ = DevSpaceDescribe("init", func() { } done <- devCmd.RunDefault(f) }() - + err = <-done framework.ExpectNoError(err) }) - + ginkgo.It("should create devspace.yml without registry details and manifests deploy", func() { tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") framework.ExpectNoError(err) defer framework.CleanupTempDir(initialDir, tempDir) - + // set the question answer func here f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { if strings.Contains(params.Question, "Which registry would you want to use to push images to?") { return "Skip Registry", nil } - + if strings.Contains(params.Question, "How do you want to deploy this project?") { return cmd.DeployOptionKubectl, nil } - + if strings.Contains(params.Question, "Please enter the paths to your Kubernetes manifests") { return "manifests/**", nil } - + return params.DefaultValue, nil }) - + initCmd := &cmd.InitCmd{GlobalFlags: &flags.GlobalFlags{}} err = initCmd.Run(f) framework.ExpectNoError(err) - + config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) framework.ExpectNoError(err) - + framework.ExpectEqual(len(config.Variables()), len(variable.AlwaysResolvePredefinedVars)) - + 
ns, err := kubeClient.CreateNamespace("init") framework.ExpectNoError(err) defer framework.ExpectDeleteNamespace(kubeClient, ns) - + devCmd := &cmd.RunPipelineCmd{ GlobalFlags: &flags.GlobalFlags{ NoWarn: true, @@ -136,42 +136,42 @@ var _ = DevSpaceDescribe("init", func() { err = devCmd.RunDefault(f) framework.ExpectNoError(err) }) - + ginkgo.It("should create devspace.yml without registry details and kustomize deploy", func() { tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") framework.ExpectNoError(err) defer framework.CleanupTempDir(initialDir, tempDir) - + // set the question answer func here f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { if strings.Contains(params.Question, "Which registry would you want to use to push images to?") { return "Skip Registry", nil } - + if strings.Contains(params.Question, "How do you want to deploy this project?") { return cmd.DeployOptionKustomize, nil } - + if strings.Contains(params.Question, "Please enter path to your Kustomization folder") { return "./kustomization", nil } - + return params.DefaultValue, nil }) - + initCmd := &cmd.InitCmd{GlobalFlags: &flags.GlobalFlags{}} err = initCmd.Run(f) framework.ExpectNoError(err) - + config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) framework.ExpectNoError(err) - + framework.ExpectEqual(len(config.Variables()), len(variable.AlwaysResolvePredefinedVars)) - + ns, err := kubeClient.CreateNamespace("init") framework.ExpectNoError(err) defer framework.ExpectDeleteNamespace(kubeClient, ns) - + done := make(chan error) go func() { devCmd := &cmd.RunPipelineCmd{ @@ -183,50 +183,50 @@ var _ = DevSpaceDescribe("init", func() { } done <- devCmd.RunDefault(f) }() - + err = <-done framework.ExpectNoError(err) }) - + ginkgo.It("should create devspace.yml without registry details and local helm chart deploy", func() { tempDir, err := framework.CopyToTempDir("tests/init/testdata/new") 
framework.ExpectNoError(err) defer framework.CleanupTempDir(initialDir, tempDir) - + // set the question answer func here f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { if strings.Contains(params.Question, "Which registry would you want to use to push images to?") { return "Skip Registry", nil } - + if strings.Contains(params.Question, "How do you want to deploy this project?") { return cmd.DeployOptionHelm, nil } - + if strings.Contains(params.Question, "Which Helm chart do you want to use?") { return `Use a local Helm chart (e.g. ./helm/chart/)`, nil } - + if strings.Contains(params.Question, "Please enter the relative path to your local Helm chart") { return "./chart", nil } - + return params.DefaultValue, nil }) - + initCmd := &cmd.InitCmd{GlobalFlags: &flags.GlobalFlags{}} err = initCmd.Run(f) framework.ExpectNoError(err) - + config, _, err := framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) framework.ExpectNoError(err) - + framework.ExpectEqual(len(config.Variables()), len(variable.AlwaysResolvePredefinedVars)) - + ns, err := kubeClient.CreateNamespace("init") framework.ExpectNoError(err) defer framework.ExpectDeleteNamespace(kubeClient, ns) - + done := make(chan error) go func() { devCmd := &cmd.RunPipelineCmd{ @@ -238,42 +238,42 @@ var _ = DevSpaceDescribe("init", func() { } done <- devCmd.RunDefault(f) }() - + err = <-done framework.ExpectNoError(err) }) - + ginkgo.It("should create devspace.yml from docker-compose.yaml", func() { tempDir, err := framework.CopyToTempDir("tests/init/testdata/docker-compose") framework.ExpectNoError(err) defer framework.CleanupTempDir(initialDir, tempDir) - + ns, err := kubeClient.CreateNamespace("init") framework.ExpectNoError(err) defer func() { err := kubeClient.DeleteNamespace(ns) framework.ExpectNoError(err) }() - + // Answer all questions with the default f.SetAnswerFunc(func(params *survey.QuestionOptions) (string, error) { return params.DefaultValue, nil }) 
- + initCmd := &cmd.InitCmd{ Reconfigure: true, } err = initCmd.Run(f) framework.ExpectNoError(err) - + // Created a devspace.yaml _, _, err = framework.LoadConfig(f, kubeClient.Client(), filepath.Join(tempDir, "devspace.yaml")) framework.ExpectNoError(err) - + // Created a .gitignore _, err = os.Stat(filepath.Join(tempDir, ".gitignore")) framework.ExpectNoError(err) - + // Print the config to verify the expected deployment var configBuffer bytes.Buffer printCmd := &cmd.PrintCmd{ @@ -283,18 +283,18 @@ var _ = DevSpaceDescribe("init", func() { }, Out: &configBuffer, } - + err = printCmd.Run(f) framework.ExpectNoError(err) - + generatedConfig := &latest.Config{} err = yaml.Unmarshal(configBuffer.Bytes(), generatedConfig) framework.ExpectNoError(err) - + // validate config framework.ExpectEqual(len(generatedConfig.Deployments), 1) framework.ExpectHaveKey(generatedConfig.Deployments, "db") - + // ensure valid configuration by deploying the application deployCmd := &cmd.RunPipelineCmd{ GlobalFlags: &flags.GlobalFlags{ @@ -306,7 +306,7 @@ var _ = DevSpaceDescribe("init", func() { SkipPush: true, } err = deployCmd.RunDefault(f) - + framework.ExpectNoError(err) }) }) diff --git a/e2e/tests/init/testdata/docker-compose/docker-compose.yaml b/e2e/tests/init/testdata/docker-compose/docker-compose.yaml index b508965aca..87c6d52577 100644 --- a/e2e/tests/init/testdata/docker-compose/docker-compose.yaml +++ b/e2e/tests/init/testdata/docker-compose/docker-compose.yaml @@ -1,3 +1,5 @@ +name: shiken + version: "3.7" services: db: diff --git a/go.mod b/go.mod index ee36b7edb2..ea7d17c3b0 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/blang/semver v3.5.1+incompatible github.com/bmatcuk/doublestar v1.1.1 - github.com/compose-spec/compose-go v1.2.2 + github.com/compose-spec/compose-go v1.20.0 github.com/creack/pty v1.1.18 github.com/docker/cli v24.0.0+incompatible github.com/docker/distribution 
v2.8.2+incompatible @@ -89,7 +89,7 @@ require ( github.com/containerd/typeurl v1.0.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e // indirect + github.com/distribution/distribution/v3 v3.0.0-20221103125252-ebfa2a0ac0a9 // indirect github.com/distribution/reference v0.5.0 // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect @@ -123,7 +123,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect @@ -140,7 +140,7 @@ require ( github.com/mattn/go-shellwords v1.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect @@ -184,6 +184,7 @@ require ( go.opentelemetry.io/otel/trace v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect + golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.3.0 // indirect diff --git a/go.sum b/go.sum index ca55218653..c5d7142da5 100644 --- a/go.sum +++ 
b/go.sum @@ -152,6 +152,10 @@ github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWH github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/compose-spec/compose-go v1.2.2 h1:y1dwl3KUTBnWPVur6EZno9zUIum6Q87/F5keljnGQB4= github.com/compose-spec/compose-go v1.2.2/go.mod h1:pAy7Mikpeft4pxkFU565/DRHEbDfR84G6AQuiL+Hdg8= +github.com/compose-spec/compose-go v1.8.0 h1:fD2b8YDZVcSicKM0EEXsUdy+97PKza6+bjuXfSloNdM= +github.com/compose-spec/compose-go v1.8.0/go.mod h1:Tb5Ae2PsYN3GTqYqzl2IRbTPiJtPZZjMw8UKUvmehFk= +github.com/compose-spec/compose-go v1.20.0 h1:h4ZKOst1EF/DwZp7dWkb+wbTVE4nEyT9Lc89to84Ol4= +github.com/compose-spec/compose-go v1.20.0/go.mod h1:+MdqXV4RA7wdFsahh/Kb8U0pAJqkg7mr4PM9tFKU8RM= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= @@ -293,6 +297,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e h1:n81KvOMrLZa+VWHwST7dun9f0G98X3zREHS1ztYzZKU= github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e/go.mod h1:xpWTC2KnJMiDLkoawhsPQcXjvwATEBcbq0xevG2YR9M= +github.com/distribution/distribution/v3 v3.0.0-20221103125252-ebfa2a0ac0a9/go.mod h1:6rIc5NMSjXjjnwzWWy3HAm9gDBu+X7aCzL8VrHIKgxM= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.0.1/go.mod 
h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= @@ -563,6 +568,9 @@ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE= github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf h1:WfD7VjIE6z8dIvMsI4/s+1qr5EL+zoIGev1BQj1eoJ8= @@ -680,6 +688,8 @@ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTS github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/buildkit v0.11.4 h1:mleVHr+n7HUD65QNUkgkT3d8muTzhYUoHE9FM3Ej05s= github.com/moby/buildkit v0.11.4/go.mod h1:P5Qi041LvCfhkfYBHry+Rwoo3Wi6H971J2ggE+PcIoo= @@ -1055,6 +1065,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1436,6 +1448,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20191026110619-0b21df46bc1d/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= diff --git a/pkg/devspace/compose/deployment.go b/pkg/devspace/compose/deployment.go index 778157ef2a..e8871ec8a1 100644 --- a/pkg/devspace/compose/deployment.go +++ b/pkg/devspace/compose/deployment.go @@ -4,9 +4,8 @@ import ( "fmt" "sort" "strconv" - "strings" "time" - + composetypes "github.com/compose-spec/compose-go/types" "github.com/loft-sh/devspace/pkg/devspace/config/versions/latest" v1 "k8s.io/api/core/v1" @@ -14,18 +13,18 @@ import ( func (cb *configBuilder) AddDeployment(dockerCompose 
*composetypes.Project, service composetypes.ServiceConfig) error { values := map[string]interface{}{} - + volumes, volumeMounts, _ := volumesConfig(service, dockerCompose.Volumes, cb.log) if len(volumes) > 0 { values["volumes"] = volumes } - + container, err := containerConfig(service, volumeMounts) if err != nil { return err } values["containers"] = []interface{}{container} - + if service.Restart != "" { restartPolicy := string(v1.RestartPolicyNever) switch service.Restart { @@ -36,7 +35,7 @@ func (cb *configBuilder) AddDeployment(dockerCompose *composetypes.Project, serv } values["restartPolicy"] = restartPolicy } - + ports := []interface{}{} for _, port := range service.Ports { var protocol string @@ -48,24 +47,24 @@ func (cb *configBuilder) AddDeployment(dockerCompose *composetypes.Project, serv default: return fmt.Errorf("invalid protocol %s", port.Protocol) } - + if port.Published == "" { cb.log.Warnf("Unassigned ports are not supported: %s", port.Target) continue } - + portNumber, err := strconv.Atoi(port.Published) if err != nil { return err } - + ports = append(ports, map[string]interface{}{ "port": portNumber, "containerPort": int(port.Target), "protocol": protocol, }) } - + for _, port := range service.Expose { intPort, err := strconv.Atoi(port) if err != nil { @@ -75,22 +74,19 @@ func (cb *configBuilder) AddDeployment(dockerCompose *composetypes.Project, serv "port": intPort, }) } - + if len(ports) > 0 { values["service"] = map[string]interface{}{ "ports": ports, } } - + if len(service.ExtraHosts) > 0 { hostsMap := map[string][]interface{}{} - for _, host := range service.ExtraHosts { - hostTokens := strings.Split(host, ":") - hostName := hostTokens[0] - hostIP := hostTokens[1] + for hostName, hostIP := range service.ExtraHosts { hostsMap[hostIP] = append(hostsMap[hostIP], hostName) } - + hostAliases := []interface{}{} for ip, hosts := range hostsMap { hostAliases = append(hostAliases, map[string]interface{}{ @@ -98,23 +94,23 @@ func (cb *configBuilder) 
AddDeployment(dockerCompose *composetypes.Project, serv "hostnames": hosts, }) } - + values["hostAliases"] = hostAliases } - + deployment := &latest.DeploymentConfig{ Helm: &latest.HelmConfig{ Values: values, }, } - + if cb.config.Deployments == nil { cb.config.Deployments = map[string]*latest.DeploymentConfig{} } - + deploymentName := formatName(service.Name) cb.config.Deployments[deploymentName] = deployment - + return nil } @@ -123,22 +119,22 @@ func containerConfig(service composetypes.ServiceConfig, volumeMounts []interfac "name": containerName(service), "image": resolveImage(service), } - + if len(service.Command) > 0 { container["args"] = shellCommandToSlice(service.Command) } - + if service.Build == nil && len(service.Entrypoint) > 0 { container["command"] = shellCommandToSlice(service.Entrypoint) } - + if service.Environment != nil { env := containerEnv(service.Environment) if len(env) > 0 { container["env"] = env } } - + if service.HealthCheck != nil { livenessProbe, err := containerLivenessProbe(service.HealthCheck) if err != nil { @@ -148,11 +144,11 @@ func containerConfig(service composetypes.ServiceConfig, volumeMounts []interfac container["livenessProbe"] = livenessProbe } } - + if len(volumeMounts) > 0 { container["volumeMounts"] = volumeMounts } - + return container, nil } @@ -163,7 +159,7 @@ func containerEnv(env composetypes.MappingWithEquals) []interface{} { keys = append(keys, name) } sort.Strings(keys) - + for _, name := range keys { value := env[name] envs = append(envs, map[string]interface{}{ @@ -185,7 +181,7 @@ func containerLivenessProbe(health *composetypes.HealthCheckConfig) (map[string] if len(health.Test) == 0 { return nil, nil } - + var command []interface{} testKind := health.Test[0] switch testKind { @@ -202,17 +198,17 @@ func containerLivenessProbe(health *composetypes.HealthCheckConfig) (map[string] default: command = append(command, health.Test[0:]) } - + livenessProbe := map[string]interface{}{ "exec": map[string]interface{}{ 
"command": command, }, } - + if health.Retries != nil { livenessProbe["failureThreshold"] = int(*health.Retries) } - + if health.Interval != nil { period, err := time.ParseDuration(health.Interval.String()) if err != nil { @@ -220,7 +216,7 @@ func containerLivenessProbe(health *composetypes.HealthCheckConfig) (map[string] } livenessProbe["periodSeconds"] = int(period.Seconds()) } - + if health.StartPeriod != nil { initialDelay, err := time.ParseDuration(health.Interval.String()) if err != nil { @@ -228,7 +224,7 @@ func containerLivenessProbe(health *composetypes.HealthCheckConfig) (map[string] } livenessProbe["initialDelaySeconds"] = int(initialDelay.Seconds()) } - + return livenessProbe, nil } diff --git a/pkg/devspace/compose/manager.go b/pkg/devspace/compose/manager.go index 1e3ea4cf3f..672cdf88a3 100644 --- a/pkg/devspace/compose/manager.go +++ b/pkg/devspace/compose/manager.go @@ -1,12 +1,13 @@ package compose import ( + "context" "fmt" "os" "path/filepath" "strconv" "strings" - + composeloader "github.com/compose-spec/compose-go/loader" composetypes "github.com/compose-spec/compose-go/types" "github.com/loft-sh/devspace/pkg/devspace/config/constants" @@ -37,18 +38,23 @@ func LoadDockerComposeProject(path string) (*composetypes.Project, error) { if err != nil { return nil, err } - - project, err := composeloader.Load(composetypes.ConfigDetails{ + + project, err := composeloader.LoadWithContext(context.Background(), composetypes.ConfigDetails{ ConfigFiles: []composetypes.ConfigFile{ { Content: composeFile, }, }, + }, func(o *composeloader.Options) { + o.ResolvePaths = false }) if err != nil { + if strings.Contains(err.Error(), "project name must not be empty") { + return nil, fmt.Errorf("docker-compose.yaml requires 'name' %w", err) + } return nil, err } - + // Expand service ports for idx, service := range project.Services { ports := []composetypes.ServicePortConfig{} @@ -61,7 +67,7 @@ func LoadDockerComposeProject(path string) (*composetypes.Project, error) { 
} project.Services[idx].Ports = ports } - + return project, nil } @@ -88,12 +94,12 @@ func (cm *composeManager) Load(log log.Logger) error { if err != nil { return err } - + builders := map[string]ConfigBuilder{} err = cm.project.WithServices(nil, func(service composetypes.ServiceConfig) error { configName := "docker-compose" workingDir := cm.project.WorkingDir - + isDependency := dependentsMap[service.Name] != nil if isDependency { configName = service.Name @@ -101,69 +107,69 @@ func (cm *composeManager) Load(log log.Logger) error { workingDir = filepath.Join(workingDir, service.Build.Context) } } - + builder := builders[configName] if builder == nil { builder = NewConfigBuilder(workingDir, log) builders[configName] = builder } - + builder.SetName(configName) - + err := builder.AddImage(cm.project, service) if err != nil { return err } - + err = builder.AddDeployment(cm.project, service) if err != nil { return err } - + err = builder.AddDev(service) if err != nil { return err } - + err = builder.AddSecret(cm.project, service) if err != nil { return err } - + err = builder.AddDependencies(cm.project, service) if err != nil { return err } - + return nil }) if err != nil { return err } - + err = cm.project.WithServices(nil, func(service composetypes.ServiceConfig) error { configName := "docker-compose" path := constants.DefaultConfigPath - + isDependency := dependentsMap[service.Name] != nil if isDependency { configName = service.Name - + path = "devspace-" + service.Name + ".yaml" if service.Build != nil && service.Build.Context != "" { path = filepath.Join(service.Build.Context, "devspace.yaml") } } - + builder := builders[configName] cm.configs[path] = builder.Config() - + return nil }) if err != nil { return err } - + return nil } @@ -181,13 +187,13 @@ func (cm *composeManager) Save() error { if err != nil { return err } - + err = os.WriteFile(path, configYaml, os.ModePerm) if err != nil { return err } } - + return nil } @@ -206,22 +212,22 @@ func 
expandPublishedPortRange(port composetypes.ServicePortConfig) ([]composetyp if !strings.Contains(port.Published, "-") { return []composetypes.ServicePortConfig{port}, nil } - + publishedRange := strings.Split(port.Published, "-") if len(publishedRange) > 2 { return nil, fmt.Errorf("invalid port range") } - + begin, err := strconv.Atoi(publishedRange[0]) if err != nil { return nil, fmt.Errorf("invalid port range %s: beginning value must be numeric", port.Published) } - + end, err := strconv.Atoi(publishedRange[1]) if err != nil { return nil, fmt.Errorf("invalid port range %s: end value must be numeric", port.Published) } - + var portConfigs []composetypes.ServicePortConfig for i := begin; i <= end; i++ { portConfigs = append(portConfigs, composetypes.ServicePortConfig{ @@ -232,6 +238,6 @@ func expandPublishedPortRange(port composetypes.ServicePortConfig) ([]composetyp Mode: "ingress", }) } - + return portConfigs, nil } diff --git a/pkg/devspace/compose/testdata/basic/docker-compose.yaml b/pkg/devspace/compose/testdata/basic/docker-compose.yaml index f8aa3028ae..4eff540c78 100644 --- a/pkg/devspace/compose/testdata/basic/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/basic/docker-compose.yaml @@ -1,3 +1,5 @@ +name: basic + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/build/docker-compose.yaml b/pkg/devspace/compose/testdata/build/docker-compose.yaml index 72274d6fb1..d1a8180539 100644 --- a/pkg/devspace/compose/testdata/build/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build/docker-compose.yaml @@ -1,3 +1,5 @@ +name: build + services: foo: build: foo diff --git a/pkg/devspace/compose/testdata/build_args_list/docker-compose.yaml b/pkg/devspace/compose/testdata/build_args_list/docker-compose.yaml index 5b37d2c691..f29b82ce07 100644 --- a/pkg/devspace/compose/testdata/build_args_list/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build_args_list/docker-compose.yaml @@ -1,3 +1,5 @@ +name: 
build_arg + services: foo: build: diff --git a/pkg/devspace/compose/testdata/build_args_map/docker-compose.yaml b/pkg/devspace/compose/testdata/build_args_map/docker-compose.yaml index 2c2fc7efe6..77cad5805e 100644 --- a/pkg/devspace/compose/testdata/build_args_map/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build_args_map/docker-compose.yaml @@ -1,3 +1,5 @@ +name: build_args_map + services: foo: build: diff --git a/pkg/devspace/compose/testdata/build_context/docker-compose.yaml b/pkg/devspace/compose/testdata/build_context/docker-compose.yaml index 46b3316b4a..94ab53aaa4 100644 --- a/pkg/devspace/compose/testdata/build_context/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build_context/docker-compose.yaml @@ -1,3 +1,5 @@ +name: build_context + services: foo: build: diff --git a/pkg/devspace/compose/testdata/build_dockerfile/docker-compose.yaml b/pkg/devspace/compose/testdata/build_dockerfile/docker-compose.yaml index f153a6dbf1..881f832c24 100644 --- a/pkg/devspace/compose/testdata/build_dockerfile/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build_dockerfile/docker-compose.yaml @@ -1,3 +1,5 @@ +name: build_dockerfile + services: foo: build: diff --git a/pkg/devspace/compose/testdata/build_entry_point/docker-compose.yaml b/pkg/devspace/compose/testdata/build_entry_point/docker-compose.yaml index 64bbb2c144..7c46a98bc5 100644 --- a/pkg/devspace/compose/testdata/build_entry_point/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build_entry_point/docker-compose.yaml @@ -1,3 +1,5 @@ +name: build_entry_point + services: foo: build: diff --git a/pkg/devspace/compose/testdata/build_image/docker-compose.yaml b/pkg/devspace/compose/testdata/build_image/docker-compose.yaml index 9d211f1bb2..c532e545ea 100644 --- a/pkg/devspace/compose/testdata/build_image/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build_image/docker-compose.yaml @@ -1,3 +1,5 @@ +name: build_image + services: foo: build: foo diff --git 
a/pkg/devspace/compose/testdata/build_image_tag/docker-compose.yaml b/pkg/devspace/compose/testdata/build_image_tag/docker-compose.yaml index 953da86bd8..e0b6cb44cd 100644 --- a/pkg/devspace/compose/testdata/build_image_tag/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build_image_tag/docker-compose.yaml @@ -1,3 +1,5 @@ +name: build_image_tag + services: foo: build: foo diff --git a/pkg/devspace/compose/testdata/build_network/docker-compose.yaml b/pkg/devspace/compose/testdata/build_network/docker-compose.yaml index 314404df33..7ebc2befa9 100644 --- a/pkg/devspace/compose/testdata/build_network/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build_network/docker-compose.yaml @@ -1,3 +1,5 @@ +name: build_network + services: foo: build: diff --git a/pkg/devspace/compose/testdata/build_target/docker-compose.yaml b/pkg/devspace/compose/testdata/build_target/docker-compose.yaml index e72217459d..dc77cff5b3 100644 --- a/pkg/devspace/compose/testdata/build_target/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/build_target/docker-compose.yaml @@ -1,3 +1,5 @@ +name: build_target + services: foo: build: diff --git a/pkg/devspace/compose/testdata/command/docker-compose.yaml b/pkg/devspace/compose/testdata/command/docker-compose.yaml index 7bd901a399..a159b46f15 100644 --- a/pkg/devspace/compose/testdata/command/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/command/docker-compose.yaml @@ -1,3 +1,5 @@ +name: command + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/container_name/docker-compose.yaml b/pkg/devspace/compose/testdata/container_name/docker-compose.yaml index d4a420571d..d9dc645aed 100644 --- a/pkg/devspace/compose/testdata/container_name/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/container_name/docker-compose.yaml @@ -1,3 +1,5 @@ +name: container_name + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/depends_on/docker-compose.yaml 
b/pkg/devspace/compose/testdata/depends_on/docker-compose.yaml index b6fe03c5bb..bedd1f9ca8 100644 --- a/pkg/devspace/compose/testdata/depends_on/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/depends_on/docker-compose.yaml @@ -1,3 +1,5 @@ +name: depends_on + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/depends_on_with_build/docker-compose.yaml b/pkg/devspace/compose/testdata/depends_on_with_build/docker-compose.yaml index 8c3c20cea6..04962f2669 100644 --- a/pkg/devspace/compose/testdata/depends_on_with_build/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/depends_on_with_build/docker-compose.yaml @@ -1,3 +1,5 @@ +name: depends_on_with_build + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/entry_point/docker-compose.yaml b/pkg/devspace/compose/testdata/entry_point/docker-compose.yaml index 56d3b99445..0b4f51f84a 100644 --- a/pkg/devspace/compose/testdata/entry_point/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/entry_point/docker-compose.yaml @@ -1,3 +1,5 @@ +name: entry_point + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/env_file_multiple/docker-compose.yaml b/pkg/devspace/compose/testdata/env_file_multiple/docker-compose.yaml index a44c8cdcdd..94ec171927 100644 --- a/pkg/devspace/compose/testdata/env_file_multiple/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/env_file_multiple/docker-compose.yaml @@ -1,3 +1,5 @@ +name: env_file_multiple + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/env_file_single/docker-compose.yaml b/pkg/devspace/compose/testdata/env_file_single/docker-compose.yaml index 855980931f..353006a59a 100644 --- a/pkg/devspace/compose/testdata/env_file_single/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/env_file_single/docker-compose.yaml @@ -1,3 +1,5 @@ +name: env_file_single + services: db: image: mysql/mysql-server:8.0.19 
diff --git a/pkg/devspace/compose/testdata/environment/docker-compose.yaml b/pkg/devspace/compose/testdata/environment/docker-compose.yaml index 31b5611b1b..eec1e1ea6b 100644 --- a/pkg/devspace/compose/testdata/environment/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/environment/docker-compose.yaml @@ -1,3 +1,5 @@ +name: environment + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/expose/docker-compose.yaml b/pkg/devspace/compose/testdata/expose/docker-compose.yaml index c609b94ec8..1617d52411 100644 --- a/pkg/devspace/compose/testdata/expose/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/expose/docker-compose.yaml @@ -1,3 +1,5 @@ +name: expose + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/extra_hosts_multiple/docker-compose.yaml b/pkg/devspace/compose/testdata/extra_hosts_multiple/docker-compose.yaml index 8eff6e4824..edfa264b5d 100644 --- a/pkg/devspace/compose/testdata/extra_hosts_multiple/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/extra_hosts_multiple/docker-compose.yaml @@ -1,3 +1,5 @@ +name: extra_hosts_multiple + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/extra_hosts_single/docker-compose.yaml b/pkg/devspace/compose/testdata/extra_hosts_single/docker-compose.yaml index 14b2530d07..77ea675c8c 100644 --- a/pkg/devspace/compose/testdata/extra_hosts_single/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/extra_hosts_single/docker-compose.yaml @@ -1,3 +1,5 @@ +name: extra_hosts_single + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/healthcheck/docker-compose.yaml b/pkg/devspace/compose/testdata/healthcheck/docker-compose.yaml index 499c1cc179..f4b0a4dfaf 100644 --- a/pkg/devspace/compose/testdata/healthcheck/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/healthcheck/docker-compose.yaml @@ -1,3 +1,5 @@ +name: healthcheck + services: cmd: 
image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/ports-long/docker-compose.yaml b/pkg/devspace/compose/testdata/ports-long/docker-compose.yaml index 2a0bad9350..9650804301 100644 --- a/pkg/devspace/compose/testdata/ports-long/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/ports-long/docker-compose.yaml @@ -1,3 +1,5 @@ +name: ports-long + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/ports-short/docker-compose.yaml b/pkg/devspace/compose/testdata/ports-short/docker-compose.yaml index 448426638c..3c4c2ab177 100644 --- a/pkg/devspace/compose/testdata/ports-short/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/ports-short/docker-compose.yaml @@ -1,3 +1,5 @@ +name: ports-short + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/restart-always/docker-compose.yaml b/pkg/devspace/compose/testdata/restart-always/docker-compose.yaml index a23674cd35..1b87f396ee 100644 --- a/pkg/devspace/compose/testdata/restart-always/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/restart-always/docker-compose.yaml @@ -1,3 +1,5 @@ +name: restart-always + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/restart-no/docker-compose.yaml b/pkg/devspace/compose/testdata/restart-no/docker-compose.yaml index b8119c8bb6..b95dd81191 100644 --- a/pkg/devspace/compose/testdata/restart-no/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/restart-no/docker-compose.yaml @@ -1,3 +1,5 @@ +name: restart-no + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/restart-on-failure/docker-compose.yaml b/pkg/devspace/compose/testdata/restart-on-failure/docker-compose.yaml index 7e48054305..a9d925e7ce 100644 --- a/pkg/devspace/compose/testdata/restart-on-failure/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/restart-on-failure/docker-compose.yaml @@ -1,3 +1,5 @@ +name: restart_on_failure 
+ services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/restart-unless-stopped/docker-compose.yaml b/pkg/devspace/compose/testdata/restart-unless-stopped/docker-compose.yaml index 9b4fe7cc28..81130b78b3 100644 --- a/pkg/devspace/compose/testdata/restart-unless-stopped/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/restart-unless-stopped/docker-compose.yaml @@ -1,3 +1,5 @@ +name: restart-unless-stopped + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/secret-long/docker-compose.yaml b/pkg/devspace/compose/testdata/secret-long/docker-compose.yaml index 0c72cb7dd9..2acce3db06 100644 --- a/pkg/devspace/compose/testdata/secret-long/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/secret-long/docker-compose.yaml @@ -1,3 +1,5 @@ +name: secret-long + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/secret-short/docker-compose.yaml b/pkg/devspace/compose/testdata/secret-short/docker-compose.yaml index ecce9295fc..910dc60d83 100644 --- a/pkg/devspace/compose/testdata/secret-short/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/secret-short/docker-compose.yaml @@ -1,3 +1,5 @@ +name: secret-short + services: db: image: mysql/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/volumes-depends_on/docker-compose.yaml b/pkg/devspace/compose/testdata/volumes-depends_on/docker-compose.yaml index 5712e59b48..b7d004f139 100644 --- a/pkg/devspace/compose/testdata/volumes-depends_on/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/volumes-depends_on/docker-compose.yaml @@ -1,3 +1,5 @@ +name: volumes-depends + services: db: image: loft.sh/mysql-server:8.0.19 diff --git a/pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml b/pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml index 048c839a43..f54adf24cc 100644 --- a/pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml +++ 
b/pkg/devspace/compose/testdata/volumes-long/docker-compose.yaml @@ -1,3 +1,5 @@ +name: volumes-long + services: db: image: alpine diff --git a/pkg/devspace/compose/testdata/volumes-short/docker-compose.yaml b/pkg/devspace/compose/testdata/volumes-short/docker-compose.yaml index 3bbb8838eb..a86d012c39 100644 --- a/pkg/devspace/compose/testdata/volumes-short/docker-compose.yaml +++ b/pkg/devspace/compose/testdata/volumes-short/docker-compose.yaml @@ -1,3 +1,5 @@ +name: volumes-short + services: db: image: alpine diff --git a/vendor/github.com/compose-spec/compose-go/consts/consts.go b/vendor/github.com/compose-spec/compose-go/consts/consts.go index bf5cc9f1b1..76bdb82e1c 100644 --- a/vendor/github.com/compose-spec/compose-go/consts/consts.go +++ b/vendor/github.com/compose-spec/compose-go/consts/consts.go @@ -20,4 +20,5 @@ const ( ComposeProjectName = "COMPOSE_PROJECT_NAME" ComposePathSeparator = "COMPOSE_PATH_SEPARATOR" ComposeFilePath = "COMPOSE_FILE" + ComposeProfiles = "COMPOSE_PROFILES" ) diff --git a/vendor/github.com/compose-spec/compose-go/dotenv/env.go b/vendor/github.com/compose-spec/compose-go/dotenv/env.go new file mode 100644 index 0000000000..c8a538bcb5 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/dotenv/env.go @@ -0,0 +1,84 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package dotenv + +import ( + "bytes" + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +func GetEnvFromFile(currentEnv map[string]string, workingDir string, filenames []string) (map[string]string, error) { + envMap := make(map[string]string) + + dotEnvFiles := filenames + if len(dotEnvFiles) == 0 { + dotEnvFiles = append(dotEnvFiles, filepath.Join(workingDir, ".env")) + } + for _, dotEnvFile := range dotEnvFiles { + abs, err := filepath.Abs(dotEnvFile) + if err != nil { + return envMap, err + } + dotEnvFile = abs + + s, err := os.Stat(dotEnvFile) + if os.IsNotExist(err) { + if len(filenames) == 0 { + return envMap, nil + } + return envMap, errors.Errorf("Couldn't find env file: %s", dotEnvFile) + } + if err != nil { + return envMap, err + } + + if s.IsDir() { + if len(filenames) == 0 { + return envMap, nil + } + return envMap, errors.Errorf("%s is a directory", dotEnvFile) + } + + b, err := os.ReadFile(dotEnvFile) + if os.IsNotExist(err) { + return nil, errors.Errorf("Couldn't read env file: %s", dotEnvFile) + } + if err != nil { + return envMap, err + } + + env, err := ParseWithLookup(bytes.NewReader(b), func(k string) (string, bool) { + v, ok := currentEnv[k] + if ok { + return v, true + } + v, ok = envMap[k] + return v, ok + }) + if err != nil { + return envMap, errors.Wrapf(err, "failed to read %s", dotEnvFile) + } + for k, v := range env { + envMap[k] = v + } + } + + return envMap, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/dotenv/godotenv.go b/vendor/github.com/compose-spec/compose-go/dotenv/godotenv.go index 479831aac8..9b95c990e9 100644 --- a/vendor/github.com/compose-spec/compose-go/dotenv/godotenv.go +++ b/vendor/github.com/compose-spec/compose-go/dotenv/godotenv.go @@ -4,29 +4,28 @@ // // The TL;DR is that you make a .env file that looks something like // -// SOME_ENV_VAR=somevalue +// SOME_ENV_VAR=somevalue // // and then in your go code you can call // -// godotenv.Load() +// godotenv.Load() // // and all the env vars 
declared in .env will be available through os.Getenv("SOME_ENV_VAR") package dotenv import ( - "errors" - "fmt" + "bytes" "io" - "io/ioutil" "os" - "os/exec" "regexp" - "sort" - "strconv" "strings" + + "github.com/compose-spec/compose-go/template" ) -const doubleQuoteSpecialChars = "\\\n\r\"!$`" +var utf8BOM = []byte("\uFEFF") + +var startsWithDigitRegex = regexp.MustCompile(`^\s*\d.*`) // Keys starting with numbers are ignored // LookupFn represents a lookup function to resolve variables from type LookupFn func(string) (string, bool) @@ -42,151 +41,84 @@ func Parse(r io.Reader) (map[string]string, error) { // ParseWithLookup reads an env file from io.Reader, returning a map of keys and values. func ParseWithLookup(r io.Reader, lookupFn LookupFn) (map[string]string, error) { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return nil, err } + // seek past the UTF-8 BOM if it exists (particularly on Windows, some + // editors tend to add it, and it'll cause parsing to fail) + data = bytes.TrimPrefix(data, utf8BOM) + return UnmarshalBytesWithLookup(data, lookupFn) } // Load will read your env file(s) and load them into ENV for this process. // -// Call this function as close as possible to the start of your program (ideally in main) +// Call this function as close as possible to the start of your program (ideally in main). // -// If you call Load without any args it will default to loading .env in the current path +// If you call Load without any args it will default to loading .env in the current path. 
// -// You can otherwise tell it which files to load (there can be more than one) like +// You can otherwise tell it which files to load (there can be more than one) like: // -// godotenv.Load("fileone", "filetwo") +// godotenv.Load("fileone", "filetwo") // // It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults -func Load(filenames ...string) (err error) { +func Load(filenames ...string) error { return load(false, filenames...) } -// Overload will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main) -// -// If you call Overload without any args it will default to loading .env in the current path -// -// You can otherwise tell it which files to load (there can be more than one) like -// -// godotenv.Overload("fileone", "filetwo") -// -// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. -func Overload(filenames ...string) (err error) { - return load(true, filenames...) 
-} - -func load(overload bool, filenames ...string) (err error) { +func load(overload bool, filenames ...string) error { filenames = filenamesOrDefault(filenames) - for _, filename := range filenames { - err = loadFile(filename, overload) + err := loadFile(filename, overload) if err != nil { - return // return early on a spazout + return err } } - return + return nil } // ReadWithLookup gets all env vars from the files and/or lookup function and return values as // a map rather than automatically writing values into env -func ReadWithLookup(lookupFn LookupFn, filenames ...string) (envMap map[string]string, err error) { +func ReadWithLookup(lookupFn LookupFn, filenames ...string) (map[string]string, error) { filenames = filenamesOrDefault(filenames) - envMap = make(map[string]string) + envMap := make(map[string]string) for _, filename := range filenames { individualEnvMap, individualErr := readFile(filename, lookupFn) if individualErr != nil { - err = individualErr - return // return early on a spazout + return envMap, individualErr } for key, value := range individualEnvMap { + if startsWithDigitRegex.MatchString(key) { + continue + } envMap[key] = value } } - return + return envMap, nil } // Read all env (with same file loading semantics as Load) but return values as // a map rather than automatically writing values into env -func Read(filenames ...string) (envMap map[string]string, err error) { +func Read(filenames ...string) (map[string]string, error) { return ReadWithLookup(nil, filenames...) } -// Unmarshal reads an env file from a string, returning a map of keys and values. -func Unmarshal(str string) (envMap map[string]string, err error) { - return UnmarshalBytes([]byte(str)) -} - -// UnmarshalBytes parses env file from byte slice of chars, returning a map of keys and values. 
-func UnmarshalBytes(src []byte) (map[string]string, error) { - return UnmarshalBytesWithLookup(src, nil) -} - // UnmarshalBytesWithLookup parses env file from byte slice of chars, returning a map of keys and values. func UnmarshalBytesWithLookup(src []byte, lookupFn LookupFn) (map[string]string, error) { - out := make(map[string]string) - err := parseBytes(src, out, lookupFn) - return out, err + return UnmarshalWithLookup(string(src), lookupFn) } -// Exec loads env vars from the specified filenames (empty map falls back to default) -// then executes the cmd specified. -// -// Simply hooks up os.Stdin/err/out to the command and calls Run() -// -// If you want more fine grained control over your command it's recommended -// that you use `Load()` or `Read()` and the `os/exec` package yourself. -func Exec(filenames []string, cmd string, cmdArgs []string) error { - if err := Load(filenames...); err != nil { - return err - } - - command := exec.Command(cmd, cmdArgs...) - command.Stdin = os.Stdin - command.Stdout = os.Stdout - command.Stderr = os.Stderr - return command.Run() -} - -// Write serializes the given environment and writes it to a file -func Write(envMap map[string]string, filename string) error { - content, err := Marshal(envMap) - if err != nil { - return err - } - file, err := os.Create(filename) - if err != nil { - return err - } - defer file.Close() - _, err = file.WriteString(content + "\n") - if err != nil { - return err - } - return file.Sync() -} - -// Marshal outputs the given environment as a dotenv-formatted environment file. -// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
-func Marshal(envMap map[string]string) (string, error) { - lines := make([]string, 0, len(envMap)) - for k, v := range envMap { - if d, err := strconv.Atoi(v); err == nil { - lines = append(lines, fmt.Sprintf(`%s=%d`, k, d)) - } else { - lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) - } - } - sort.Strings(lines) - return strings.Join(lines, "\n"), nil +// UnmarshalWithLookup parses env file from string, returning a map of keys and values. +func UnmarshalWithLookup(src string, lookupFn LookupFn) (map[string]string, error) { + out := make(map[string]string) + err := newParser().parse(src, out, lookupFn) + return out, err } func filenamesOrDefault(filenames []string) []string { @@ -218,158 +150,26 @@ func loadFile(filename string, overload bool) error { return nil } -func readFile(filename string, lookupFn LookupFn) (envMap map[string]string, err error) { +func readFile(filename string, lookupFn LookupFn) (map[string]string, error) { file, err := os.Open(filename) if err != nil { - return + return nil, err } defer file.Close() return ParseWithLookup(file, lookupFn) } -var exportRegex = regexp.MustCompile(`^\s*(?:export\s+)?(.*?)\s*$`) - -func parseLine(line string, envMap map[string]string) (key string, value string, err error) { - return parseLineWithLookup(line, envMap, nil) -} -func parseLineWithLookup(line string, envMap map[string]string, lookupFn LookupFn) (key string, value string, err error) { - if len(line) == 0 { - err = errors.New("zero length string") - return - } - - // ditch the comments (but keep quoted hashes) - if strings.Contains(line, "#") { - segmentsBetweenHashes := strings.Split(line, "#") - quotesAreOpen := false - var segmentsToKeep []string - for _, segment := range segmentsBetweenHashes { - if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { - if quotesAreOpen { - quotesAreOpen = false - segmentsToKeep = append(segmentsToKeep, segment) - } else { - quotesAreOpen = true - } - } - - if 
len(segmentsToKeep) == 0 || quotesAreOpen { - segmentsToKeep = append(segmentsToKeep, segment) - } +func expandVariables(value string, envMap map[string]string, lookupFn LookupFn) (string, error) { + retVal, err := template.Substitute(value, func(k string) (string, bool) { + if v, ok := lookupFn(k); ok { + return v, true } - - line = strings.Join(segmentsToKeep, "#") - } - - firstEquals := strings.Index(line, "=") - firstColon := strings.Index(line, ":") - splitString := strings.SplitN(line, "=", 2) - if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { - // This is a yaml-style line - splitString = strings.SplitN(line, ":", 2) - } - - if len(splitString) != 2 { - err = errors.New("can't separate key from value") - return - } - key = exportRegex.ReplaceAllString(splitString[0], "$1") - - // Parse the value - value = parseValue(splitString[1], envMap, lookupFn) - return -} - -var ( - singleQuotesRegex = regexp.MustCompile(`\A'(.*)'\z`) - doubleQuotesRegex = regexp.MustCompile(`\A"(.*)"\z`) - escapeRegex = regexp.MustCompile(`\\.`) - unescapeCharsRegex = regexp.MustCompile(`\\([^$])`) -) - -func parseValue(value string, envMap map[string]string, lookupFn LookupFn) string { - - // trim - value = strings.Trim(value, " ") - - // check if we've got quoted values or possible escapes - if len(value) > 1 { - singleQuotes := singleQuotesRegex.FindStringSubmatch(value) - - doubleQuotes := doubleQuotesRegex.FindStringSubmatch(value) - - if singleQuotes != nil || doubleQuotes != nil { - // pull the quotes off the edges - value = value[1 : len(value)-1] - } - - if doubleQuotes != nil { - // expand newlines - value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { - c := strings.TrimPrefix(match, `\`) - switch c { - case "n": - return "\n" - case "r": - return "\r" - default: - return match - } - }) - // unescape characters - value = unescapeCharsRegex.ReplaceAllString(value, "$1") - } - - if singleQuotes == nil { - value = 
expandVariables(value, envMap, lookupFn) - } - } - - return value -} - -var expandVarRegex = regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) - -func expandVariables(v string, envMap map[string]string, lookupFn LookupFn) string { - return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string { - submatch := expandVarRegex.FindStringSubmatch(s) - - if submatch == nil { - return s - } - if submatch[1] == "\\" || submatch[2] == "(" { - return submatch[0][1:] - } else if submatch[4] != "" { - // first check if we have defined this already earlier - if envMap[submatch[4]] != "" { - return envMap[submatch[4]] - } - if lookupFn == nil { - return "" - } - // if we have not defined it, check the lookup function provided - // by the user - s2, ok := lookupFn(submatch[4]) - if ok { - return s2 - } - return "" - } - return s + v, ok := envMap[k] + return v, ok }) -} - -func doubleQuoteEscape(line string) string { - for _, c := range doubleQuoteSpecialChars { - toReplace := "\\" + string(c) - if c == '\n' { - toReplace = `\n` - } - if c == '\r' { - toReplace = `\r` - } - line = strings.Replace(line, string(c), toReplace, -1) + if err != nil { + return value, err } - return line + return retVal, nil } diff --git a/vendor/github.com/compose-spec/compose-go/dotenv/parser.go b/vendor/github.com/compose-spec/compose-go/dotenv/parser.go index 85ed2c0088..11b6d027c9 100644 --- a/vendor/github.com/compose-spec/compose-go/dotenv/parser.go +++ b/vendor/github.com/compose-spec/compose-go/dotenv/parser.go @@ -1,9 +1,10 @@ package dotenv import ( - "bytes" "errors" "fmt" + "regexp" + "strconv" "strings" "unicode" ) @@ -12,32 +13,44 @@ const ( charComment = '#' prefixSingleQuote = '\'' prefixDoubleQuote = '"' +) - exportPrefix = "export" +var ( + escapeSeqRegex = regexp.MustCompile(`(\\(?:[abcfnrtv$"\\]|0\d{0,3}))`) + exportRegex = regexp.MustCompile(`^export\s+`) ) -func parseBytes(src []byte, out map[string]string, lookupFn LookupFn) error { +type parser struct { + line int 
+} + +func newParser() *parser { + return &parser{ + line: 1, + } +} + +func (p *parser) parse(src string, out map[string]string, lookupFn LookupFn) error { cutset := src + if lookupFn == nil { + lookupFn = noLookupFn + } for { - cutset = getStatementStart(cutset) - if cutset == nil { + cutset = p.getStatementStart(cutset) + if cutset == "" { // reached end of file break } - key, left, inherited, err := locateKeyName(cutset) + key, left, inherited, err := p.locateKeyName(cutset) if err != nil { return err } if strings.Contains(key, " ") { - return errors.New("key cannot contain a space") + return fmt.Errorf("line %d: key cannot contain a space", p.line) } if inherited { - if lookupFn == nil { - lookupFn = noLookupFn - } - value, ok := lookupFn(key) if ok { out[key] = value @@ -46,7 +59,7 @@ func parseBytes(src []byte, out map[string]string, lookupFn LookupFn) error { continue } - value, left, err := extractVarValue(left, out, lookupFn) + value, left, err := p.extractVarValue(left, out, lookupFn) if err != nil { return err } @@ -61,10 +74,10 @@ func parseBytes(src []byte, out map[string]string, lookupFn LookupFn) error { // getStatementPosition returns position of statement begin. // // It skips any comment line or non-whitespace character. 
-func getStatementStart(src []byte) []byte { - pos := indexOfNonSpaceChar(src) +func (p *parser) getStatementStart(src string) string { + pos := p.indexOfNonSpaceChar(src) if pos == -1 { - return nil + return "" } src = src[pos:] @@ -73,139 +86,172 @@ func getStatementStart(src []byte) []byte { } // skip comment section - pos = bytes.IndexFunc(src, isCharFunc('\n')) + pos = strings.IndexFunc(src, isCharFunc('\n')) if pos == -1 { - return nil + return "" } - - return getStatementStart(src[pos:]) + return p.getStatementStart(src[pos:]) } // locateKeyName locates and parses key name and returns rest of slice -func locateKeyName(src []byte) (key string, cutset []byte, inherited bool, err error) { +func (p *parser) locateKeyName(src string) (string, string, bool, error) { + var key string + var inherited bool // trim "export" and space at beginning - src = bytes.TrimLeftFunc(bytes.TrimPrefix(src, []byte(exportPrefix)), isSpace) + if exportRegex.MatchString(src) { + // we use a `strings.trim` to preserve the pointer to the same underlying memory. + // a regexp replace would copy the string. 
+ src = strings.TrimLeftFunc(strings.TrimPrefix(src, "export"), isSpace) + } // locate key name end and validate it in single loop offset := 0 loop: - for i, char := range src { - rchar := rune(char) - if isSpace(rchar) { + for i, rune := range src { + if isSpace(rune) { continue } - switch char { + switch rune { case '=', ':', '\n': // library also supports yaml-style value declaration key = string(src[0:i]) offset = i + 1 - inherited = char == '\n' + inherited = rune == '\n' break loop - case '_': + case '_', '.', '-', '[', ']': default: - // variable name should match [A-Za-z0-9_] - if unicode.IsLetter(rchar) || unicode.IsNumber(rchar) { + // variable name should match [A-Za-z0-9_.-] + if unicode.IsLetter(rune) || unicode.IsNumber(rune) { continue } - return "", nil, inherited, fmt.Errorf( - `unexpected character %q in variable name near %q`, - string(char), string(src)) + return "", "", inherited, fmt.Errorf( + `line %d: unexpected character %q in variable name %q`, + p.line, string(rune), strings.Split(src, "\n")[0]) } } - if len(src) == 0 { - return "", nil, inherited, errors.New("zero length string") + if src == "" { + return "", "", inherited, errors.New("zero length string") } // trim whitespace key = strings.TrimRightFunc(key, unicode.IsSpace) - cutset = bytes.TrimLeftFunc(src[offset:], isSpace) + cutset := strings.TrimLeftFunc(src[offset:], isSpace) return key, cutset, inherited, nil } // extractVarValue extracts variable value and returns rest of slice -func extractVarValue(src []byte, envMap map[string]string, lookupFn LookupFn) (value string, rest []byte, err error) { +func (p *parser) extractVarValue(src string, envMap map[string]string, lookupFn LookupFn) (string, string, error) { quote, isQuoted := hasQuotePrefix(src) if !isQuoted { // unquoted value - read until new line - end := bytes.IndexFunc(src, isNewLine) - var rest []byte - if end < 0 { - value := strings.Split(string(src), "#")[0] // Remove inline comments on unquoted lines - value = 
strings.TrimRightFunc(value, unicode.IsSpace) - return expandVariables(value, envMap, lookupFn), nil, nil - } + value, rest, _ := strings.Cut(src, "\n") + p.line++ - value := strings.Split(string(src[0:end]), "#")[0] + // Remove inline comments on unquoted lines + value, _, _ = strings.Cut(value, " #") value = strings.TrimRightFunc(value, unicode.IsSpace) - rest = src[end:] - return expandVariables(value, envMap, lookupFn), rest, nil + retVal, err := expandVariables(string(value), envMap, lookupFn) + return retVal, rest, err } + previousCharIsEscape := false // lookup quoted string terminator for i := 1; i < len(src); i++ { + if src[i] == '\n' { + p.line++ + } if char := src[i]; char != quote { + if !previousCharIsEscape && char == '\\' { + previousCharIsEscape = true + } else { + previousCharIsEscape = false + } continue } // skip escaped quote symbol (\" or \', depends on quote) - if prevChar := src[i-1]; prevChar == '\\' { + if previousCharIsEscape { + previousCharIsEscape = false continue } // trim quotes - trimFunc := isCharFunc(rune(quote)) - value = string(bytes.TrimLeftFunc(bytes.TrimRightFunc(src[0:i], trimFunc), trimFunc)) + value := string(src[1:i]) if quote == prefixDoubleQuote { - // unescape newlines for double quote (this is compat feature) - // and expand environment variables - value = expandVariables(expandEscapes(value), envMap, lookupFn) + // expand standard shell escape sequences & then interpolate + // variables on the result + retVal, err := expandVariables(expandEscapes(value), envMap, lookupFn) + if err != nil { + return "", "", err + } + value = retVal } return value, src[i+1:], nil } // return formatted error if quoted string is not terminated - valEndIndex := bytes.IndexFunc(src, isCharFunc('\n')) + valEndIndex := strings.IndexFunc(src, isCharFunc('\n')) if valEndIndex == -1 { valEndIndex = len(src) } - return "", nil, fmt.Errorf("unterminated quoted value %s", src[:valEndIndex]) + return "", "", fmt.Errorf("line %d: unterminated quoted 
value %s", p.line, src[:valEndIndex]) } func expandEscapes(str string) string { - out := escapeRegex.ReplaceAllStringFunc(str, func(match string) string { - c := strings.TrimPrefix(match, `\`) - switch c { - case "n": - return "\n" - case "r": - return "\r" - default: + out := escapeSeqRegex.ReplaceAllStringFunc(str, func(match string) string { + if match == `\$` { + // `\$` is not a Go escape sequence, the expansion parser uses + // the special `$$` syntax + // both `FOO=\$bar` and `FOO=$$bar` are valid in an env file and + // will result in FOO w/ literal value of "$bar" (no interpolation) + return "$$" + } + + if strings.HasPrefix(match, `\0`) { + // octal escape sequences in Go are not prefixed with `\0`, so + // rewrite the prefix, e.g. `\0123` -> `\123` -> literal value "S" + match = strings.Replace(match, `\0`, `\`, 1) + } + + // use Go to unquote (unescape) the literal + // see https://go.dev/ref/spec#Rune_literals + // + // NOTE: Go supports ADDITIONAL escapes like `\x` & `\u` & `\U`! 
+ // These are NOT supported, which is why we use a regex to find + // only matches we support and then use `UnquoteChar` instead of a + // `Unquote` on the entire value + v, _, _, err := strconv.UnquoteChar(match, '"') + if err != nil { return match } + return string(v) }) - return unescapeCharsRegex.ReplaceAllString(out, "$1") + return out } -func indexOfNonSpaceChar(src []byte) int { - return bytes.IndexFunc(src, func(r rune) bool { +func (p *parser) indexOfNonSpaceChar(src string) int { + return strings.IndexFunc(src, func(r rune) bool { + if r == '\n' { + p.line++ + } return !unicode.IsSpace(r) }) } // hasQuotePrefix reports whether charset starts with single or double quote and returns quote character -func hasQuotePrefix(src []byte) (quote byte, isQuoted bool) { - if len(src) == 0 { +func hasQuotePrefix(src string) (byte, bool) { + if src == "" { return 0, false } - switch prefix := src[0]; prefix { + switch quote := src[0]; quote { case prefixDoubleQuote, prefixSingleQuote: - return prefix, true + return quote, true // isQuoted default: return 0, false } @@ -227,8 +273,3 @@ func isSpace(r rune) bool { } return false } - -// isNewLine reports whether the rune is a new line character -func isNewLine(r rune) bool { - return r == '\n' -} diff --git a/vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go b/vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go index 9c36e6d8b1..305730838c 100644 --- a/vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go +++ b/vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go @@ -18,9 +18,9 @@ package interpolation import ( "os" - "strings" "github.com/compose-spec/compose-go/template" + "github.com/compose-spec/compose-go/tree" "github.com/pkg/errors" ) @@ -29,7 +29,7 @@ type Options struct { // LookupValue from a key LookupValue LookupValue // TypeCastMapping maps key paths to functions to cast to a type - TypeCastMapping map[Path]Cast + 
TypeCastMapping map[tree.Path]Cast // Substitution function to use Substitute func(string, template.Mapping) (string, error) } @@ -49,7 +49,7 @@ func Interpolate(config map[string]interface{}, opts Options) (map[string]interf opts.LookupValue = os.LookupEnv } if opts.TypeCastMapping == nil { - opts.TypeCastMapping = make(map[Path]Cast) + opts.TypeCastMapping = make(map[tree.Path]Cast) } if opts.Substitute == nil { opts.Substitute = template.Substitute @@ -58,7 +58,7 @@ func Interpolate(config map[string]interface{}, opts Options) (map[string]interf out := map[string]interface{}{} for key, value := range config { - interpolatedValue, err := recursiveInterpolate(value, NewPath(key), opts) + interpolatedValue, err := recursiveInterpolate(value, tree.NewPath(key), opts) if err != nil { return out, err } @@ -68,11 +68,11 @@ func Interpolate(config map[string]interface{}, opts Options) (map[string]interf return out, nil } -func recursiveInterpolate(value interface{}, path Path, opts Options) (interface{}, error) { +func recursiveInterpolate(value interface{}, path tree.Path, opts Options) (interface{}, error) { switch value := value.(type) { case string: newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue)) - if err != nil || newValue == value { + if err != nil { return value, newPathError(path, err) } caster, ok := opts.getCasterForPath(path) @@ -96,7 +96,7 @@ func recursiveInterpolate(value interface{}, path Path, opts Options) (interface case []interface{}: out := make([]interface{}, len(value)) for i, elem := range value { - interpolatedElem, err := recursiveInterpolate(elem, path.Next(PathMatchList), opts) + interpolatedElem, err := recursiveInterpolate(elem, path.Next(tree.PathMatchList), opts) if err != nil { return nil, err } @@ -109,67 +109,22 @@ func recursiveInterpolate(value interface{}, path Path, opts Options) (interface } } -func newPathError(path Path, err error) error { +func newPathError(path tree.Path, err error) error { switch 
err := err.(type) { case nil: return nil case *template.InvalidTemplateError: return errors.Errorf( - "invalid interpolation format for %s: %#v. You may need to escape any $ with another $", + "invalid interpolation format for %s.\nYou may need to escape any $ with another $.\n%s", path, err.Template) default: return errors.Wrapf(err, "error while interpolating %s", path) } } -const pathSeparator = "." - -// PathMatchAll is a token used as part of a Path to match any key at that level -// in the nested structure -const PathMatchAll = "*" - -// PathMatchList is a token used as part of a Path to match items in a list -const PathMatchList = "[]" - -// Path is a dotted path of keys to a value in a nested mapping structure. A * -// section in a path will match any key in the mapping structure. -type Path string - -// NewPath returns a new Path -func NewPath(items ...string) Path { - return Path(strings.Join(items, pathSeparator)) -} - -// Next returns a new path by append part to the current path -func (p Path) Next(part string) Path { - return Path(string(p) + pathSeparator + part) -} - -func (p Path) parts() []string { - return strings.Split(string(p), pathSeparator) -} - -func (p Path) matches(pattern Path) bool { - patternParts := pattern.parts() - parts := p.parts() - - if len(patternParts) != len(parts) { - return false - } - for index, part := range parts { - switch patternParts[index] { - case PathMatchAll, part: - continue - default: - return false - } - } - return true -} - -func (o Options) getCasterForPath(path Path) (Cast, bool) { +func (o Options) getCasterForPath(path tree.Path) (Cast, bool) { for pattern, caster := range o.TypeCastMapping { - if path.matches(pattern) { + if path.Matches(pattern) { return caster, true } } diff --git a/vendor/github.com/compose-spec/compose-go/loader/example1.env b/vendor/github.com/compose-spec/compose-go/loader/example1.env index f19ec0df4e..61716e93b5 100644 --- 
a/vendor/github.com/compose-spec/compose-go/loader/example1.env +++ b/vendor/github.com/compose-spec/compose-go/loader/example1.env @@ -1,5 +1,7 @@ # passed through FOO=foo_from_env_file +ENV.WITH.DOT=ok +ENV_WITH_UNDERSCORE=ok # overridden in example2.env BAR=bar_from_env_file diff --git a/vendor/github.com/compose-spec/compose-go/loader/full-example.yml b/vendor/github.com/compose-spec/compose-go/loader/full-example.yml index 4f17450ef7..24d954578d 100644 --- a/vendor/github.com/compose-spec/compose-go/loader/full-example.yml +++ b/vendor/github.com/compose-spec/compose-go/loader/full-example.yml @@ -1,7 +1,15 @@ -name: Full_Example_project_name +name: full_example_project_name services: - foo: + bar: + build: + dockerfile_inline: | + FROM alpine + RUN echo "hello" > /world.txt + + foo: + annotations: + - com.example.foo=bar build: context: ./dir dockerfile: Dockerfile @@ -15,6 +23,22 @@ services: - foo - bar labels: [FOO=BAR] + additional_contexts: + foo: ./bar + secrets: + - secret1 + - source: secret2 + target: my_secret + uid: '103' + gid: '103' + mode: 0440 + tags: + - foo:v1.0.0 + - docker.io/username/foo:my-other-tag + - ${COMPOSE_PROJECT_NAME}:1.0.0 + platforms: + - linux/amd64 + - linux/arm64 cap_add: @@ -146,8 +170,8 @@ services: # somehost: "162.242.195.82" # otherhost: "50.31.209.229" extra_hosts: - - "somehost:162.242.195.82" - "otherhost:50.31.209.229" + - "somehost:162.242.195.82" hostname: foo @@ -157,6 +181,7 @@ services: timeout: 1s retries: 5 start_period: 15s + start_interval: 5s # Any valid image reference - repo, tag, id, sha image: redis @@ -168,6 +193,8 @@ services: ipc: host + uts: host + # Mapping or list # Mapping values can be strings, numbers or null labels: @@ -395,6 +422,7 @@ configs: external: true config4: name: foo + file: ~/config_data x-bar: baz x-foo: bar @@ -410,8 +438,11 @@ secrets: external: true secret4: name: bar + environment: BAR x-bar: baz x-foo: bar + secret5: + file: /abs/secret_data x-bar: baz x-foo: bar x-nested: 
diff --git a/vendor/github.com/compose-spec/compose-go/loader/include.go b/vendor/github.com/compose-spec/compose-go/loader/include.go new file mode 100644 index 0000000000..aaebfd3019 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/include.go @@ -0,0 +1,167 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import ( + "context" + "fmt" + "path/filepath" + "reflect" + + "github.com/compose-spec/compose-go/dotenv" + interp "github.com/compose-spec/compose-go/interpolation" + "github.com/compose-spec/compose-go/types" + "github.com/pkg/errors" +) + +// LoadIncludeConfig parse the require config from raw yaml +func LoadIncludeConfig(source []interface{}) ([]types.IncludeConfig, error) { + var requires []types.IncludeConfig + err := Transform(source, &requires) + return requires, err +} + +var transformIncludeConfig TransformerFunc = func(data interface{}) (interface{}, error) { + switch value := data.(type) { + case string: + return map[string]interface{}{"path": value}, nil + case map[string]interface{}: + return value, nil + default: + return data, errors.Errorf("invalid type %T for `include` configuration", value) + } +} + +func loadInclude(ctx context.Context, filename string, configDetails types.ConfigDetails, model *types.Config, options *Options, loaded []string) (*types.Config, map[string][]types.IncludeConfig, error) { + included := make(map[string][]types.IncludeConfig) + 
for _, r := range model.Include { + included[filename] = append(included[filename], r) + + for i, p := range r.Path { + for _, loader := range options.ResourceLoaders { + if loader.Accept(p) { + path, err := loader.Load(ctx, p) + if err != nil { + return nil, nil, err + } + p = path + break + } + } + r.Path[i] = absPath(configDetails.WorkingDir, p) + } + if r.ProjectDirectory == "" { + r.ProjectDirectory = filepath.Dir(r.Path[0]) + } + + loadOptions := options.clone() + loadOptions.SetProjectName(model.Name, true) + loadOptions.ResolvePaths = true + loadOptions.SkipNormalization = true + loadOptions.SkipConsistencyCheck = true + + envFromFile, err := dotenv.GetEnvFromFile(configDetails.Environment, r.ProjectDirectory, r.EnvFile) + if err != nil { + return nil, nil, err + } + + config := types.ConfigDetails{ + WorkingDir: r.ProjectDirectory, + ConfigFiles: types.ToConfigFiles(r.Path), + Environment: configDetails.Environment.Clone().Merge(envFromFile), + } + loadOptions.Interpolate = &interp.Options{ + Substitute: options.Interpolate.Substitute, + LookupValue: config.LookupEnv, + TypeCastMapping: options.Interpolate.TypeCastMapping, + } + imported, err := load(ctx, config, loadOptions, loaded) + if err != nil { + return nil, nil, err + } + for k, v := range imported.IncludeReferences { + included[k] = append(included[k], v...) 
+ } + + err = importResources(model, imported, r.Path) + if err != nil { + return nil, nil, err + } + } + model.Include = nil + return model, included, nil +} + +// importResources import into model all resources defined by imported, and report error on conflict +func importResources(model *types.Config, imported *types.Project, path []string) error { + services := mapByName(model.Services) + for _, service := range imported.Services { + if present, ok := services[service.Name]; ok { + if reflect.DeepEqual(present, service) { + continue + } + return fmt.Errorf("imported compose file %s defines conflicting service %s", path, service.Name) + } + model.Services = append(model.Services, service) + } + for _, service := range imported.DisabledServices { + if disabled, ok := services[service.Name]; ok { + if reflect.DeepEqual(disabled, service) { + continue + } + return fmt.Errorf("imported compose file %s defines conflicting service %s", path, service.Name) + } + model.Services = append(model.Services, service) + } + for n, network := range imported.Networks { + if present, ok := model.Networks[n]; ok { + if reflect.DeepEqual(present, network) { + continue + } + return fmt.Errorf("imported compose file %s defines conflicting network %s", path, n) + } + model.Networks[n] = network + } + for n, volume := range imported.Volumes { + if present, ok := model.Volumes[n]; ok { + if reflect.DeepEqual(present, volume) { + continue + } + return fmt.Errorf("imported compose file %s defines conflicting volume %s", path, n) + } + model.Volumes[n] = volume + } + for n, secret := range imported.Secrets { + if present, ok := model.Secrets[n]; ok { + if reflect.DeepEqual(present, secret) { + continue + } + return fmt.Errorf("imported compose file %s defines conflicting secret %s", path, n) + } + model.Secrets[n] = secret + } + for n, config := range imported.Configs { + if present, ok := model.Configs[n]; ok { + if reflect.DeepEqual(present, config) { + continue + } + return 
fmt.Errorf("imported compose file %s defines conflicting config %s", path, n) + } + model.Configs[n] = config + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/loader/interpolate.go b/vendor/github.com/compose-spec/compose-go/loader/interpolate.go index 97a19f5dd8..655e58e11f 100644 --- a/vendor/github.com/compose-spec/compose-go/loader/interpolate.go +++ b/vendor/github.com/compose-spec/compose-go/loader/interpolate.go @@ -21,66 +21,61 @@ import ( "strings" interp "github.com/compose-spec/compose-go/interpolation" + "github.com/compose-spec/compose-go/tree" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) -var interpolateTypeCastMapping = map[interp.Path]interp.Cast{ - servicePath("configs", interp.PathMatchList, "mode"): toInt, - servicePath("cpu_count"): toInt64, - servicePath("cpu_percent"): toFloat, - servicePath("cpu_period"): toInt64, - servicePath("cpu_quota"): toInt64, - servicePath("cpu_rt_period"): toInt64, - servicePath("cpu_rt_runtime"): toInt64, - servicePath("cpus"): toFloat32, - servicePath("cpu_shares"): toInt64, - servicePath("init"): toBoolean, - servicePath("deploy", "replicas"): toInt, - servicePath("deploy", "update_config", "parallelism"): toInt, - servicePath("deploy", "update_config", "max_failure_ratio"): toFloat, - servicePath("deploy", "rollback_config", "parallelism"): toInt, - servicePath("deploy", "rollback_config", "max_failure_ratio"): toFloat, - servicePath("deploy", "restart_policy", "max_attempts"): toInt, - servicePath("deploy", "placement", "max_replicas_per_node"): toInt, - servicePath("healthcheck", "retries"): toInt, - servicePath("healthcheck", "disable"): toBoolean, - servicePath("mem_limit"): toUnitBytes, - servicePath("mem_reservation"): toUnitBytes, - servicePath("memswap_limit"): toUnitBytes, - servicePath("mem_swappiness"): toUnitBytes, - servicePath("oom_kill_disable"): toBoolean, - servicePath("oom_score_adj"): toInt64, - servicePath("pids_limit"): toInt64, - servicePath("ports", 
interp.PathMatchList, "target"): toInt, - servicePath("privileged"): toBoolean, - servicePath("read_only"): toBoolean, - servicePath("scale"): toInt, - servicePath("secrets", interp.PathMatchList, "mode"): toInt, - servicePath("shm_size"): toUnitBytes, - servicePath("stdin_open"): toBoolean, - servicePath("stop_grace_period"): toDuration, - servicePath("tty"): toBoolean, - servicePath("ulimits", interp.PathMatchAll): toInt, - servicePath("ulimits", interp.PathMatchAll, "hard"): toInt, - servicePath("ulimits", interp.PathMatchAll, "soft"): toInt, - servicePath("volumes", interp.PathMatchList, "read_only"): toBoolean, - servicePath("volumes", interp.PathMatchList, "volume", "nocopy"): toBoolean, - servicePath("volumes", interp.PathMatchList, "tmpfs", "size"): toUnitBytes, - iPath("networks", interp.PathMatchAll, "external"): toBoolean, - iPath("networks", interp.PathMatchAll, "internal"): toBoolean, - iPath("networks", interp.PathMatchAll, "attachable"): toBoolean, - iPath("networks", interp.PathMatchAll, "enable_ipv6"): toBoolean, - iPath("volumes", interp.PathMatchAll, "external"): toBoolean, - iPath("secrets", interp.PathMatchAll, "external"): toBoolean, - iPath("configs", interp.PathMatchAll, "external"): toBoolean, +var interpolateTypeCastMapping = map[tree.Path]interp.Cast{ + servicePath("configs", tree.PathMatchList, "mode"): toInt, + servicePath("cpu_count"): toInt64, + servicePath("cpu_percent"): toFloat, + servicePath("cpu_period"): toInt64, + servicePath("cpu_quota"): toInt64, + servicePath("cpu_rt_period"): toInt64, + servicePath("cpu_rt_runtime"): toInt64, + servicePath("cpus"): toFloat32, + servicePath("cpu_shares"): toInt64, + servicePath("init"): toBoolean, + servicePath("deploy", "replicas"): toInt, + servicePath("deploy", "update_config", "parallelism"): toInt, + servicePath("deploy", "update_config", "max_failure_ratio"): toFloat, + servicePath("deploy", "rollback_config", "parallelism"): toInt, + servicePath("deploy", "rollback_config", 
"max_failure_ratio"): toFloat, + servicePath("deploy", "restart_policy", "max_attempts"): toInt, + servicePath("deploy", "placement", "max_replicas_per_node"): toInt, + servicePath("healthcheck", "retries"): toInt, + servicePath("healthcheck", "disable"): toBoolean, + servicePath("oom_kill_disable"): toBoolean, + servicePath("oom_score_adj"): toInt64, + servicePath("pids_limit"): toInt64, + servicePath("ports", tree.PathMatchList, "target"): toInt, + servicePath("privileged"): toBoolean, + servicePath("read_only"): toBoolean, + servicePath("scale"): toInt, + servicePath("secrets", tree.PathMatchList, "mode"): toInt, + servicePath("stdin_open"): toBoolean, + servicePath("tty"): toBoolean, + servicePath("ulimits", tree.PathMatchAll): toInt, + servicePath("ulimits", tree.PathMatchAll, "hard"): toInt, + servicePath("ulimits", tree.PathMatchAll, "soft"): toInt, + servicePath("volumes", tree.PathMatchList, "read_only"): toBoolean, + servicePath("volumes", tree.PathMatchList, "volume", "nocopy"): toBoolean, + iPath("networks", tree.PathMatchAll, "external"): toBoolean, + iPath("networks", tree.PathMatchAll, "internal"): toBoolean, + iPath("networks", tree.PathMatchAll, "attachable"): toBoolean, + iPath("networks", tree.PathMatchAll, "enable_ipv6"): toBoolean, + iPath("volumes", tree.PathMatchAll, "external"): toBoolean, + iPath("secrets", tree.PathMatchAll, "external"): toBoolean, + iPath("configs", tree.PathMatchAll, "external"): toBoolean, } -func iPath(parts ...string) interp.Path { - return interp.NewPath(parts...) +func iPath(parts ...string) tree.Path { + return tree.NewPath(parts...) } -func servicePath(parts ...string) interp.Path { - return iPath(append([]string{"services", interp.PathMatchAll}, parts...)...) +func servicePath(parts ...string) tree.Path { + return iPath(append([]string{"services", tree.PathMatchAll}, parts...)...) 
} func toInt(value string) (interface{}, error) { @@ -91,14 +86,6 @@ func toInt64(value string) (interface{}, error) { return strconv.ParseInt(value, 10, 64) } -func toUnitBytes(value string) (interface{}, error) { - return transformSize(value) -} - -func toDuration(value string) (interface{}, error) { - return transformStringToDuration(value) -} - func toFloat(value string) (interface{}, error) { return strconv.ParseFloat(value, 64) } @@ -114,9 +101,15 @@ func toFloat32(value string) (interface{}, error) { // should match http://yaml.org/type/bool.html func toBoolean(value string) (interface{}, error) { switch strings.ToLower(value) { - case "y", "yes", "true", "on": + case "true": + return true, nil + case "false": + return false, nil + case "y", "yes", "on": + logrus.Warnf("%q for boolean is not supported by YAML 1.2, please use `true`", value) return true, nil - case "n", "no", "false", "off": + case "n", "no", "off": + logrus.Warnf("%q for boolean is not supported by YAML 1.2, please use `false`", value) return false, nil default: return nil, errors.Errorf("invalid boolean: %s", value) diff --git a/vendor/github.com/compose-spec/compose-go/loader/loader.go b/vendor/github.com/compose-spec/compose-go/loader/loader.go index 895bdb2609..a70004671d 100644 --- a/vendor/github.com/compose-spec/compose-go/loader/loader.go +++ b/vendor/github.com/compose-spec/compose-go/loader/loader.go @@ -17,30 +17,27 @@ package loader import ( + "bytes" + "context" "fmt" - "io/ioutil" + "io" "os" - "path" + paths "path" "path/filepath" "reflect" "regexp" - "sort" "strconv" "strings" - "time" "github.com/compose-spec/compose-go/consts" - "github.com/compose-spec/compose-go/dotenv" interp "github.com/compose-spec/compose-go/interpolation" "github.com/compose-spec/compose-go/schema" "github.com/compose-spec/compose-go/template" "github.com/compose-spec/compose-go/types" - "github.com/docker/go-units" - "github.com/mattn/go-shellwords" "github.com/mitchellh/mapstructure" 
"github.com/pkg/errors" "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) // Options supported by Load @@ -59,6 +56,10 @@ type Options struct { SkipConsistencyCheck bool // Skip extends SkipExtends bool + // SkipInclude will ignore `include` and only load model from file(s) set by ConfigDetails + SkipInclude bool + // SkipResolveEnvironment will ignore computing `environment` for services + SkipResolveEnvironment bool // Interpolation options Interpolate *interp.Options // Discard 'env_file' entries after resolving to 'environment' section @@ -67,10 +68,41 @@ type Options struct { projectName string // Indicates when the projectName was imperatively set or guessed from path projectNameImperativelySet bool + // Profiles set profiles to enable + Profiles []string + // ResourceLoaders manages support for remote resources + ResourceLoaders []ResourceLoader +} + +// ResourceLoader is a plugable remote resource resolver +type ResourceLoader interface { + // Accept returns `true` is the resource reference matches ResourceLoader supported protocol(s) + Accept(path string) bool + // Load returns the path to a local copy of remote resource identified by `path`. 
+ Load(ctx context.Context, path string) (string, error) +} + +func (o *Options) clone() *Options { + return &Options{ + SkipValidation: o.SkipValidation, + SkipInterpolation: o.SkipInterpolation, + SkipNormalization: o.SkipNormalization, + ResolvePaths: o.ResolvePaths, + ConvertWindowsPaths: o.ConvertWindowsPaths, + SkipConsistencyCheck: o.SkipConsistencyCheck, + SkipExtends: o.SkipExtends, + SkipInclude: o.SkipInclude, + Interpolate: o.Interpolate, + discardEnvFiles: o.discardEnvFiles, + projectName: o.projectName, + projectNameImperativelySet: o.projectNameImperativelySet, + Profiles: o.Profiles, + ResourceLoaders: o.ResourceLoaders, + } } func (o *Options) SetProjectName(name string, imperativelySet bool) { - o.projectName = normalizeProjectName(name) + o.projectName = name o.projectNameImperativelySet = imperativelySet } @@ -125,26 +157,65 @@ func WithSkipValidation(opts *Options) { opts.SkipValidation = true } +// WithProfiles sets profiles to be activated +func WithProfiles(profiles []string) func(*Options) { + return func(opts *Options) { + opts.Profiles = profiles + } +} + // ParseYAML reads the bytes from a file, parses the bytes into a mapping // structure, and returns it. 
func ParseYAML(source []byte) (map[string]interface{}, error) { + r := bytes.NewReader(source) + decoder := yaml.NewDecoder(r) + m, _, err := parseYAML(decoder) + return m, err +} + +// PostProcessor is used to tweak compose model based on metadata extracted during yaml Unmarshal phase +// that hardly can be implemented using go-yaml and mapstructure +type PostProcessor interface { + yaml.Unmarshaler + + // Apply changes to compose model based on recorder metadata + Apply(config *types.Config) error +} + +func parseYAML(decoder *yaml.Decoder) (map[string]interface{}, PostProcessor, error) { var cfg interface{} - if err := yaml.Unmarshal(source, &cfg); err != nil { - return nil, err + processor := ResetProcessor{target: &cfg} + + if err := decoder.Decode(&processor); err != nil { + return nil, nil, err + } + stringMap, ok := cfg.(map[string]interface{}) + if ok { + converted, err := convertToStringKeysRecursive(stringMap, "") + if err != nil { + return nil, nil, err + } + return converted.(map[string]interface{}), &processor, nil } cfgMap, ok := cfg.(map[interface{}]interface{}) if !ok { - return nil, errors.Errorf("Top-level object must be a mapping") + return nil, nil, errors.Errorf("Top-level object must be a mapping") } converted, err := convertToStringKeysRecursive(cfgMap, "") if err != nil { - return nil, err + return nil, nil, err } - return converted.(map[string]interface{}), nil + return converted.(map[string]interface{}), &processor, nil } -// Load reads a ConfigDetails and returns a fully loaded configuration +// Load reads a ConfigDetails and returns a fully loaded configuration. +// Deprecated: use LoadWithContext. func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Project, error) { + return LoadWithContext(context.Background(), configDetails, options...) 
+} + +// LoadWithContext reads a ConfigDetails and returns a fully loaded configuration +func LoadWithContext(ctx context.Context, configDetails types.ConfigDetails, options ...func(*Options)) (*types.Project, error) { if len(configDetails.ConfigFiles) < 1 { return nil, errors.Errorf("No files specified") } @@ -155,70 +226,129 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types. LookupValue: configDetails.LookupEnv, TypeCastMapping: interpolateTypeCastMapping, }, + ResolvePaths: true, } for _, op := range options { op(opts) } - var configs []*types.Config - for i, file := range configDetails.ConfigFiles { + projectName, err := projectName(configDetails, opts) + if err != nil { + return nil, err + } + opts.projectName = projectName + + // TODO(milas): this should probably ALWAYS set (overriding any existing) + if _, ok := configDetails.Environment[consts.ComposeProjectName]; !ok && projectName != "" { + if configDetails.Environment == nil { + configDetails.Environment = map[string]string{} + } + configDetails.Environment[consts.ComposeProjectName] = projectName + } + + return load(ctx, configDetails, opts, nil) +} + +func load(ctx context.Context, configDetails types.ConfigDetails, opts *Options, loaded []string) (*types.Project, error) { + var model *types.Config + + mainFile := configDetails.ConfigFiles[0].Filename + for _, f := range loaded { + if f == mainFile { + loaded = append(loaded, mainFile) + return nil, errors.Errorf("include cycle detected:\n%s\n include %s", loaded[0], strings.Join(loaded[1:], "\n include ")) + } + } + loaded = append(loaded, mainFile) + + includeRefs := make(map[string][]types.IncludeConfig) + for _, file := range configDetails.ConfigFiles { + var postProcessor PostProcessor configDict := file.Config - if configDict == nil { - dict, err := parseConfig(file.Content, opts) - if err != nil { - return nil, err + + processYaml := func() error { + if !opts.SkipValidation { + if err := 
schema.Validate(configDict); err != nil { + return fmt.Errorf("validating %s: %w", file.Filename, err) + } } - configDict = dict - file.Config = dict - configDetails.ConfigFiles[i] = file - } - if !opts.SkipValidation { - if err := schema.Validate(configDict); err != nil { - return nil, err + configDict = groupXFieldsIntoExtensions(configDict) + + cfg, err := loadSections(ctx, file.Filename, configDict, configDetails, opts) + if err != nil { + return err } - } - configDict = groupXFieldsIntoExtensions(configDict) + if !opts.SkipInclude { + var included map[string][]types.IncludeConfig + cfg, included, err = loadInclude(ctx, file.Filename, configDetails, cfg, opts, loaded) + if err != nil { + return err + } + for k, v := range included { + includeRefs[k] = append(includeRefs[k], v...) + } + } - cfg, err := loadSections(file.Filename, configDict, configDetails, opts) - if err != nil { - return nil, err - } - if opts.discardEnvFiles { - for i := range cfg.Services { - cfg.Services[i].EnvFile = nil + if model == nil { + model = cfg + } else { + merged, err := merge([]*types.Config{model, cfg}) + if err != nil { + return err + } + model = merged + } + if postProcessor != nil { + err = postProcessor.Apply(model) + if err != nil { + return err + } } + return nil } - configs = append(configs, cfg) - } + if configDict == nil { + if len(file.Content) == 0 { + content, err := os.ReadFile(file.Filename) + if err != nil { + return nil, err + } + file.Content = content + } - model, err := merge(configs) - if err != nil { - return nil, err - } + r := bytes.NewReader(file.Content) + decoder := yaml.NewDecoder(r) + for { + dict, p, err := parseConfig(decoder, opts) + if err != nil { + if err != io.EOF { + return nil, fmt.Errorf("parsing %s: %w", file.Filename, err) + } + break + } + configDict = dict + postProcessor = p - for _, s := range model.Services { - var newEnvFiles types.StringList - for _, ef := range s.EnvFile { - newEnvFiles = append(newEnvFiles, 
absPath(configDetails.WorkingDir, ef)) + if err := processYaml(); err != nil { + return nil, err + } + } + } else { + if err := processYaml(); err != nil { + return nil, err + } } - s.EnvFile = newEnvFiles } - projectName, projectNameImperativelySet := opts.GetProjectName() - model.Name = normalizeProjectName(model.Name) - if !projectNameImperativelySet && model.Name != "" { - projectName = model.Name + if model == nil { + return nil, errors.New("empty compose file") } - if projectName != "" { - configDetails.Environment[consts.ComposeProjectName] = projectName - } project := &types.Project{ - Name: projectName, + Name: opts.projectName, WorkingDir: configDetails.WorkingDir, Services: model.Services, Networks: model.Networks, @@ -229,15 +359,44 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types. Extensions: model.Extensions, } + if len(includeRefs) != 0 { + project.IncludeReferences = includeRefs + } + if !opts.SkipNormalization { - err = normalize(project, opts.ResolvePaths) + err := Normalize(project) if err != nil { return nil, err } } + if opts.ResolvePaths { + err := ResolveRelativePaths(project) + if err != nil { + return nil, err + } + } + + if opts.ConvertWindowsPaths { + for i, service := range project.Services { + for j, volume := range service.Volumes { + service.Volumes[j] = convertVolumePath(volume) + } + project.Services[i] = service + } + } + if !opts.SkipConsistencyCheck { - err = checkConsistency(project) + err := checkConsistency(project) + if err != nil { + return nil, err + } + } + + project.ApplyProfiles(opts.Profiles) + + if !opts.SkipResolveEnvironment { + err := project.ResolveServicesEnvironment(opts.discardEnvFiles) if err != nil { return nil, err } @@ -246,24 +405,95 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types. 
return project, nil } -func normalizeProjectName(s string) string { +func InvalidProjectNameErr(v string) error { + return fmt.Errorf( + "invalid project name %q: must consist only of lowercase alphanumeric characters, hyphens, and underscores as well as start with a letter or number", + v, + ) +} + +// projectName determines the canonical name to use for the project considering +// the loader Options as well as `name` fields in Compose YAML fields (which +// also support interpolation). +// +// TODO(milas): restructure loading so that we don't need to re-parse the YAML +// here, as it's both wasteful and makes this code error-prone. +func projectName(details types.ConfigDetails, opts *Options) (string, error) { + projectName, projectNameImperativelySet := opts.GetProjectName() + + // if user did NOT provide a name explicitly, then see if one is defined + // in any of the config files + if !projectNameImperativelySet { + var pjNameFromConfigFile string + for _, configFile := range details.ConfigFiles { + yml, err := ParseYAML(configFile.Content) + if err != nil { + // HACK: the way that loading is currently structured, this is + // a duplicative parse just for the `name`. 
if it fails, we + // give up but don't return the error, knowing that it'll get + // caught downstream for us + return "", nil + } + if val, ok := yml["name"]; ok && val != "" { + sVal, ok := val.(string) + if !ok { + // HACK: see above - this is a temporary parsed version + // that hasn't been schema-validated, but we don't want + // to be the ones to actually report that, so give up, + // knowing that it'll get caught downstream for us + return "", nil + } + pjNameFromConfigFile = sVal + } + } + if !opts.SkipInterpolation { + interpolated, err := interp.Interpolate( + map[string]interface{}{"name": pjNameFromConfigFile}, + *opts.Interpolate, + ) + if err != nil { + return "", err + } + pjNameFromConfigFile = interpolated["name"].(string) + } + pjNameFromConfigFile = NormalizeProjectName(pjNameFromConfigFile) + if pjNameFromConfigFile != "" { + projectName = pjNameFromConfigFile + } + } + + if projectName == "" { + return "", errors.New("project name must not be empty") + } + + if NormalizeProjectName(projectName) != projectName { + return "", InvalidProjectNameErr(projectName) + } + + return projectName, nil +} + +func NormalizeProjectName(s string) string { r := regexp.MustCompile("[a-z0-9_-]") s = strings.ToLower(s) s = strings.Join(r.FindAllString(s, -1), "") return strings.TrimLeft(s, "_-") } -func parseConfig(b []byte, opts *Options) (map[string]interface{}, error) { - yml, err := ParseYAML(b) +func parseConfig(decoder *yaml.Decoder, opts *Options) (map[string]interface{}, PostProcessor, error) { + yml, postProcessor, err := parseYAML(decoder) if err != nil { - return nil, err + return nil, nil, err } if !opts.SkipInterpolation { - return interp.Interpolate(yml, *opts.Interpolate) + interpolated, err := interp.Interpolate(yml, *opts.Interpolate) + return interpolated, postProcessor, err } - return yml, err + return yml, postProcessor, err } +const extensions = "#extensions" // Using # prefix, we prevent risk to conflict with an actual yaml key + func 
groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interface{} { extras := map[string]interface{}{} for key, value := range dict { @@ -276,12 +506,12 @@ func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interfac } } if len(extras) > 0 { - dict["extensions"] = extras + dict[extensions] = extras } return dict } -func loadSections(filename string, config map[string]interface{}, configDetails types.ConfigDetails, opts *Options) (*types.Config, error) { +func loadSections(ctx context.Context, filename string, config map[string]interface{}, configDetails types.ConfigDetails, opts *Options) (*types.Config, error) { var err error cfg := types.Config{ Filename: filename, @@ -294,11 +524,10 @@ func loadSections(filename string, config map[string]interface{}, configDetails } } cfg.Name = name - cfg.Services, err = LoadServices(filename, getSection(config, "services"), configDetails.WorkingDir, configDetails.LookupEnv, opts) + cfg.Services, err = LoadServices(ctx, filename, getSection(config, "services"), configDetails.WorkingDir, configDetails.LookupEnv, opts) if err != nil { return nil, err } - cfg.Networks, err = LoadNetworks(getSection(config, "networks")) if err != nil { return nil, err @@ -307,15 +536,19 @@ func loadSections(filename string, config map[string]interface{}, configDetails if err != nil { return nil, err } - cfg.Secrets, err = LoadSecrets(getSection(config, "secrets"), configDetails, opts.ResolvePaths) + cfg.Secrets, err = LoadSecrets(getSection(config, "secrets")) + if err != nil { + return nil, err + } + cfg.Configs, err = LoadConfigObjs(getSection(config, "configs")) if err != nil { return nil, err } - cfg.Configs, err = LoadConfigObjs(getSection(config, "configs"), configDetails, opts.ResolvePaths) + cfg.Include, err = LoadIncludeConfig(getSequence(config, "include")) if err != nil { return nil, err } - extensions := getSection(config, "extensions") + extensions := getSection(config, extensions) if len(extensions) > 
0 { cfg.Extensions = extensions } @@ -330,6 +563,14 @@ func getSection(config map[string]interface{}, key string) map[string]interface{ return section.(map[string]interface{}) } +func getSequence(config map[string]interface{}, key string) []interface{} { + section, ok := config[key] + if !ok { + return make([]interface{}, 0) + } + return section.([]interface{}) +} + // ForbiddenPropertiesError is returned when there are properties in the Compose // file that are forbidden. type ForbiddenPropertiesError struct { @@ -347,8 +588,9 @@ func Transform(source interface{}, target interface{}, additionalTransformers .. config := &mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( createTransformHook(additionalTransformers...), - mapstructure.StringToTimeDurationHookFunc()), + decoderHook), Result: target, + TagName: "yaml", Metadata: &data, } decoder, err := mapstructure.NewDecoder(config) @@ -370,29 +612,22 @@ type Transformer struct { func createTransformHook(additionalTransformers ...Transformer) mapstructure.DecodeHookFuncType { transforms := map[reflect.Type]func(interface{}) (interface{}, error){ reflect.TypeOf(types.External{}): transformExternal, - reflect.TypeOf(types.HealthCheckTest{}): transformHealthCheckTest, - reflect.TypeOf(types.ShellCommand{}): transformShellCommand, - reflect.TypeOf(types.StringList{}): transformStringList, - reflect.TypeOf(map[string]string{}): transformMapStringString, + reflect.TypeOf(types.Options{}): transformOptions, reflect.TypeOf(types.UlimitsConfig{}): transformUlimits, - reflect.TypeOf(types.UnitBytes(0)): transformSize, reflect.TypeOf([]types.ServicePortConfig{}): transformServicePort, reflect.TypeOf(types.ServiceSecretConfig{}): transformFileReferenceConfig, reflect.TypeOf(types.ServiceConfigObjConfig{}): transformFileReferenceConfig, - reflect.TypeOf(types.StringOrNumberList{}): transformStringOrNumberList, reflect.TypeOf(map[string]*types.ServiceNetworkConfig{}): transformServiceNetworkMap, 
reflect.TypeOf(types.Mapping{}): transformMappingOrListFunc("=", false), reflect.TypeOf(types.MappingWithEquals{}): transformMappingOrListFunc("=", true), - reflect.TypeOf(types.Labels{}): transformMappingOrListFunc("=", false), reflect.TypeOf(types.MappingWithColon{}): transformMappingOrListFunc(":", false), - reflect.TypeOf(types.HostsList{}): transformListOrMappingFunc(":", false), + reflect.TypeOf(types.HostsList{}): transformMappingOrListFunc(":", false), reflect.TypeOf(types.ServiceVolumeConfig{}): transformServiceVolumeConfig, reflect.TypeOf(types.BuildConfig{}): transformBuildConfig, - reflect.TypeOf(types.Duration(0)): transformStringToDuration, reflect.TypeOf(types.DependsOnConfig{}): transformDependsOnConfig, reflect.TypeOf(types.ExtendsConfig{}): transformExtendsConfig, - reflect.TypeOf(types.DeviceRequest{}): transformServiceDeviceRequest, reflect.TypeOf(types.SSHConfig{}): transformSSHConfig, + reflect.TypeOf(types.IncludeConfig{}): transformIncludeConfig, } for _, transformer := range additionalTransformers { @@ -410,6 +645,22 @@ func createTransformHook(additionalTransformers ...Transformer) mapstructure.Dec // keys need to be converted to strings for jsonschema func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) { + if mapping, ok := value.(map[string]interface{}); ok { + for key, entry := range mapping { + var newKeyPrefix string + if keyPrefix == "" { + newKeyPrefix = key + } else { + newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, key) + } + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + mapping[key] = convertedEntry + } + return mapping, nil + } if mapping, ok := value.(map[interface{}]interface{}); ok { dict := make(map[string]interface{}) for key, entry := range mapping { @@ -458,19 +709,20 @@ func formatInvalidKeyError(keyPrefix string, key interface{}) error { // LoadServices produces a ServiceConfig map from a compose file Dict // the 
servicesDict is not validated if directly used. Use Load() to enable validation -func LoadServices(filename string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options) ([]types.ServiceConfig, error) { +func LoadServices(ctx context.Context, filename string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options) ([]types.ServiceConfig, error) { var services []types.ServiceConfig - x, ok := servicesDict["extensions"] + x, ok := servicesDict[extensions] if ok { // as a top-level attribute, "services" doesn't support extensions, and a service can be named `x-foo` for k, v := range x.(map[string]interface{}) { servicesDict[k] = v } + delete(servicesDict, extensions) } for name := range servicesDict { - serviceConfig, err := loadServiceWithExtends(filename, name, servicesDict, workingDir, lookupEnv, opts, &cycleTracker{}) + serviceConfig, err := loadServiceWithExtends(ctx, filename, name, servicesDict, workingDir, lookupEnv, opts, &cycleTracker{}) if err != nil { return nil, err } @@ -481,7 +733,7 @@ func LoadServices(filename string, servicesDict map[string]interface{}, workingD return services, nil } -func loadServiceWithExtends(filename, name string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options, ct *cycleTracker) (*types.ServiceConfig, error) { +func loadServiceWithExtends(ctx context.Context, filename, name string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options, ct *cycleTracker) (*types.ServiceConfig, error) { if err := ct.Add(filename, name); err != nil { return nil, err } @@ -491,62 +743,71 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter return nil, fmt.Errorf("cannot extend service %q in %s: service not found", name, filename) } - serviceConfig, err := LoadService(name, target.(map[string]interface{}), workingDir, lookupEnv, 
opts.ResolvePaths, opts.ConvertWindowsPaths) + if target == nil { + target = map[string]interface{}{} + } + + serviceConfig, err := LoadService(name, target.(map[string]interface{})) if err != nil { return nil, err } if serviceConfig.Extends != nil && !opts.SkipExtends { - baseServiceName := *serviceConfig.Extends["service"] + baseServiceName := serviceConfig.Extends.Service var baseService *types.ServiceConfig - if file := serviceConfig.Extends["file"]; file == nil { - baseService, err = loadServiceWithExtends(filename, baseServiceName, servicesDict, workingDir, lookupEnv, opts, ct) + file := serviceConfig.Extends.File + if file == "" { + baseService, err = loadServiceWithExtends(ctx, filename, baseServiceName, servicesDict, workingDir, lookupEnv, opts, ct) if err != nil { return nil, err } } else { + for _, loader := range opts.ResourceLoaders { + if loader.Accept(file) { + path, err := loader.Load(ctx, file) + if err != nil { + return nil, err + } + file = path + break + } + } // Resolve the path to the imported file, and load it. - baseFilePath := absPath(workingDir, *file) + baseFilePath := absPath(workingDir, file) - bytes, err := ioutil.ReadFile(baseFilePath) + b, err := os.ReadFile(baseFilePath) if err != nil { return nil, err } - baseFile, err := parseConfig(bytes, opts) + r := bytes.NewReader(b) + decoder := yaml.NewDecoder(r) + + baseFile, _, err := parseConfig(decoder, opts) if err != nil { return nil, err } baseFileServices := getSection(baseFile, "services") - baseService, err = loadServiceWithExtends(baseFilePath, baseServiceName, baseFileServices, filepath.Dir(baseFilePath), lookupEnv, opts, ct) + baseService, err = loadServiceWithExtends(ctx, baseFilePath, baseServiceName, baseFileServices, filepath.Dir(baseFilePath), lookupEnv, opts, ct) if err != nil { return nil, err } // Make paths relative to the importing Compose file. 
Note that we - // make the paths relative to `*file` rather than `baseFilePath` so - // that the resulting paths won't be absolute if `*file` isn't an + // make the paths relative to `file` rather than `baseFilePath` so + // that the resulting paths won't be absolute if `file` isn't an // absolute path. - baseFileParent := filepath.Dir(*file) - if baseService.Build != nil { - // Note that the Dockerfile is always defined relative to the - // build context, so there's no need to update the Dockerfile field. - baseService.Build.Context = absPath(baseFileParent, baseService.Build.Context) - } - for i, vol := range baseService.Volumes { - if vol.Type != types.VolumeTypeBind { - continue - } - baseService.Volumes[i].Source = absPath(baseFileParent, vol.Source) - } + baseFileParent := filepath.Dir(file) + ResolveServiceRelativePaths(baseFileParent, baseService) } serviceConfig, err = _merge(baseService, serviceConfig) if err != nil { return nil, err } + serviceConfig.Extends = nil } return serviceConfig, nil @@ -554,7 +815,7 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter // LoadService produces a single ServiceConfig from a compose file Dict // the serviceDict is not validated if directly used. 
Use Load() to enable validation -func LoadService(name string, serviceDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, resolvePaths bool, convertPaths bool) (*types.ServiceConfig, error) { +func LoadService(name string, serviceDict map[string]interface{}) (*types.ServiceConfig, error) { serviceConfig := &types.ServiceConfig{ Scale: 1, } @@ -563,26 +824,15 @@ func LoadService(name string, serviceDict map[string]interface{}, workingDir str } serviceConfig.Name = name - if err := resolveEnvironment(serviceConfig, workingDir, lookupEnv); err != nil { - return nil, err - } - for i, volume := range serviceConfig.Volumes { if volume.Type != types.VolumeTypeBind { continue } - if volume.Source == "" { return nil, errors.New(`invalid mount config for type "bind": field Source must not be empty`) } - if resolvePaths { - serviceConfig.Volumes[i] = resolveVolumePath(volume, workingDir, lookupEnv) - } - - if convertPaths { - serviceConfig.Volumes[i] = convertVolumePath(volume) - } + serviceConfig.Volumes[i] = volume } return serviceConfig, nil @@ -603,52 +853,22 @@ func convertVolumePath(volume types.ServiceVolumeConfig) types.ServiceVolumeConf return volume } -func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, lookupEnv template.Mapping) error { - environment := types.MappingWithEquals{} - - if len(serviceConfig.EnvFile) > 0 { - for _, envFile := range serviceConfig.EnvFile { - filePath := absPath(workingDir, envFile) - file, err := os.Open(filePath) - if err != nil { - return err - } - defer file.Close() - fileVars, err := dotenv.ParseWithLookup(file, dotenv.LookupFn(lookupEnv)) - if err != nil { - return err - } - env := types.MappingWithEquals{} - for k, v := range fileVars { - v := v - env[k] = &v - } - environment.OverrideBy(env.Resolve(lookupEnv).RemoveEmpty()) - } - } - - environment.OverrideBy(serviceConfig.Environment.Resolve(lookupEnv)) - serviceConfig.Environment = environment - return nil -} - -func 
resolveVolumePath(volume types.ServiceVolumeConfig, workingDir string, lookupEnv template.Mapping) types.ServiceVolumeConfig { - filePath := expandUser(volume.Source, lookupEnv) +func resolveMaybeUnixPath(workingDir string, path string) string { + filePath := expandUser(path) // Check if source is an absolute path (either Unix or Windows), to // handle a Windows client with a Unix daemon or vice-versa. // // Note that this is not required for Docker for Windows when specifying // a local Windows path, because Docker for Windows translates the Windows // path into a valid path within the VM. - if !path.IsAbs(filePath) && !isAbs(filePath) { + if !paths.IsAbs(filePath) && !isAbs(filePath) { filePath = absPath(workingDir, filePath) } - volume.Source = filePath - return volume + return filePath } // TODO: make this more robust -func expandUser(path string, lookupEnv template.Mapping) string { +func expandUser(path string) string { if strings.HasPrefix(path, "~") { home, err := os.UserHomeDir() if err != nil { @@ -695,7 +915,7 @@ func LoadNetworks(source map[string]interface{}) (map[string]types.NetworkConfig if network.Name != "" { return nil, errors.Errorf("network %s: network.external.name and network.name conflict; only use network.name", name) } - logrus.Warnf("network %s: network.external.name is deprecated in favor of network.name", name) + logrus.Warnf("network %s: network.external.name is deprecated. Please set network.name with external: true", name) network.Name = network.External.Name network.External.Name = "" case network.Name == "": @@ -748,41 +968,39 @@ func LoadVolumes(source map[string]interface{}) (map[string]types.VolumeConfig, // LoadSecrets produces a SecretConfig map from a compose file Dict // the source Dict is not validated if directly used. 
Use Load() to enable validation -func LoadSecrets(source map[string]interface{}, details types.ConfigDetails, resolvePaths bool) (map[string]types.SecretConfig, error) { +func LoadSecrets(source map[string]interface{}) (map[string]types.SecretConfig, error) { secrets := make(map[string]types.SecretConfig) if err := Transform(source, &secrets); err != nil { return secrets, err } for name, secret := range secrets { - obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret), details, resolvePaths) + obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret)) if err != nil { return nil, err } - secretConfig := types.SecretConfig(obj) - secrets[name] = secretConfig + secrets[name] = types.SecretConfig(obj) } return secrets, nil } // LoadConfigObjs produces a ConfigObjConfig map from a compose file Dict // the source Dict is not validated if directly used. Use Load() to enable validation -func LoadConfigObjs(source map[string]interface{}, details types.ConfigDetails, resolvePaths bool) (map[string]types.ConfigObjConfig, error) { +func LoadConfigObjs(source map[string]interface{}) (map[string]types.ConfigObjConfig, error) { configs := make(map[string]types.ConfigObjConfig) if err := Transform(source, &configs); err != nil { return configs, err } for name, config := range configs { - obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config), details, resolvePaths) + obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config)) if err != nil { return nil, err } - configConfig := types.ConfigObjConfig(obj) - configs[name] = configConfig + configs[name] = types.ConfigObjConfig(obj) } return configs, nil } -func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig, details types.ConfigDetails, resolvePaths bool) (types.FileObjectConfig, error) { +func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig) (types.FileObjectConfig, error) { // if 
"external: true" switch { case obj.External.External: @@ -794,37 +1012,20 @@ func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfi logrus.Warnf("%[1]s %[2]s: %[1]s.external.name is deprecated in favor of %[1]s.name", objType, name) obj.Name = obj.External.Name obj.External.Name = "" - } else { - if obj.Name == "" { - obj.Name = name - } + } else if obj.Name == "" { + obj.Name = name } // if not "external: true" case obj.Driver != "": if obj.File != "" { return obj, errors.Errorf("%[1]s %[2]s: %[1]s.driver and %[1]s.file conflict; only use %[1]s.driver", objType, name) } - default: - if resolvePaths { - obj.File = absPath(details.WorkingDir, obj.File) - } } return obj, nil } -func absPath(workingDir string, filePath string) string { - if strings.HasPrefix(filePath, "~") { - home, _ := os.UserHomeDir() - return filepath.Join(home, filePath[1:]) - } - if filepath.IsAbs(filePath) { - return filePath - } - return filepath.Join(workingDir, filePath) -} - -var transformMapStringString TransformerFunc = func(data interface{}) (interface{}, error) { +var transformOptions TransformerFunc = func(data interface{}) (interface{}, error) { switch value := data.(type) { case map[string]interface{}: return toMapStringString(value, false), nil @@ -887,30 +1088,6 @@ var transformServicePort TransformerFunc = func(data interface{}) (interface{}, } } -var transformServiceDeviceRequest TransformerFunc = func(data interface{}) (interface{}, error) { - switch value := data.(type) { - case map[string]interface{}: - count, ok := value["count"] - if ok { - switch val := count.(type) { - case int: - return value, nil - case string: - if strings.ToLower(val) == "all" { - value["count"] = -1 - return value, nil - } - return data, errors.Errorf("invalid string value for 'count' (the only value allowed is 'all')") - default: - return data, errors.Errorf("invalid type %T for device count", val) - } - } - return data, nil - default: - return data, errors.Errorf("invalid 
type %T for resource reservation", value) - } -} - var transformFileReferenceConfig TransformerFunc = func(data interface{}) (interface{}, error) { switch value := data.(type) { case string: @@ -929,7 +1106,7 @@ func cleanTarget(target string) string { if target == "" { return "" } - return path.Clean(target) + return paths.Clean(target) } var transformBuildConfig TransformerFunc = func(data interface{}) (interface{}, error) { @@ -950,26 +1127,38 @@ var transformDependsOnConfig TransformerFunc = func(data interface{}) (interface for _, serviceIntf := range value { service, ok := serviceIntf.(string) if !ok { - return data, errors.Errorf("invalid type %T for service depends_on elementn, expected string", value) + return data, errors.Errorf("invalid type %T for service depends_on element, expected string", value) } - transformed[service] = map[string]interface{}{"condition": types.ServiceConditionStarted} + transformed[service] = map[string]interface{}{"condition": types.ServiceConditionStarted, "required": true} } return transformed, nil case map[string]interface{}: - return groupXFieldsIntoExtensions(data.(map[string]interface{})), nil + transformed := map[string]interface{}{} + for service, val := range value { + dependsConfigIntf, ok := val.(map[string]interface{}) + if !ok { + return data, errors.Errorf("invalid type %T for service depends_on element", value) + } + if _, ok := dependsConfigIntf["required"]; !ok { + dependsConfigIntf["required"] = true + } + transformed[service] = dependsConfigIntf + } + return groupXFieldsIntoExtensions(transformed), nil default: return data, errors.Errorf("invalid type %T for service depends_on", value) } } -var transformExtendsConfig TransformerFunc = func(data interface{}) (interface{}, error) { - switch data.(type) { +var transformExtendsConfig TransformerFunc = func(value interface{}) (interface{}, error) { + switch value.(type) { case string: - data = map[string]interface{}{ - "service": data, - } + return 
map[string]interface{}{"service": value}, nil + case map[string]interface{}: + return value, nil + default: + return value, errors.Errorf("invalid type %T for extends", value) } - return transformMappingOrListFunc("=", true)(data) } var transformServiceVolumeConfig TransformerFunc = func(data interface{}) (interface{}, error) { @@ -1034,48 +1223,12 @@ func ParseShortSSHSyntax(value string) ([]types.SSHKey, error) { return result, nil } -var transformStringOrNumberList TransformerFunc = func(value interface{}) (interface{}, error) { - list := value.([]interface{}) - result := make([]string, len(list)) - for i, item := range list { - result[i] = fmt.Sprint(item) - } - return result, nil -} - -var transformStringList TransformerFunc = func(data interface{}) (interface{}, error) { - switch value := data.(type) { - case string: - return []string{value}, nil - case []interface{}: - return value, nil - default: - return data, errors.Errorf("invalid type %T for string list", value) - } -} - func transformMappingOrListFunc(sep string, allowNil bool) TransformerFunc { return func(data interface{}) (interface{}, error) { return transformMappingOrList(data, sep, allowNil) } } -func transformListOrMappingFunc(sep string, allowNil bool) TransformerFunc { - return func(data interface{}) (interface{}, error) { - return transformListOrMapping(data, sep, allowNil) - } -} - -func transformListOrMapping(listOrMapping interface{}, sep string, allowNil bool) (interface{}, error) { - switch value := listOrMapping.(type) { - case map[string]interface{}: - return toStringList(value, sep, allowNil), nil - case []interface{}: - return listOrMapping, nil - } - return nil, errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping) -} - func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool) (interface{}, error) { switch value := mappingOrList.(type) { case map[string]interface{}: @@ -1104,52 +1257,6 @@ func transformValueToMapEntry(value 
string, separator string, allowNil bool) (st } } -var transformShellCommand TransformerFunc = func(value interface{}) (interface{}, error) { - if str, ok := value.(string); ok { - return shellwords.Parse(str) - } - return value, nil -} - -var transformHealthCheckTest TransformerFunc = func(data interface{}) (interface{}, error) { - switch value := data.(type) { - case string: - return append([]string{"CMD-SHELL"}, value), nil - case []interface{}: - return value, nil - default: - return value, errors.Errorf("invalid type %T for healthcheck.test", value) - } -} - -var transformSize TransformerFunc = func(value interface{}) (interface{}, error) { - switch value := value.(type) { - case int: - return int64(value), nil - case int64, types.UnitBytes: - return value, nil - case string: - return units.RAMInBytes(value) - default: - return value, errors.Errorf("invalid type for size %T", value) - } -} - -var transformStringToDuration TransformerFunc = func(value interface{}) (interface{}, error) { - switch value := value.(type) { - case string: - d, err := time.ParseDuration(value) - if err != nil { - return value, err - } - return types.Duration(d), nil - case types.Duration: - return value, nil - default: - return value, errors.Errorf("invalid type %T for duration", value) - } -} - func toMapStringString(value map[string]interface{}, allowNil bool) map[string]interface{} { output := make(map[string]interface{}) for key, value := range value { @@ -1168,15 +1275,3 @@ func toString(value interface{}, allowNil bool) interface{} { return "" } } - -func toStringList(value map[string]interface{}, separator string, allowNil bool) []string { - var output []string - for key, value := range value { - if value == nil && !allowNil { - continue - } - output = append(output, fmt.Sprintf("%s%s%s", key, separator, value)) - } - sort.Strings(output) - return output -} diff --git a/vendor/github.com/compose-spec/compose-go/loader/mapstructure.go 
b/vendor/github.com/compose-spec/compose-go/loader/mapstructure.go new file mode 100644 index 0000000000..97a2e39c12 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/mapstructure.go @@ -0,0 +1,53 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import "reflect" + +// comparable to yaml.Unmarshaler, decoder allows a type to define its own custom logic to convert value +// see https://github.com/mitchellh/mapstructure/pull/294 +type decoder interface { + DecodeMapstructure(interface{}) error +} + +// see https://github.com/mitchellh/mapstructure/issues/115#issuecomment-735287466 + adapted to support types derived from built-in types, as DecodeMapstructure would not be able to mutate internal + value, so need to invoke DecodeMapstructure defined by pointer to type +func decoderHook(from reflect.Value, to reflect.Value) (interface{}, error) { + // If the destination implements the decoder interface + u, ok := to.Interface().(decoder) + if !ok { + // for non-struct types we need to invoke func (*type) DecodeMapstructure() + if to.CanAddr() { + pto := to.Addr() + u, ok = pto.Interface().(decoder) + } + if !ok { + return from.Interface(), nil + } + } + // If it is nil and a pointer, create and assign the target value first + if to.Type().Kind() == reflect.Ptr && to.IsNil() { + to.Set(reflect.New(to.Type().Elem())) + u = to.Interface().(decoder) + } + // Call the custom 
DecodeMapstructure method + if err := u.DecodeMapstructure(from.Interface()); err != nil { + return to.Interface(), err + } + return to.Interface(), nil +} diff --git a/vendor/github.com/compose-spec/compose-go/loader/merge.go b/vendor/github.com/compose-spec/compose-go/loader/merge.go index f6138ca292..654d711dee 100644 --- a/vendor/github.com/compose-spec/compose-go/loader/merge.go +++ b/vendor/github.com/compose-spec/compose-go/loader/merge.go @@ -38,11 +38,19 @@ var serviceSpecials = &specials{ reflect.TypeOf([]types.ServiceSecretConfig{}): mergeSlice(toServiceSecretConfigsMap, toServiceSecretConfigsSlice), reflect.TypeOf([]types.ServiceConfigObjConfig{}): mergeSlice(toServiceConfigObjConfigsMap, toSServiceConfigObjConfigsSlice), reflect.TypeOf(&types.UlimitsConfig{}): mergeUlimitsConfig, - reflect.TypeOf(&types.ServiceNetworkConfig{}): mergeServiceNetworkConfig, }, } func (s *specials) Transformer(t reflect.Type) func(dst, src reflect.Value) error { + // TODO this is a workaround waiting for imdario/mergo#131 + if t.Kind() == reflect.Pointer && t.Elem().Kind() == reflect.Bool { + return func(dst, src reflect.Value) error { + if dst.CanSet() && !src.IsNil() { + dst.Set(src) + } + return nil + } + } if fn, ok := s.m[t]; ok { return fn } @@ -113,12 +121,18 @@ func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig, } func _merge(baseService *types.ServiceConfig, overrideService *types.ServiceConfig) (*types.ServiceConfig, error) { - if err := mergo.Merge(baseService, overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(serviceSpecials)); err != nil { + if err := mergo.Merge(baseService, overrideService, + mergo.WithAppendSlice, + mergo.WithOverride, + mergo.WithTransformers(serviceSpecials)); err != nil { return nil, err } if overrideService.Command != nil { baseService.Command = overrideService.Command } + if overrideService.HealthCheck != nil && overrideService.HealthCheck.Test != nil { + 
baseService.HealthCheck.Test = overrideService.HealthCheck.Test + } if overrideService.Entrypoint != nil { baseService.Entrypoint = overrideService.Entrypoint } @@ -127,9 +141,25 @@ func _merge(baseService *types.ServiceConfig, overrideService *types.ServiceConf } else { baseService.Environment = overrideService.Environment } + baseService.Expose = unique(baseService.Expose) return baseService, nil } +func unique(slice []string) []string { + if slice == nil { + return nil + } + uniqMap := make(map[string]struct{}) + var uniqSlice []string + for _, v := range slice { + if _, ok := uniqMap[v]; !ok { + uniqSlice = append(uniqSlice, v) + uniqMap[v] = struct{}{} + } + } + return uniqSlice +} + func toServiceSecretConfigsMap(s interface{}) (map[interface{}]interface{}, error) { secrets, ok := s.([]types.ServiceSecretConfig) if !ok { @@ -290,8 +320,8 @@ func mergeLoggingConfig(dst, src reflect.Value) error { if getLoggingDriver(dst.Elem()) == "" { dst.Elem().FieldByName("Driver").SetString(getLoggingDriver(src.Elem())) } - dstOptions := dst.Elem().FieldByName("Options").Interface().(map[string]string) - srcOptions := src.Elem().FieldByName("Options").Interface().(map[string]string) + dstOptions := dst.Elem().FieldByName("Options").Interface().(types.Options) + srcOptions := src.Elem().FieldByName("Options").Interface().(types.Options) return mergo.Merge(&dstOptions, srcOptions, mergo.WithOverride) } // Different driver, override with src @@ -307,20 +337,6 @@ func mergeUlimitsConfig(dst, src reflect.Value) error { return nil } -// nolint: unparam -func mergeServiceNetworkConfig(dst, src reflect.Value) error { - if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() { - dst.Elem().FieldByName("Aliases").Set(src.Elem().FieldByName("Aliases")) - if ipv4 := src.Elem().FieldByName("Ipv4Address").Interface().(string); ipv4 != "" { - dst.Elem().FieldByName("Ipv4Address").SetString(ipv4) - } - if ipv6 := 
src.Elem().FieldByName("Ipv6Address").Interface().(string); ipv6 != "" { - dst.Elem().FieldByName("Ipv6Address").SetString(ipv6) - } - } - return nil -} - func getLoggingDriver(v reflect.Value) string { return v.FieldByName("Driver").String() } diff --git a/vendor/github.com/compose-spec/compose-go/loader/normalize.go b/vendor/github.com/compose-spec/compose-go/loader/normalize.go index 4b98d624a7..58863b5fab 100644 --- a/vendor/github.com/compose-spec/compose-go/loader/normalize.go +++ b/vendor/github.com/compose-spec/compose-go/loader/normalize.go @@ -18,8 +18,7 @@ package loader import ( "fmt" - "os" - "path/filepath" + "strings" "github.com/compose-spec/compose-go/errdefs" "github.com/compose-spec/compose-go/types" @@ -27,20 +26,8 @@ import ( "github.com/sirupsen/logrus" ) -// normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults -func normalize(project *types.Project, resolvePaths bool) error { - absWorkingDir, err := filepath.Abs(project.WorkingDir) - if err != nil { - return err - } - project.WorkingDir = absWorkingDir - - absComposeFiles, err := absComposeFiles(project.ComposeFiles) - if err != nil { - return err - } - project.ComposeFiles = absComposeFiles - +// Normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults +func Normalize(project *types.Project) error { if project.Networks == nil { project.Networks = make(map[string]types.NetworkConfig) } @@ -50,8 +37,7 @@ func normalize(project *types.Project, resolvePaths bool) error { project.Networks["default"] = types.NetworkConfig{} } - err = relocateExternalName(project) - if err != nil { + if err := relocateExternalName(project); err != nil { return err } @@ -71,22 +57,50 @@ func normalize(project *types.Project, resolvePaths bool) error { } if s.Build != nil { - if s.Build.Dockerfile == "" { - s.Build.Dockerfile = "Dockerfile" + if s.Build.Context == "" { + s.Build.Context = "." 
} - localContext := absPath(project.WorkingDir, s.Build.Context) - if _, err := os.Stat(localContext); err == nil { - if resolvePaths { - s.Build.Context = localContext - } - // } else { - // might be a remote http/git context. Unfortunately supported "remote" syntax is highly ambiguous - // in moby/moby and not defined by compose-spec, so let's assume runtime will check + if s.Build.Dockerfile == "" && s.Build.DockerfileInline == "" { + s.Build.Dockerfile = "Dockerfile" } s.Build.Args = s.Build.Args.Resolve(fn) } s.Environment = s.Environment.Resolve(fn) + for _, link := range s.Links { + parts := strings.Split(link, ":") + if len(parts) == 2 { + link = parts[0] + } + s.DependsOn = setIfMissing(s.DependsOn, link, types.ServiceDependency{ + Condition: types.ServiceConditionStarted, + Restart: true, + Required: true, + }) + } + + for _, namespace := range []string{s.NetworkMode, s.Ipc, s.Pid, s.Uts, s.Cgroup} { + if strings.HasPrefix(namespace, types.ServicePrefix) { + name := namespace[len(types.ServicePrefix):] + s.DependsOn = setIfMissing(s.DependsOn, name, types.ServiceDependency{ + Condition: types.ServiceConditionStarted, + Restart: true, + Required: true, + }) + } + } + + for _, vol := range s.VolumesFrom { + if !strings.HasPrefix(vol, types.ContainerPrefix) { + spec := strings.Split(vol, ":") + s.DependsOn = setIfMissing(s.DependsOn, spec[0], types.ServiceDependency{ + Condition: types.ServiceConditionStarted, + Restart: false, + Required: true, + }) + } + } + err := relocateLogDriver(&s) if err != nil { return err @@ -107,6 +121,8 @@ func normalize(project *types.Project, resolvePaths bool) error { return err } + inferImplicitDependencies(&s) + project.Services[i] = s } @@ -115,9 +131,76 @@ func normalize(project *types.Project, resolvePaths bool) error { return nil } +// IsServiceDependency check the relation set by ref refers to a service +func IsServiceDependency(ref string) (string, bool) { + if strings.HasPrefix( + ref, + types.ServicePrefix, + ) { + 
return ref[len(types.ServicePrefix):], true + } + return "", false +} + +func inferImplicitDependencies(service *types.ServiceConfig) { + var dependencies []string + + maybeReferences := []string{ + service.NetworkMode, + service.Ipc, + service.Pid, + service.Uts, + service.Cgroup, + } + for _, ref := range maybeReferences { + if dep, ok := IsServiceDependency(ref); ok { + dependencies = append(dependencies, dep) + } + } + + for _, vol := range service.VolumesFrom { + spec := strings.Split(vol, ":") + if len(spec) == 0 { + continue + } + if spec[0] == "container" { + continue + } + dependencies = append(dependencies, spec[0]) + } + + for _, link := range service.Links { + dependencies = append(dependencies, strings.Split(link, ":")[0]) + } + + if len(dependencies) > 0 && service.DependsOn == nil { + service.DependsOn = make(types.DependsOnConfig) + } + + for _, d := range dependencies { + if _, ok := service.DependsOn[d]; !ok { + service.DependsOn[d] = types.ServiceDependency{ + Condition: types.ServiceConditionStarted, + Required: true, + } + } + } +} + +// setIfMissing adds a ServiceDependency for service if not already defined +func setIfMissing(d types.DependsOnConfig, service string, dep types.ServiceDependency) types.DependsOnConfig { + if d == nil { + d = types.DependsOnConfig{} + } + if _, ok := d[service]; !ok { + d[service] = dep + } + return d +} + func relocateScale(s *types.ServiceConfig) error { scale := uint64(s.Scale) - if scale != 1 { + if scale > 1 { logrus.Warn("`scale` is deprecated. 
Use the `deploy.replicas` element") if s.Deploy == nil { s.Deploy = &types.DeployConfig{} @@ -130,18 +213,6 @@ func relocateScale(s *types.ServiceConfig) error { return nil } -func absComposeFiles(composeFiles []string) ([]string, error) { - absComposeFiles := make([]string, len(composeFiles)) - for i, composeFile := range composeFiles { - absComposefile, err := filepath.Abs(composeFile) - if err != nil { - return nil, err - } - absComposeFiles[i] = absComposefile - } - return absComposeFiles, nil -} - // Resources with no explicit name are actually named by their key in map func setNameFromKey(project *types.Project) { for i, n := range project.Networks { diff --git a/vendor/github.com/compose-spec/compose-go/loader/null.go b/vendor/github.com/compose-spec/compose-go/loader/null.go new file mode 100644 index 0000000000..648aacde4d --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/null.go @@ -0,0 +1,159 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/compose-spec/compose-go/tree" + "github.com/compose-spec/compose-go/types" + "gopkg.in/yaml.v3" +) + +type ResetProcessor struct { + target interface{} + paths []tree.Path +} + +// UnmarshalYAML implement yaml.Unmarshaler +func (p *ResetProcessor) UnmarshalYAML(value *yaml.Node) error { + resolved, err := p.resolveReset(value, tree.NewPath()) + if err != nil { + return err + } + return resolved.Decode(p.target) +} + +// resolveReset detects `!reset` tag being set on yaml nodes and record position in the yaml tree +func (p *ResetProcessor) resolveReset(node *yaml.Node, path tree.Path) (*yaml.Node, error) { + if node.Tag == "!reset" { + p.paths = append(p.paths, path) + } + switch node.Kind { + case yaml.SequenceNode: + var err error + for idx, v := range node.Content { + next := path.Next(strconv.Itoa(idx)) + node.Content[idx], err = p.resolveReset(v, next) + if err != nil { + return nil, err + } + } + case yaml.MappingNode: + var err error + var key string + for idx, v := range node.Content { + if idx%2 == 0 { + key = v.Value + } else { + node.Content[idx], err = p.resolveReset(v, path.Next(key)) + if err != nil { + return nil, err + } + } + } + } + return node, nil +} + +// Apply finds the go attributes matching recorded paths and reset them to zero value +func (p *ResetProcessor) Apply(target *types.Config) error { + return p.applyNullOverrides(reflect.ValueOf(target), tree.NewPath()) +} + +// applyNullOverrides set val to Zero if it matches any of the recorded paths +func (p *ResetProcessor) applyNullOverrides(val reflect.Value, path tree.Path) error { + val = reflect.Indirect(val) + if !val.IsValid() { + return nil + } + typ := val.Type() + switch { + case path == "services": + // Project.Services is a slice in compose-go, but a mapping in yaml + for i := 0; i < val.Len(); i++ { + service := val.Index(i) + name := service.FieldByName("Name") + next := 
path.Next(name.String()) + err := p.applyNullOverrides(service, next) + if err != nil { + return err + } + } + case typ.Kind() == reflect.Map: + iter := val.MapRange() + KEYS: + for iter.Next() { + k := iter.Key() + next := path.Next(k.String()) + for _, pattern := range p.paths { + if next.Matches(pattern) { + val.SetMapIndex(k, reflect.Value{}) + continue KEYS + } + } + return p.applyNullOverrides(iter.Value(), next) + } + case typ.Kind() == reflect.Slice: + ITER: + for i := 0; i < val.Len(); i++ { + next := path.Next(fmt.Sprintf("[%d]", i)) + for _, pattern := range p.paths { + if next.Matches(pattern) { + + continue ITER + } + } + // TODO(ndeloof) support removal from sequence + return p.applyNullOverrides(val.Index(i), next) + } + + case typ.Kind() == reflect.Struct: + FIELDS: + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + name := field.Name + attr := strings.ToLower(name) + tag := field.Tag.Get("yaml") + tag = strings.Split(tag, ",")[0] + if tag != "" && tag != "-" { + attr = tag + } + next := path.Next(attr) + f := val.Field(i) + for _, pattern := range p.paths { + if next.Matches(pattern) { + f := f + if !f.CanSet() { + return fmt.Errorf("can't override attribute %s", name) + } + // f.SetZero() requires go 1.20 + f.Set(reflect.Zero(f.Type())) + continue FIELDS + } + } + err := p.applyNullOverrides(f, next) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/loader/paths.go b/vendor/github.com/compose-spec/compose-go/loader/paths.go new file mode 100644 index 0000000000..519a6a6900 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/loader/paths.go @@ -0,0 +1,172 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import ( + "os" + "path/filepath" + "strings" + + "github.com/compose-spec/compose-go/types" +) + +// ResolveRelativePaths resolves relative paths based on project WorkingDirectory +func ResolveRelativePaths(project *types.Project) error { + absWorkingDir, err := filepath.Abs(project.WorkingDir) + if err != nil { + return err + } + project.WorkingDir = absWorkingDir + + absComposeFiles, err := absComposeFiles(project.ComposeFiles) + if err != nil { + return err + } + project.ComposeFiles = absComposeFiles + + for i, s := range project.Services { + ResolveServiceRelativePaths(project.WorkingDir, &s) + project.Services[i] = s + } + + for i, obj := range project.Configs { + if obj.File != "" { + obj.File = absPath(project.WorkingDir, obj.File) + project.Configs[i] = obj + } + } + + for i, obj := range project.Secrets { + if obj.File != "" { + obj.File = resolveMaybeUnixPath(project.WorkingDir, obj.File) + project.Secrets[i] = obj + } + } + + for name, config := range project.Volumes { + if config.Driver == "local" && config.DriverOpts["o"] == "bind" { + // This is actually a bind mount + config.DriverOpts["device"] = resolveMaybeUnixPath(project.WorkingDir, config.DriverOpts["device"]) + project.Volumes[name] = config + } + } + + // don't coerce a nil map to an empty map + if project.IncludeReferences != nil { + absIncludes := make(map[string][]types.IncludeConfig, len(project.IncludeReferences)) + for filename, config := range project.IncludeReferences { + filename = absPath(project.WorkingDir, filename) + absConfigs := 
make([]types.IncludeConfig, len(config)) + for i, c := range config { + absConfigs[i] = types.IncludeConfig{ + Path: resolvePaths(project.WorkingDir, c.Path), + ProjectDirectory: absPath(project.WorkingDir, c.ProjectDirectory), + EnvFile: resolvePaths(project.WorkingDir, c.EnvFile), + } + } + absIncludes[filename] = absConfigs + } + project.IncludeReferences = absIncludes + } + + return nil +} + +func ResolveServiceRelativePaths(workingDir string, s *types.ServiceConfig) { + if s.Build != nil { + if !isRemoteContext(s.Build.Context) { + s.Build.Context = absPath(workingDir, s.Build.Context) + } + for name, path := range s.Build.AdditionalContexts { + if strings.Contains(path, "://") { // `docker-image://` or any builder specific context type + continue + } + if isRemoteContext(path) { + continue + } + s.Build.AdditionalContexts[name] = absPath(workingDir, path) + } + } + for j, f := range s.EnvFile { + s.EnvFile[j] = absPath(workingDir, f) + } + + if s.Extends != nil && s.Extends.File != "" { + s.Extends.File = absPath(workingDir, s.Extends.File) + } + + for i, vol := range s.Volumes { + if vol.Type != types.VolumeTypeBind { + continue + } + s.Volumes[i].Source = resolveMaybeUnixPath(workingDir, vol.Source) + } + + if s.Develop != nil { + for i, w := range s.Develop.Watch { + w.Path = absPath(workingDir, w.Path) + s.Develop.Watch[i] = w + } + } +} + +func absPath(workingDir string, filePath string) string { + if strings.HasPrefix(filePath, "~") { + home, _ := os.UserHomeDir() + return filepath.Join(home, filePath[1:]) + } + if filepath.IsAbs(filePath) { + return filePath + } + return filepath.Join(workingDir, filePath) +} + +func absComposeFiles(composeFiles []string) ([]string, error) { + for i, composeFile := range composeFiles { + absComposefile, err := filepath.Abs(composeFile) + if err != nil { + return nil, err + } + composeFiles[i] = absComposefile + } + return composeFiles, nil +} + +// isRemoteContext returns true if the value is a Git reference or HTTP(S) 
URL. +// +// Any other value is assumed to be a local filesystem path and returns false. +// +// See: https://github.com/moby/buildkit/blob/18fc875d9bfd6e065cd8211abc639434ba65aa56/frontend/dockerui/context.go#L76-L79 +func isRemoteContext(maybeURL string) bool { + for _, prefix := range []string{"https://", "http://", "git://", "ssh://", "github.com/", "git@"} { + if strings.HasPrefix(maybeURL, prefix) { + return true + } + } + return false +} + +func resolvePaths(basePath string, in types.StringList) types.StringList { + if in == nil { + return nil + } + ret := make(types.StringList, len(in)) + for i := range in { + ret[i] = absPath(basePath, in[i]) + } + return ret +} diff --git a/vendor/github.com/compose-spec/compose-go/loader/validate.go b/vendor/github.com/compose-spec/compose-go/loader/validate.go index 4493c051da..b4c42c7f1f 100644 --- a/vendor/github.com/compose-spec/compose-go/loader/validate.go +++ b/vendor/github.com/compose-spec/compose-go/loader/validate.go @@ -32,12 +32,48 @@ func checkConsistency(project *types.Project) error { return errors.Wrapf(errdefs.ErrInvalid, "service %q has neither an image nor a build context specified", s.Name) } + if s.Build != nil { + if s.Build.DockerfileInline != "" && s.Build.Dockerfile != "" { + return errors.Wrapf(errdefs.ErrInvalid, "service %q declares mutualy exclusive dockerfile and dockerfile_inline", s.Name) + } + + if len(s.Build.Platforms) > 0 && s.Platform != "" { + var found bool + for _, platform := range s.Build.Platforms { + if platform == s.Platform { + found = true + break + } + } + if !found { + return errors.Wrapf(errdefs.ErrInvalid, "service.build.platforms MUST include service.platform %q ", s.Platform) + } + } + } + + if s.NetworkMode != "" && len(s.Networks) > 0 { + return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %s declares mutually exclusive `network_mode` and `networks`", s.Name)) + } for network := range s.Networks { if _, ok := project.Networks[network]; !ok { return 
errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined network %s", s.Name, network)) } } + if s.HealthCheck != nil && len(s.HealthCheck.Test) > 0 { + switch s.HealthCheck.Test[0] { + case "CMD", "CMD-SHELL", "NONE": + default: + return errors.New(`healthcheck.test must start either by "CMD", "CMD-SHELL" or "NONE"`) + } + } + + for dependedService := range s.DependsOn { + if _, err := project.GetService(dependedService); err != nil { + return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q depends on undefined service %s", s.Name, dependedService)) + } + } + if strings.HasPrefix(s.NetworkMode, types.ServicePrefix) { serviceName := s.NetworkMode[len(types.ServicePrefix):] if _, err := project.GetServices(serviceName); err != nil { @@ -46,18 +82,17 @@ func checkConsistency(project *types.Project) error { } for _, volume := range s.Volumes { - switch volume.Type { - case types.VolumeTypeVolume: - if volume.Source != "" { // non anonymous volumes - if _, ok := project.Volumes[volume.Source]; !ok { - return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined volume %s", s.Name, volume.Source)) - } + if volume.Type == types.VolumeTypeVolume && volume.Source != "" { // non anonymous volumes + if _, ok := project.Volumes[volume.Source]; !ok { + return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined volume %s", s.Name, volume.Source)) } } } - for _, secret := range s.Secrets { - if _, ok := project.Secrets[secret.Source]; !ok { - return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined secret %s", s.Name, secret.Source)) + if s.Build != nil { + for _, secret := range s.Build.Secrets { + if _, ok := project.Secrets[secret.Source]; !ok { + return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined build secret %s", s.Name, secret.Source)) + } } } for _, config := range s.Configs { @@ -65,6 +100,22 @@ func checkConsistency(project *types.Project) 
error { return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined config %s", s.Name, config.Source)) } } + + for _, secret := range s.Secrets { + if _, ok := project.Secrets[secret.Source]; !ok { + return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined secret %s", s.Name, secret.Source)) + } + } } + + for name, secret := range project.Secrets { + if secret.External.External { + continue + } + if secret.File == "" && secret.Environment == "" { + return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("secret %q must declare either `file` or `environment`", name)) + } + } + return nil } diff --git a/vendor/github.com/compose-spec/compose-go/loader/volume.go b/vendor/github.com/compose-spec/compose-go/loader/volume.go index f1e66cde83..dd83414ac9 100644 --- a/vendor/github.com/compose-spec/compose-go/loader/volume.go +++ b/vendor/github.com/compose-spec/compose-go/loader/volume.go @@ -176,5 +176,8 @@ func isFilePath(source string) bool { } first, nextIndex := utf8.DecodeRuneInString(source) + if len(source) <= nextIndex { + return false + } return isWindowsDrive([]rune{first}, rune(source[nextIndex])) } diff --git a/vendor/github.com/compose-spec/compose-go/schema/compose-spec.json b/vendor/github.com/compose-spec/compose-go/schema/compose-spec.json index b2088998b9..d4bff38458 100644 --- a/vendor/github.com/compose-spec/compose-go/schema/compose-spec.json +++ b/vendor/github.com/compose-spec/compose-go/schema/compose-spec.json @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/draft/2019-09/schema#", + "$schema": "https://json-schema.org/draft/2019-09/schema#", "id": "compose_spec.json", "type": "object", "title": "Compose Specification", @@ -13,9 +13,19 @@ "name": { "type": "string", + "pattern": "^[a-z0-9][a-z0-9_-]*$", "description": "define the Compose project name, until user defines one explicitly." 
}, + "include": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/include" + }, + "description": "compose sub-projects to be included." + }, + "services": { "id": "#/properties/services", "type": "object", @@ -81,7 +91,10 @@ "type": "object", "properties": { + "develop": {"$ref": "#/definitions/development"}, "deploy": {"$ref": "#/definitions/deployment"}, + "annotations": {"$ref": "#/definitions/list_or_dict"}, + "attach": {"type": "boolean"}, "build": { "oneOf": [ {"type": "string"}, @@ -90,18 +103,24 @@ "properties": { "context": {"type": "string"}, "dockerfile": {"type": "string"}, + "dockerfile_inline": {"type": "string"}, "args": {"$ref": "#/definitions/list_or_dict"}, "ssh": {"$ref": "#/definitions/list_or_dict"}, "labels": {"$ref": "#/definitions/list_or_dict"}, "cache_from": {"type": "array", "items": {"type": "string"}}, "cache_to": {"type": "array", "items": {"type": "string"}}, "no_cache": {"type": "boolean"}, + "additional_contexts": {"$ref": "#/definitions/list_or_dict"}, "network": {"type": "string"}, "pull": {"type": "boolean"}, "target": {"type": "string"}, "shm_size": {"type": ["integer", "string"]}, "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, - "isolation": {"type": "string"} + "isolation": {"type": "string"}, + "privileged": {"type": "boolean"}, + "secrets": {"$ref": "#/definitions/service_config_or_secret"}, + "tags": {"type": "array", "items": {"type": "string"}}, + "platforms": {"type": "array", "items": {"type": "string"}} }, "additionalProperties": false, "patternProperties": {"^x-": {}} @@ -137,33 +156,10 @@ }, "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup": {"type": "string", "enum": ["host", "private"]}, "cgroup_parent": {"type": "string"}, - "command": { - "oneOf": [ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} - ] - }, - "configs": { - "type": 
"array", - "items": { - "oneOf": [ - {"type": "string"}, - { - "type": "object", - "properties": { - "source": {"type": "string"}, - "target": {"type": "string"}, - "uid": {"type": "string"}, - "gid": {"type": "string"}, - "mode": {"type": "number"} - }, - "additionalProperties": false, - "patternProperties": {"^x-": {}} - } - ] - } - }, + "command": {"$ref": "#/definitions/command"}, + "configs": {"$ref": "#/definitions/service_config_or_secret"}, "container_name": {"type": "string"}, "cpu_count": {"type": "integer", "minimum": 0}, "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100}, @@ -195,6 +191,11 @@ "type": "object", "additionalProperties": false, "properties": { + "restart": {"type": "boolean"}, + "required": { + "type": "boolean", + "default": true + }, "condition": { "type": "string", "enum": ["service_started", "service_healthy", "service_completed_successfully"] @@ -212,12 +213,7 @@ "dns_opt": {"type": "array","items": {"type": "string"}, "uniqueItems": true}, "dns_search": {"$ref": "#/definitions/string_or_list"}, "domainname": {"type": "string"}, - "entrypoint": { - "oneOf": [ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} - ] - }, + "entrypoint": {"$ref": "#/definitions/command"}, "env_file": {"$ref": "#/definitions/string_or_list"}, "environment": {"$ref": "#/definitions/list_or_dict"}, @@ -352,26 +348,7 @@ }, "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "shm_size": {"type": ["number", "string"]}, - "secrets": { - "type": "array", - "items": { - "oneOf": [ - {"type": "string"}, - { - "type": "object", - "properties": { - "source": {"type": "string"}, - "target": {"type": "string"}, - "uid": {"type": "string"}, - "gid": {"type": "string"}, - "mode": {"type": "number"} - }, - "additionalProperties": false, - "patternProperties": {"^x-": {}} - } - ] - } - }, + "secrets": {"$ref": "#/definitions/service_config_or_secret"}, "sysctls": {"$ref": "#/definitions/list_or_dict"}, 
"stdin_open": {"type": "boolean"}, "stop_grace_period": {"type": "string", "format": "duration"}, @@ -400,6 +377,7 @@ } }, "user": {"type": "string"}, + "uts": {"type": "string"}, "userns_mode": {"type": "string"}, "volumes": { "type": "array", @@ -441,7 +419,8 @@ {"type": "integer", "minimum": 0}, {"type": "string"} ] - } + }, + "mode": {"type": "number"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} @@ -479,11 +458,32 @@ ] }, "timeout": {"type": "string", "format": "duration"}, - "start_period": {"type": "string", "format": "duration"} + "start_period": {"type": "string", "format": "duration"}, + "start_interval": {"type": "string", "format": "duration"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, + "development": { + "id": "#/definitions/development", + "type": ["object", "null"], + "properties": { + "watch": { + "type": "array", + "items": { + "type": "object", + "properties": { + "ignore": {"type": "array", "items": {"type": "string"}}, + "path": {"type": "string"}, + "action": {"type": "string", "enum": ["rebuild", "sync", "sync+restart"]}, + "target": {"type": "string"} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + } + }, "deployment": { "id": "#/definitions/deployment", "type": ["object", "null"], @@ -624,6 +624,22 @@ } }, + "include": { + "id": "#/definitions/include", + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "path": {"$ref": "#/definitions/string_or_list"}, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "project_directory": {"type": "string"} + }, + "additionalProperties": false + } + ] + }, + "network": { "id": "#/definitions/network", "type": ["object", "null"], @@ -721,6 +737,7 @@ "type": "object", "properties": { "name": {"type": "string"}, + "environment": {"type": "string"}, "file": {"type": "string"}, "external": { "type": ["boolean", "object"], @@ -764,6 +781,14 @@ "patternProperties": {"^x-": {}} }, + "command": { + 
"oneOf": [ + {"type": "null"}, + {"type": "string"}, + {"type": "array","items": {"type": "string"}} + ] + }, + "string_or_list": { "oneOf": [ {"type": "string"}, @@ -809,6 +834,27 @@ "additionalProperties": false }, + "service_config_or_secret": { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "source": {"type": "string"}, + "target": {"type": "string"}, + "uid": {"type": "string"}, + "gid": {"type": "string"}, + "mode": {"type": "number"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + } + }, + "constraints": { "service": { "id": "#/definitions/constraints/service", diff --git a/vendor/github.com/compose-spec/compose-go/schema/schema.go b/vendor/github.com/compose-spec/compose-go/schema/schema.go index af3cb0a3be..bfbaa93557 100644 --- a/vendor/github.com/compose-spec/compose-go/schema/schema.go +++ b/vendor/github.com/compose-spec/compose-go/schema/schema.go @@ -17,19 +17,18 @@ package schema import ( + // Enable support for embedded static resources + _ "embed" "fmt" "strings" "time" "github.com/xeipuuv/gojsonschema" - - // Enable support for embedded static resources - _ "embed" ) type portsFormatChecker struct{} -func (checker portsFormatChecker) IsFormat(input interface{}) bool { +func (checker portsFormatChecker) IsFormat(_ interface{}) bool { // TODO: implement this return true } @@ -52,6 +51,7 @@ func init() { } // Schema is the compose-spec JSON schema +// //go:embed compose-spec.json var Schema string diff --git a/vendor/github.com/compose-spec/compose-go/template/template.go b/vendor/github.com/compose-spec/compose-go/template/template.go index 22e4e95ada..9367f39546 100644 --- a/vendor/github.com/compose-spec/compose-go/template/template.go +++ b/vendor/github.com/compose-spec/compose-go/template/template.go @@ -17,8 +17,10 @@ package template import ( + "errors" "fmt" "regexp" + "sort" "strings" "github.com/sirupsen/logrus" @@ -26,12 +28,20 @@ import ( 
var delimiter = "\\$" var substitutionNamed = "[_a-z][_a-z0-9]*" +var substitutionBraced = "[_a-z][_a-z0-9]*(?::?[-+?](.*))?" -var substitutionBraced = "[_a-z][_a-z0-9]*(?::?[-?](.*}|[^}]*))?" +var groupEscaped = "escaped" +var groupNamed = "named" +var groupBraced = "braced" +var groupInvalid = "invalid" var patternString = fmt.Sprintf( - "%s(?i:(?P%s)|(?P%s)|{(?P%s)}|(?P))", - delimiter, delimiter, substitutionNamed, substitutionBraced, + "%s(?i:(?P<%s>%s)|(?P<%s>%s)|{(?:(?P<%s>%s)}|(?P<%s>)))", + delimiter, + groupEscaped, delimiter, + groupNamed, substitutionNamed, + groupBraced, substitutionBraced, + groupInvalid, ) var defaultPattern = regexp.MustCompile(patternString) @@ -46,6 +56,19 @@ func (e InvalidTemplateError) Error() string { return fmt.Sprintf("Invalid template: %#v", e.Template) } +// MissingRequiredError is returned when a variable template is missing +type MissingRequiredError struct { + Variable string + Reason string +} + +func (e MissingRequiredError) Error() string { + if e.Reason != "" { + return fmt.Sprintf("required variable %s is missing a value: %s", e.Variable, e.Reason) + } + return fmt.Sprintf("required variable %s is missing a value", e.Variable) +} + // Mapping is a user-supplied function which maps from variable names to values. // Returns the value as a string and a bool indicating whether // the value is present, to distinguish between an empty string @@ -57,87 +80,173 @@ type Mapping func(string) (string, bool) // the substitution and an error. type SubstituteFunc func(string, Mapping) (string, bool, error) -// SubstituteWith substitute variables in the string with their values. -// It accepts additional substitute function. 
-func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) { - if len(subsFuncs) == 0 { - subsFuncs = getDefaultSortedSubstitutionFunctions(template) - } - var err error - result := pattern.ReplaceAllStringFunc(template, func(substring string) string { - closingBraceIndex := getFirstBraceClosingIndex(substring) - rest := "" - if closingBraceIndex > -1 { - rest = substring[closingBraceIndex+1:] - substring = substring[0 : closingBraceIndex+1] - } +// ReplacementFunc is a user-supplied function that is apply to the matching +// substring. Returns the value as a string and an error. +type ReplacementFunc func(string, Mapping, *Config) (string, error) - matches := pattern.FindStringSubmatch(substring) - groups := matchGroups(matches, pattern) - if escaped := groups["escaped"]; escaped != "" { - return escaped - } +type Config struct { + pattern *regexp.Regexp + substituteFunc SubstituteFunc + replacementFunc ReplacementFunc + logging bool +} - braced := false - substitution := groups["named"] - if substitution == "" { - substitution = groups["braced"] - braced = true - } +type Option func(*Config) - if substitution == "" { - err = &InvalidTemplateError{Template: template} - return "" - } +func WithPattern(pattern *regexp.Regexp) Option { + return func(cfg *Config) { + cfg.pattern = pattern + } +} - if braced { - for _, f := range subsFuncs { - var ( - value string - applied bool - ) - value, applied, err = f(substitution, mapping) - if err != nil { - return "" - } - if !applied { - continue - } - interpolatedNested, err := SubstituteWith(rest, mapping, pattern, subsFuncs...) 
- if err != nil { - return "" +func WithSubstitutionFunction(subsFunc SubstituteFunc) Option { + return func(cfg *Config) { + cfg.substituteFunc = subsFunc + } +} + +func WithReplacementFunction(replacementFunc ReplacementFunc) Option { + return func(cfg *Config) { + cfg.replacementFunc = replacementFunc + } +} + +func WithoutLogging(cfg *Config) { + cfg.logging = false +} + +// SubstituteWithOptions substitute variables in the string with their values. +// It accepts additional options such as a custom function or pattern. +func SubstituteWithOptions(template string, mapping Mapping, options ...Option) (string, error) { + var returnErr error + + cfg := &Config{ + pattern: defaultPattern, + replacementFunc: DefaultReplacementFunc, + logging: true, + } + for _, o := range options { + o(cfg) + } + + result := cfg.pattern.ReplaceAllStringFunc(template, func(substring string) string { + replacement, err := cfg.replacementFunc(substring, mapping, cfg) + if err != nil { + // Add the template for template errors + var tmplErr *InvalidTemplateError + if errors.As(err, &tmplErr) { + if tmplErr.Template == "" { + tmplErr.Template = template } - return value + interpolatedNested } - } + // Save the first error to be returned + if returnErr == nil { + returnErr = err + } - value, ok := mapping(substitution) - if !ok { - logrus.Warnf("The %q variable is not set. 
Defaulting to a blank string.", substitution) } - return value + return replacement }) - return result, err + return result, returnErr +} + +func DefaultReplacementFunc(substring string, mapping Mapping, cfg *Config) (string, error) { + value, _, err := DefaultReplacementAppliedFunc(substring, mapping, cfg) + return value, err } -func getDefaultSortedSubstitutionFunctions(template string, fns ...SubstituteFunc) []SubstituteFunc { - hyphenIndex := strings.IndexByte(template, '-') - questionIndex := strings.IndexByte(template, '?') - if hyphenIndex < 0 || hyphenIndex > questionIndex { - return []SubstituteFunc{ - requiredNonEmpty, - required, - softDefault, - hardDefault, +func DefaultReplacementAppliedFunc(substring string, mapping Mapping, cfg *Config) (string, bool, error) { + pattern := cfg.pattern + subsFunc := cfg.substituteFunc + if subsFunc == nil { + _, subsFunc = getSubstitutionFunctionForTemplate(substring) + } + + closingBraceIndex := getFirstBraceClosingIndex(substring) + rest := "" + if closingBraceIndex > -1 { + rest = substring[closingBraceIndex+1:] + substring = substring[0 : closingBraceIndex+1] + } + + matches := pattern.FindStringSubmatch(substring) + groups := matchGroups(matches, pattern) + if escaped := groups[groupEscaped]; escaped != "" { + return escaped, true, nil + } + + braced := false + substitution := groups[groupNamed] + if substitution == "" { + substitution = groups[groupBraced] + braced = true + } + + if substitution == "" { + return "", false, &InvalidTemplateError{} + } + + if braced { + value, applied, err := subsFunc(substitution, mapping) + if err != nil { + return "", false, err } + if applied { + interpolatedNested, err := SubstituteWith(rest, mapping, pattern) + if err != nil { + return "", false, err + } + return value + interpolatedNested, true, nil + } + } + + value, ok := mapping(substitution) + if !ok && cfg.logging { + logrus.Warnf("The %q variable is not set. 
Defaulting to a blank string.", substitution) + } + + return value, ok, nil +} + +// SubstituteWith substitute variables in the string with their values. +// It accepts additional substitute function. +func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) { + options := []Option{ + WithPattern(pattern), + } + if len(subsFuncs) > 0 { + options = append(options, WithSubstitutionFunction(subsFuncs[0])) } - return []SubstituteFunc{ - softDefault, - hardDefault, - requiredNonEmpty, - required, + + return SubstituteWithOptions(template, mapping, options...) +} + +func getSubstitutionFunctionForTemplate(template string) (string, SubstituteFunc) { + interpolationMapping := []struct { + string + SubstituteFunc + }{ + {":?", requiredErrorWhenEmptyOrUnset}, + {"?", requiredErrorWhenUnset}, + {":-", defaultWhenEmptyOrUnset}, + {"-", defaultWhenUnset}, + {":+", defaultWhenNotEmpty}, + {"+", defaultWhenSet}, } + sort.Slice(interpolationMapping, func(i, j int) bool { + idxI := strings.Index(template, interpolationMapping[i].string) + idxJ := strings.Index(template, interpolationMapping[j].string) + if idxI < 0 { + return false + } + if idxJ < 0 { + return true + } + return idxI < idxJ + }) + + return interpolationMapping[0].string, interpolationMapping[0].SubstituteFunc } func getFirstBraceClosingIndex(s string) int { @@ -203,9 +312,10 @@ func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]Variab } type Variable struct { - Name string - DefaultValue string - Required bool + Name string + DefaultValue string + PresenceValue string + Required bool } func extractVariable(value interface{}, pattern *regexp.Regexp) ([]Variable, bool) { @@ -220,15 +330,16 @@ func extractVariable(value interface{}, pattern *regexp.Regexp) ([]Variable, boo values := []Variable{} for _, match := range matches { groups := matchGroups(match, pattern) - if escaped := groups["escaped"]; escaped != "" { + if escaped := 
groups[groupEscaped]; escaped != "" { continue } - val := groups["named"] + val := groups[groupNamed] if val == "" { - val = groups["braced"] + val = groups[groupBraced] } name := val var defaultValue string + var presenceValue string var required bool switch { case strings.Contains(val, ":?"): @@ -241,19 +352,52 @@ func extractVariable(value interface{}, pattern *regexp.Regexp) ([]Variable, boo name, defaultValue = partition(val, ":-") case strings.Contains(val, "-"): name, defaultValue = partition(val, "-") + case strings.Contains(val, ":+"): + name, presenceValue = partition(val, ":+") + case strings.Contains(val, "+"): + name, presenceValue = partition(val, "+") } values = append(values, Variable{ - Name: name, - DefaultValue: defaultValue, - Required: required, + Name: name, + DefaultValue: defaultValue, + PresenceValue: presenceValue, + Required: required, }) } return values, len(values) > 0 } // Soft default (fall back if unset or empty) -func softDefault(substitution string, mapping Mapping) (string, bool, error) { - sep := ":-" +func defaultWhenEmptyOrUnset(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenAbsence(substitution, mapping, true) +} + +// Hard default (fall back if-and-only-if empty) +func defaultWhenUnset(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenAbsence(substitution, mapping, false) +} + +func defaultWhenNotEmpty(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenPresence(substitution, mapping, true) +} + +func defaultWhenSet(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenPresence(substitution, mapping, false) +} + +func requiredErrorWhenEmptyOrUnset(substitution string, mapping Mapping) (string, bool, error) { + return withRequired(substitution, mapping, ":?", func(v string) bool { return v != "" }) +} + +func requiredErrorWhenUnset(substitution string, mapping Mapping) (string, bool, 
error) { + return withRequired(substitution, mapping, "?", func(_ string) bool { return true }) +} + +func withDefaultWhenPresence(substitution string, mapping Mapping, notEmpty bool) (string, bool, error) { + sep := "+" + if notEmpty { + sep = ":+" + } if !strings.Contains(substitution, sep) { return "", false, nil } @@ -263,15 +407,17 @@ func softDefault(substitution string, mapping Mapping) (string, bool, error) { return "", false, err } value, ok := mapping(name) - if !ok || value == "" { + if ok && (!notEmpty || (notEmpty && value != "")) { return defaultValue, true, nil } return value, true, nil } -// Hard default (fall back if-and-only-if empty) -func hardDefault(substitution string, mapping Mapping) (string, bool, error) { +func withDefaultWhenAbsence(substitution string, mapping Mapping, emptyOrUnset bool) (string, bool, error) { sep := "-" + if emptyOrUnset { + sep = ":-" + } if !strings.Contains(substitution, sep) { return "", false, nil } @@ -281,20 +427,12 @@ func hardDefault(substitution string, mapping Mapping) (string, bool, error) { return "", false, err } value, ok := mapping(name) - if !ok { + if !ok || (emptyOrUnset && value == "") { return defaultValue, true, nil } return value, true, nil } -func requiredNonEmpty(substitution string, mapping Mapping) (string, bool, error) { - return withRequired(substitution, mapping, ":?", func(v string) bool { return v != "" }) -} - -func required(substitution string, mapping Mapping) (string, bool, error) { - return withRequired(substitution, mapping, "?", func(_ string) bool { return true }) -} - func withRequired(substitution string, mapping Mapping, sep string, valid func(string) bool) (string, bool, error) { if !strings.Contains(substitution, sep) { return "", false, nil @@ -306,8 +444,9 @@ func withRequired(substitution string, mapping Mapping, sep string, valid func(s } value, ok := mapping(name) if !ok || !valid(value) { - return "", true, &InvalidTemplateError{ - Template: fmt.Sprintf("required 
variable %s is missing a value: %s", name, errorMessage), + return "", true, &MissingRequiredError{ + Reason: errorMessage, + Variable: name, } } return value, true, nil diff --git a/vendor/github.com/compose-spec/compose-go/tree/path.go b/vendor/github.com/compose-spec/compose-go/tree/path.go new file mode 100644 index 0000000000..59c2503075 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/tree/path.go @@ -0,0 +1,67 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tree + +import "strings" + +const pathSeparator = "." + +// PathMatchAll is a token used as part of a Path to match any key at that level +// in the nested structure +const PathMatchAll = "*" + +// PathMatchList is a token used as part of a Path to match items in a list +const PathMatchList = "[]" + +// Path is a dotted path of keys to a value in a nested mapping structure. A * +// section in a path will match any key in the mapping structure. 
+type Path string + +// NewPath returns a new Path +func NewPath(items ...string) Path { + return Path(strings.Join(items, pathSeparator)) +} + +// Next returns a new path by append part to the current path +func (p Path) Next(part string) Path { + if p == "" { + return Path(part) + } + return Path(string(p) + pathSeparator + part) +} + +func (p Path) Parts() []string { + return strings.Split(string(p), pathSeparator) +} + +func (p Path) Matches(pattern Path) bool { + patternParts := pattern.Parts() + parts := p.Parts() + + if len(patternParts) != len(parts) { + return false + } + for index, part := range parts { + switch patternParts[index] { + case PathMatchAll, part: + continue + default: + return false + } + } + return true +} diff --git a/vendor/github.com/compose-spec/compose-go/types/bytes.go b/vendor/github.com/compose-spec/compose-go/types/bytes.go new file mode 100644 index 0000000000..4c873cded0 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/bytes.go @@ -0,0 +1,42 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "fmt" + + "github.com/docker/go-units" +) + +// UnitBytes is the bytes type +type UnitBytes int64 + +// MarshalYAML makes UnitBytes implement yaml.Marshaller +func (u UnitBytes) MarshalYAML() (interface{}, error) { + return fmt.Sprintf("%d", u), nil +} + +// MarshalJSON makes UnitBytes implement json.Marshaler +func (u UnitBytes) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%d"`, u)), nil +} + +func (u *UnitBytes) DecodeMapstructure(value interface{}) error { + v, err := units.RAMInBytes(fmt.Sprint(value)) + *u = UnitBytes(v) + return err +} diff --git a/vendor/github.com/compose-spec/compose-go/types/command.go b/vendor/github.com/compose-spec/compose-go/types/command.go new file mode 100644 index 0000000000..90d575d8cd --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/command.go @@ -0,0 +1,86 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import "github.com/mattn/go-shellwords" + +// ShellCommand is a string or list of string args. +// +// When marshaled to YAML, nil command fields will be omitted if `omitempty` +// is specified as a struct tag. Explicitly empty commands (i.e. `[]` or +// empty string will serialize to an empty array (`[]`). +// +// When marshaled to JSON, the `omitempty` struct must NOT be specified. +// If the command field is nil, it will be serialized as `null`. +// Explicitly empty commands (i.e. 
`[]` or empty string) will serialize to +// an empty array (`[]`). +// +// The distinction between nil and explicitly empty is important to distinguish +// between an unset value and a provided, but empty, value, which should be +// preserved so that it can override any base value (e.g. container entrypoint). +// +// The different semantics between YAML and JSON are due to limitations with +// JSON marshaling + `omitempty` in the Go stdlib, while gopkg.in/yaml.v3 gives +// us more flexibility via the yaml.IsZeroer interface. +// +// In the future, it might make sense to make fields of this type be +// `*ShellCommand` to avoid this situation, but that would constitute a +// breaking change. +type ShellCommand []string + +// IsZero returns true if the slice is nil. +// +// Empty (but non-nil) slices are NOT considered zero values. +func (s ShellCommand) IsZero() bool { + // we do NOT want len(s) == 0, ONLY explicitly nil + return s == nil +} + +// MarshalYAML returns nil (which will be serialized as `null`) for nil slices +// and delegates to the standard marshaller behavior otherwise. +// +// NOTE: Typically the nil case here is not hit because IsZero has already +// short-circuited marshalling, but this ensures that the type serializes +// accurately if the `omitempty` struct tag is omitted/forgotten. +// +// A similar MarshalJSON() implementation is not needed because the Go stdlib +// already serializes nil slices to `null`, whereas gopkg.in/yaml.v3 by default +// serializes nil slices to `[]`. 
+func (s ShellCommand) MarshalYAML() (interface{}, error) { + if s == nil { + return nil, nil + } + return []string(s), nil +} + +func (s *ShellCommand) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + cmd, err := shellwords.Parse(v) + if err != nil { + return err + } + *s = cmd + case []interface{}: + cmd := make([]string, len(v)) + for i, s := range v { + cmd[i] = s.(string) + } + *s = cmd + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/types/config.go b/vendor/github.com/compose-spec/compose-go/types/config.go index b395363bd3..25e6f82ee0 100644 --- a/vendor/github.com/compose-spec/compose-go/types/config.go +++ b/vendor/github.com/compose-spec/compose-go/types/config.go @@ -18,22 +18,43 @@ package types import ( "encoding/json" + "runtime" + "strings" "github.com/mitchellh/mapstructure" ) +var ( + // isCaseInsensitiveEnvVars is true on platforms where environment variable names are treated case-insensitively. + isCaseInsensitiveEnvVars = (runtime.GOOS == "windows") +) + // ConfigDetails are the details about a group of ConfigFiles type ConfigDetails struct { Version string WorkingDir string ConfigFiles []ConfigFile - Environment map[string]string + Environment Mapping } // LookupEnv provides a lookup function for environment variables func (cd ConfigDetails) LookupEnv(key string) (string, bool) { v, ok := cd.Environment[key] - return v, ok + if !isCaseInsensitiveEnvVars || ok { + return v, ok + } + // variable names must be treated case-insensitively on some platforms (that is, Windows). + // Resolves in this way: + // * Return the value if its name matches with the passed name case-sensitively. + // * Otherwise, return the value if its lower-cased name matches lower-cased passed name. + // * The value is indefinite if multiple variables match. 
+ lowerKey := strings.ToLower(key) + for k, v := range cd.Environment { + if strings.ToLower(k) == lowerKey { + return v, true + } + } + return "", false } // ConfigFile is a filename and the contents of the file as a Dict @@ -46,16 +67,24 @@ type ConfigFile struct { Config map[string]interface{} } +func ToConfigFiles(path []string) (f []ConfigFile) { + for _, p := range path { + f = append(f, ConfigFile{Filename: p}) + } + return +} + // Config is a full compose file configuration and model type Config struct { - Filename string `yaml:"-" json:"-"` - Name string `yaml:",omitempty" json:"name,omitempty"` - Services Services `json:"services"` - Networks Networks `yaml:",omitempty" json:"networks,omitempty"` - Volumes Volumes `yaml:",omitempty" json:"volumes,omitempty"` - Secrets Secrets `yaml:",omitempty" json:"secrets,omitempty"` - Configs Configs `yaml:",omitempty" json:"configs,omitempty"` - Extensions Extensions `yaml:",inline" json:"-"` + Filename string `yaml:"-" json:"-"` + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Services Services `yaml:"services" json:"services"` + Networks Networks `yaml:"networks,omitempty" json:"networks,omitempty"` + Volumes Volumes `yaml:"volumes,omitempty" json:"volumes,omitempty"` + Secrets Secrets `yaml:"secrets,omitempty" json:"secrets,omitempty"` + Configs Configs `yaml:"configs,omitempty" json:"configs,omitempty"` + Extensions Extensions `yaml:",inline" json:"-"` + Include []IncludeConfig `yaml:"include,omitempty" json:"include,omitempty"` } // Volumes is a map of VolumeConfig diff --git a/vendor/github.com/compose-spec/compose-go/types/develop.go b/vendor/github.com/compose-spec/compose-go/types/develop.go new file mode 100644 index 0000000000..5fc10716f6 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/develop.go @@ -0,0 +1,36 @@ +/* + Copyright 2020 The Compose Specification Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +type DevelopConfig struct { + Watch []Trigger `json:"watch,omitempty"` +} + +type WatchAction string + +const ( + WatchActionSync WatchAction = "sync" + WatchActionRebuild WatchAction = "rebuild" + WatchActionSyncRestart WatchAction = "sync+restart" +) + +type Trigger struct { + Path string `json:"path,omitempty"` + Action WatchAction `json:"action,omitempty"` + Target string `json:"target,omitempty"` + Ignore []string `json:"ignore,omitempty"` +} diff --git a/vendor/github.com/compose-spec/compose-go/types/device.go b/vendor/github.com/compose-spec/compose-go/types/device.go new file mode 100644 index 0000000000..81b4bea4aa --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/device.go @@ -0,0 +1,53 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "strconv" + "strings" + + "github.com/pkg/errors" +) + +type DeviceRequest struct { + Capabilities []string `yaml:"capabilities,omitempty" json:"capabilities,omitempty"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + Count DeviceCount `yaml:"count,omitempty" json:"count,omitempty"` + IDs []string `yaml:"device_ids,omitempty" json:"device_ids,omitempty"` +} + +type DeviceCount int64 + +func (c *DeviceCount) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case int: + *c = DeviceCount(v) + case string: + if strings.ToLower(v) == "all" { + *c = -1 + return nil + } + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return errors.Errorf("invalid value %q, the only value allowed is 'all' or a number", v) + } + *c = DeviceCount(i) + default: + return errors.Errorf("invalid type %T for device count", v) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/types/duration.go b/vendor/github.com/compose-spec/compose-go/types/duration.go new file mode 100644 index 0000000000..95f562a7cf --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/duration.go @@ -0,0 +1,60 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "encoding/json" + "fmt" + "strings" + "time" +) + +// Duration is a thin wrapper around time.Duration with improved JSON marshalling +type Duration time.Duration + +func (d Duration) String() string { + return time.Duration(d).String() +} + +func (d *Duration) DecodeMapstructure(value interface{}) error { + v, err := time.ParseDuration(fmt.Sprint(value)) + if err != nil { + return err + } + *d = Duration(v) + return nil +} + +// MarshalJSON makes Duration implement json.Marshaler +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// MarshalYAML makes Duration implement yaml.Marshaler +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +func (d *Duration) UnmarshalJSON(b []byte) error { + s := strings.Trim(string(b), "\"") + timeDuration, err := time.ParseDuration(s) + if err != nil { + return err + } + *d = Duration(timeDuration) + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/types/healthcheck.go b/vendor/github.com/compose-spec/compose-go/types/healthcheck.go new file mode 100644 index 0000000000..1bbf5e9e21 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/healthcheck.go @@ -0,0 +1,53 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "fmt" +) + +// HealthCheckConfig the healthcheck configuration for a service +type HealthCheckConfig struct { + Test HealthCheckTest `yaml:"test,omitempty" json:"test,omitempty"` + Timeout *Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"` + Interval *Duration `yaml:"interval,omitempty" json:"interval,omitempty"` + Retries *uint64 `yaml:"retries,omitempty" json:"retries,omitempty"` + StartPeriod *Duration `yaml:"start_period,omitempty" json:"start_period,omitempty"` + StartInterval *Duration `yaml:"start_interval,omitempty" json:"start_interval,omitempty"` + Disable bool `yaml:"disable,omitempty" json:"disable,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline" json:"-"` +} + +// HealthCheckTest is the command run to test the health of a service +type HealthCheckTest []string + +func (l *HealthCheckTest) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + *l = []string{"CMD-SHELL", v} + case []interface{}: + seq := make([]string, len(v)) + for i, e := range v { + seq[i] = e.(string) + } + *l = seq + default: + return fmt.Errorf("unexpected value type %T for healthcheck.test", value) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/types/labels.go b/vendor/github.com/compose-spec/compose-go/types/labels.go new file mode 100644 index 0000000000..f30699b575 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/labels.go @@ -0,0 +1,80 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "fmt" + "strings" +) + +// Labels is a mapping type for labels +type Labels map[string]string + +func (l Labels) Add(key, value string) Labels { + if l == nil { + l = Labels{} + } + l[key] = value + return l +} + +func (l Labels) AsList() []string { + s := make([]string, len(l)) + i := 0 + for k, v := range l { + s[i] = fmt.Sprintf("%s=%s", k, v) + i++ + } + return s +} + +// label value can be a string | number | boolean | null (empty) +func labelValue(e interface{}) string { + if e == nil { + return "" + } + switch v := e.(type) { + case string: + return v + default: + return fmt.Sprint(v) + } +} + +func (l *Labels) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + labels := make(map[string]string, len(v)) + for k, e := range v { + labels[k] = labelValue(e) + } + *l = labels + case []interface{}: + labels := make(map[string]string, len(v)) + for _, s := range v { + k, e, ok := strings.Cut(fmt.Sprint(s), "=") + if !ok { + return fmt.Errorf("invalid label %q", v) + } + labels[k] = labelValue(e) + } + *l = labels + default: + return fmt.Errorf("unexpected value type %T for labels", value) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/types/options.go b/vendor/github.com/compose-spec/compose-go/types/options.go new file mode 100644 index 0000000000..7ae85793d1 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/options.go @@ -0,0 +1,46 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "fmt" + + "github.com/pkg/errors" +) + +// Options is a mapping type for options we pass as-is to container runtime +type Options map[string]string + +func (d *Options) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + m := make(map[string]string) + for key, e := range v { + if e == nil { + m[key] = "" + } else { + m[key] = fmt.Sprint(e) + } + } + *d = m + case map[string]string: + *d = v + default: + return errors.Errorf("invalid type %T for options", value) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/types/project.go b/vendor/github.com/compose-spec/compose-go/types/project.go index dc208ed587..713b207470 100644 --- a/vendor/github.com/compose-spec/compose-go/types/project.go +++ b/vendor/github.com/compose-spec/compose-go/types/project.go @@ -17,35 +17,48 @@ package types import ( + "bytes" + "encoding/json" "fmt" "os" "path/filepath" "sort" - "github.com/distribution/distribution/v3/reference" - "github.com/opencontainers/go-digest" + "github.com/compose-spec/compose-go/dotenv" + "github.com/compose-spec/compose-go/utils" + "github.com/distribution/reference" + godigest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" "golang.org/x/sync/errgroup" + "gopkg.in/yaml.v3" ) // Project is the result of loading a set of compose files type Project struct { - Name string `yaml:"name,omitempty" json:"name,omitempty"` - WorkingDir string `yaml:"-" json:"-"` - Services Services `json:"services"` - Networks Networks `yaml:",omitempty" 
json:"networks,omitempty"` - Volumes Volumes `yaml:",omitempty" json:"volumes,omitempty"` - Secrets Secrets `yaml:",omitempty" json:"secrets,omitempty"` - Configs Configs `yaml:",omitempty" json:"configs,omitempty"` - Extensions Extensions `yaml:",inline" json:"-"` // https://github.com/golang/go/issues/6213 - ComposeFiles []string `yaml:"-" json:"-"` - Environment map[string]string `yaml:"-" json:"-"` + Name string `yaml:"name,omitempty" json:"name,omitempty"` + WorkingDir string `yaml:"-" json:"-"` + Services Services `yaml:"services" json:"services"` + Networks Networks `yaml:"networks,omitempty" json:"networks,omitempty"` + Volumes Volumes `yaml:"volumes,omitempty" json:"volumes,omitempty"` + Secrets Secrets `yaml:"secrets,omitempty" json:"secrets,omitempty"` + Configs Configs `yaml:"configs,omitempty" json:"configs,omitempty"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` // https://github.com/golang/go/issues/6213 + + // IncludeReferences is keyed by Compose YAML filename and contains config for + // other Compose YAML files it directly triggered a load of via `include`. 
+ // + // Note: this is + IncludeReferences map[string][]IncludeConfig `yaml:"-" json:"-"` + ComposeFiles []string `yaml:"-" json:"-"` + Environment Mapping `yaml:"-" json:"-"` // DisabledServices track services which have been disable as profile is not active DisabledServices Services `yaml:"-" json:"-"` + Profiles []string `yaml:"-" json:"-"` } // ServiceNames return names for all services in this Compose config -func (p Project) ServiceNames() []string { +func (p *Project) ServiceNames() []string { var names []string for _, s := range p.Services { names = append(names, s.Name) @@ -55,7 +68,7 @@ func (p Project) ServiceNames() []string { } // VolumeNames return names for all volumes in this Compose config -func (p Project) VolumeNames() []string { +func (p *Project) VolumeNames() []string { var names []string for k := range p.Volumes { names = append(names, k) @@ -65,7 +78,7 @@ func (p Project) VolumeNames() []string { } // NetworkNames return names for all volumes in this Compose config -func (p Project) NetworkNames() []string { +func (p *Project) NetworkNames() []string { var names []string for k := range p.Networks { names = append(names, k) @@ -75,7 +88,7 @@ func (p Project) NetworkNames() []string { } // SecretNames return names for all secrets in this Compose config -func (p Project) SecretNames() []string { +func (p *Project) SecretNames() []string { var names []string for k := range p.Secrets { names = append(names, k) @@ -85,7 +98,7 @@ func (p Project) SecretNames() []string { } // ConfigNames return names for all configs in this Compose config -func (p Project) ConfigNames() []string { +func (p *Project) ConfigNames() []string { var names []string for k := range p.Configs { names = append(names, k) @@ -95,11 +108,20 @@ func (p Project) ConfigNames() []string { } // GetServices retrieve services by names, or return all services if no name specified -func (p Project) GetServices(names ...string) (Services, error) { +func (p *Project) GetServices(names 
...string) (Services, error) { + services, servicesNotFound := p.getServicesByNames(names...) + if len(servicesNotFound) > 0 { + return services, fmt.Errorf("no such service: %s", servicesNotFound[0]) + } + return services, nil +} + +func (p *Project) getServicesByNames(names ...string) (Services, []string) { if len(names) == 0 { return p.Services, nil } services := Services{} + var servicesNotFound []string for _, name := range names { var serviceConfig *ServiceConfig for _, s := range p.Services { @@ -109,15 +131,26 @@ func (p Project) GetServices(names ...string) (Services, error) { } } if serviceConfig == nil { - return services, fmt.Errorf("no such service: %s", name) + servicesNotFound = append(servicesNotFound, name) + continue } services = append(services, *serviceConfig) } - return services, nil + return services, servicesNotFound +} + +// GetDisabledService retrieve disabled service by name +func (p Project) GetDisabledService(name string) (ServiceConfig, error) { + for _, config := range p.DisabledServices { + if config.Name == name { + return config, nil + } + } + return ServiceConfig{}, fmt.Errorf("no such service: %s", name) } // GetService retrieve a specific service by name -func (p Project) GetService(name string) (ServiceConfig, error) { +func (p *Project) GetService(name string) (ServiceConfig, error) { services, err := p.GetServices(name) if err != nil { return ServiceConfig{}, err @@ -128,7 +161,7 @@ func (p Project) GetService(name string) (ServiceConfig, error) { return services[0], nil } -func (p Project) AllServices() Services { +func (p *Project) AllServices() Services { var all Services all = append(all, p.Services...) all = append(all, p.DisabledServices...) 
@@ -137,23 +170,44 @@ func (p Project) AllServices() Services { type ServiceFunc func(service ServiceConfig) error -// WithServices run ServiceFunc on each service and dependencies in dependency order -func (p Project) WithServices(names []string, fn ServiceFunc) error { - return p.withServices(names, fn, map[string]bool{}) +// WithServices run ServiceFunc on each service and dependencies according to DependencyPolicy +func (p *Project) WithServices(names []string, fn ServiceFunc, options ...DependencyOption) error { + if len(options) == 0 { + // backward compatibility + options = []DependencyOption{IncludeDependencies} + } + return p.withServices(names, fn, map[string]bool{}, options, map[string]ServiceDependency{}) } -func (p Project) withServices(names []string, fn ServiceFunc, done map[string]bool) error { - services, err := p.GetServices(names...) - if err != nil { - return err +func (p *Project) withServices(names []string, fn ServiceFunc, seen map[string]bool, options []DependencyOption, dependencies map[string]ServiceDependency) error { + services, servicesNotFound := p.getServicesByNames(names...) 
+ if len(servicesNotFound) > 0 { + for _, serviceNotFound := range servicesNotFound { + if dependency, ok := dependencies[serviceNotFound]; !ok || dependency.Required { + return fmt.Errorf("no such service: %s", serviceNotFound) + } + } } for _, service := range services { - if done[service.Name] { + if seen[service.Name] { continue } - dependencies := service.GetDependencies() + seen[service.Name] = true + var dependencies map[string]ServiceDependency + for _, policy := range options { + switch policy { + case IncludeDependents: + dependencies = utils.MapsAppend(dependencies, p.dependentsForService(service)) + case IncludeDependencies: + dependencies = utils.MapsAppend(dependencies, service.DependsOn) + case IgnoreDependencies: + // Noop + default: + return fmt.Errorf("unsupported dependency policy %d", policy) + } + } if len(dependencies) > 0 { - err := p.withServices(dependencies, fn, done) + err := p.withServices(utils.MapKeys(dependencies), fn, seen, options, dependencies) if err != nil { return err } @@ -161,11 +215,26 @@ func (p Project) withServices(names []string, fn ServiceFunc, done map[string]bo if err := fn(service); err != nil { return err } - done[service.Name] = true } return nil } +func (p *Project) GetDependentsForService(s ServiceConfig) []string { + return utils.MapKeys(p.dependentsForService(s)) +} + +func (p *Project) dependentsForService(s ServiceConfig) map[string]ServiceDependency { + dependent := make(map[string]ServiceDependency) + for _, service := range p.Services { + for name, dependency := range service.DependsOn { + if name == s.Name { + dependent[service.Name] = dependency + } + } + } + return dependent +} + // RelativePath resolve a relative path based project's working directory func (p *Project) RelativePath(path string) string { if path[0] == '~' { @@ -216,7 +285,7 @@ func (p *Project) ApplyProfiles(profiles []string) { } } var enabled, disabled Services - for _, service := range p.Services { + for _, service := range 
p.AllServices() { if service.HasProfile(profiles) { enabled = append(enabled, service) } else { @@ -225,6 +294,41 @@ func (p *Project) ApplyProfiles(profiles []string) { } p.Services = enabled p.DisabledServices = disabled + p.Profiles = profiles +} + +// EnableServices ensure services are enabled and activate profiles accordingly +func (p *Project) EnableServices(names ...string) error { + if len(names) == 0 { + return nil + } + var enabled []string + for _, name := range names { + _, err := p.GetService(name) + if err == nil { + // already enabled + continue + } + def, err := p.GetDisabledService(name) + if err != nil { + return err + } + enabled = append(enabled, def.Profiles...) + } + + profiles := p.Profiles +PROFILES: + for _, profile := range enabled { + for _, p := range profiles { + if p == profile { + continue PROFILES + } + } + profiles = append(profiles, profile) + } + p.ApplyProfiles(profiles) + + return p.ResolveServicesEnvironment(true) } // WithoutUnnecessaryResources drops networks/volumes/secrets/configs that are not referenced by active services @@ -246,6 +350,11 @@ func (p *Project) WithoutUnnecessaryResources() { for _, v := range s.Secrets { requiredSecrets[v.Source] = struct{}{} } + if s.Build != nil { + for _, v := range s.Build.Secrets { + requiredSecrets[v.Source] = struct{}{} + } + } for _, v := range s.Configs { requiredConfigs[v.Source] = struct{}{} } @@ -253,31 +362,47 @@ func (p *Project) WithoutUnnecessaryResources() { networks := Networks{} for k := range requiredNetworks { - networks[k] = p.Networks[k] + if value, ok := p.Networks[k]; ok { + networks[k] = value + } } p.Networks = networks volumes := Volumes{} for k := range requiredVolumes { - volumes[k] = p.Volumes[k] + if value, ok := p.Volumes[k]; ok { + volumes[k] = value + } } p.Volumes = volumes secrets := Secrets{} for k := range requiredSecrets { - secrets[k] = p.Secrets[k] + if value, ok := p.Secrets[k]; ok { + secrets[k] = value + } } p.Secrets = secrets configs := 
Configs{} for k := range requiredConfigs { - configs[k] = p.Configs[k] + if value, ok := p.Configs[k]; ok { + configs[k] = value + } } p.Configs = configs } -// ForServices restrict the project model to a subset of services -func (p *Project) ForServices(names []string) error { +type DependencyOption int + +const ( + IncludeDependencies = iota + IncludeDependents + IgnoreDependencies +) + +// ForServices restrict the project model to selected services and dependencies +func (p *Project) ForServices(names []string, options ...DependencyOption) error { if len(names) == 0 { // All services return nil @@ -287,7 +412,7 @@ func (p *Project) ForServices(names []string) error { err := p.WithServices(names, func(service ServiceConfig) error { set[service.Name] = struct{}{} return nil - }) + }, options...) if err != nil { return err } @@ -296,17 +421,40 @@ func (p *Project) ForServices(names []string) error { var enabled Services for _, s := range p.Services { if _, ok := set[s.Name]; ok { + for _, option := range options { + if option == IgnoreDependencies { + // remove all dependencies but those implied by explicitly selected services + dependencies := s.DependsOn + for d := range dependencies { + if _, ok := set[d]; !ok { + delete(dependencies, d) + } + } + s.DependsOn = dependencies + } + } enabled = append(enabled, s) } else { - p.DisabledServices = append(p.DisabledServices, s) + p.DisableService(s) } } p.Services = enabled return nil } +func (p *Project) DisableService(service ServiceConfig) { + // We should remove all dependencies which reference the disabled service + for i, s := range p.Services { + if _, ok := s.DependsOn[service.Name]; ok { + delete(s.DependsOn, service.Name) + p.Services[i] = s + } + } + p.DisabledServices = append(p.DisabledServices, service) +} + // ResolveImages updates services images to include digest computed by a resolver function -func (p *Project) ResolveImages(resolver func(named reference.Named) (digest.Digest, error)) error { +func 
(p *Project) ResolveImages(resolver func(named reference.Named) (godigest.Digest, error)) error { eg := errgroup.Group{} for i, s := range p.Services { idx := i @@ -340,3 +488,79 @@ func (p *Project) ResolveImages(resolver func(named reference.Named) (digest.Dig } return eg.Wait() } + +// MarshalYAML marshal Project into a yaml tree +func (p *Project) MarshalYAML() ([]byte, error) { + buf := bytes.NewBuffer([]byte{}) + encoder := yaml.NewEncoder(buf) + encoder.SetIndent(2) + // encoder.CompactSeqIndent() FIXME https://github.com/go-yaml/yaml/pull/753 + err := encoder.Encode(p) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSON makes Config implement json.Marshaler +func (p *Project) MarshalJSON() ([]byte, error) { + m := map[string]interface{}{ + "name": p.Name, + "services": p.Services, + } + + if len(p.Networks) > 0 { + m["networks"] = p.Networks + } + if len(p.Volumes) > 0 { + m["volumes"] = p.Volumes + } + if len(p.Secrets) > 0 { + m["secrets"] = p.Secrets + } + if len(p.Configs) > 0 { + m["configs"] = p.Configs + } + for k, v := range p.Extensions { + m[k] = v + } + return json.Marshal(m) +} + +// ResolveServicesEnvironment parse env_files set for services to resolve the actual environment map for services +func (p Project) ResolveServicesEnvironment(discardEnvFiles bool) error { + for i, service := range p.Services { + service.Environment = service.Environment.Resolve(p.Environment.Resolve) + + environment := MappingWithEquals{} + // resolve variables based on other files we already parsed, + project's environment + var resolve dotenv.LookupFn = func(s string) (string, bool) { + v, ok := environment[s] + if ok && v != nil { + return *v, ok + } + return p.Environment.Resolve(s) + } + + for _, envFile := range service.EnvFile { + b, err := os.ReadFile(envFile) + if err != nil { + return errors.Wrapf(err, "Failed to load %s", envFile) + } + + fileVars, err := dotenv.ParseWithLookup(bytes.NewBuffer(b), resolve) + if err != nil 
{ + return errors.Wrapf(err, "failed to read %s", envFile) + } + environment.OverrideBy(Mapping(fileVars).ToMappingWithEquals()) + } + + service.Environment = environment.OverrideBy(service.Environment) + + if discardEnvFiles { + service.EnvFile = nil + } + p.Services[i] = service + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/types/stringOrList.go b/vendor/github.com/compose-spec/compose-go/types/stringOrList.go new file mode 100644 index 0000000000..3d91ad2a5c --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/types/stringOrList.go @@ -0,0 +1,61 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "fmt" + + "github.com/pkg/errors" +) + +// StringList is a type for fields that can be a string or list of strings +type StringList []string + +func (l *StringList) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + *l = []string{v} + case []interface{}: + list := make([]string, len(v)) + for i, e := range v { + list[i] = e.(string) + } + *l = list + default: + return errors.Errorf("invalid type %T for string list", value) + } + return nil +} + +// StringOrNumberList is a type for fields that can be a list of strings or numbers +type StringOrNumberList []string + +func (l *StringOrNumberList) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + *l = []string{v} + case []interface{}: + list := make([]string, len(v)) + for i, e := range v { + list[i] = fmt.Sprint(e) + } + *l = list + default: + return errors.Errorf("invalid type %T for string list", value) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/types/types.go b/vendor/github.com/compose-spec/compose-go/types/types.go index ec4b0bc73f..0407462a3f 100644 --- a/vendor/github.com/compose-spec/compose-go/types/types.go +++ b/vendor/github.com/compose-spec/compose-go/types/types.go @@ -21,47 +21,10 @@ import ( "fmt" "sort" "strings" - "time" "github.com/docker/go-connections/nat" ) -// Duration is a thin wrapper around time.Duration with improved JSON marshalling -type Duration time.Duration - -func (d Duration) String() string { - return time.Duration(d).String() -} - -// ConvertDurationPtr converts a type defined Duration pointer to a time.Duration pointer with the same value. 
-func ConvertDurationPtr(d *Duration) *time.Duration { - if d == nil { - return nil - } - res := time.Duration(*d) - return &res -} - -// MarshalJSON makes Duration implement json.Marshaler -func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.String()) -} - -// MarshalYAML makes Duration implement yaml.Marshaler -func (d Duration) MarshalYAML() (interface{}, error) { - return d.String(), nil -} - -func (d *Duration) UnmarshalJSON(b []byte) error { - s := strings.Trim(string(b), "\"") - timeDuration, err := time.ParseDuration(s) - if err != nil { - return err - } - *d = Duration(timeDuration) - return nil -} - // Services is a list of ServiceConfig type Services []ServiceConfig @@ -86,94 +49,126 @@ func (s Services) MarshalJSON() ([]byte, error) { // ServiceConfig is the configuration of one service type ServiceConfig struct { Name string `yaml:"-" json:"-"` - Profiles []string `mapstructure:"profiles" yaml:"profiles,omitempty" json:"profiles,omitempty"` - - Build *BuildConfig `yaml:",omitempty" json:"build,omitempty"` - BlkioConfig *BlkioConfig `mapstructure:"blkio_config" yaml:",omitempty" json:"blkio_config,omitempty"` - CapAdd []string `mapstructure:"cap_add" yaml:"cap_add,omitempty" json:"cap_add,omitempty"` - CapDrop []string `mapstructure:"cap_drop" yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"` - CgroupParent string `mapstructure:"cgroup_parent" yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"` - CPUCount int64 `mapstructure:"cpu_count" yaml:"cpu_count,omitempty" json:"cpu_count,omitempty"` - CPUPercent float32 `mapstructure:"cpu_percent" yaml:"cpu_percent,omitempty" json:"cpu_percent,omitempty"` - CPUPeriod int64 `mapstructure:"cpu_period" yaml:"cpu_period,omitempty" json:"cpu_period,omitempty"` - CPUQuota int64 `mapstructure:"cpu_quota" yaml:"cpu_quota,omitempty" json:"cpu_quota,omitempty"` - CPURTPeriod int64 `mapstructure:"cpu_rt_period" yaml:"cpu_rt_period,omitempty" json:"cpu_rt_period,omitempty"` - CPURTRuntime 
int64 `mapstructure:"cpu_rt_runtime" yaml:"cpu_rt_runtime,omitempty" json:"cpu_rt_runtime,omitempty"` - CPUS float32 `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"` - CPUSet string `mapstructure:"cpuset" yaml:"cpuset,omitempty" json:"cpuset,omitempty"` - CPUShares int64 `mapstructure:"cpu_shares" yaml:"cpu_shares,omitempty" json:"cpu_shares,omitempty"` - Command ShellCommand `yaml:",omitempty" json:"command,omitempty"` - Configs []ServiceConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"` - ContainerName string `mapstructure:"container_name" yaml:"container_name,omitempty" json:"container_name,omitempty"` - CredentialSpec *CredentialSpecConfig `mapstructure:"credential_spec" yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"` - DependsOn DependsOnConfig `mapstructure:"depends_on" yaml:"depends_on,omitempty" json:"depends_on,omitempty"` - Deploy *DeployConfig `yaml:",omitempty" json:"deploy,omitempty"` - DeviceCgroupRules []string `mapstructure:"device_cgroup_rules" yaml:"device_cgroup_rules,omitempty" json:"device_cgroup_rules,omitempty"` - Devices []string `yaml:",omitempty" json:"devices,omitempty"` - DNS StringList `yaml:",omitempty" json:"dns,omitempty"` - DNSOpts []string `mapstructure:"dns_opt" yaml:"dns_opt,omitempty" json:"dns_opt,omitempty"` - DNSSearch StringList `mapstructure:"dns_search" yaml:"dns_search,omitempty" json:"dns_search,omitempty"` - Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"` - DomainName string `mapstructure:"domainname" yaml:"domainname,omitempty" json:"domainname,omitempty"` - Entrypoint ShellCommand `yaml:",omitempty" json:"entrypoint,omitempty"` - Environment MappingWithEquals `yaml:",omitempty" json:"environment,omitempty"` - EnvFile StringList `mapstructure:"env_file" yaml:"env_file,omitempty" json:"env_file,omitempty"` - Expose StringOrNumberList `yaml:",omitempty" json:"expose,omitempty"` - Extends ExtendsConfig `yaml:"extends,omitempty" 
json:"extends,omitempty"` - ExternalLinks []string `mapstructure:"external_links" yaml:"external_links,omitempty" json:"external_links,omitempty"` - ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` - GroupAdd []string `mapstructure:"group_add" yaml:"group_add,omitempty" json:"group_add,omitempty"` - Hostname string `yaml:",omitempty" json:"hostname,omitempty"` - HealthCheck *HealthCheckConfig `yaml:",omitempty" json:"healthcheck,omitempty"` - Image string `yaml:",omitempty" json:"image,omitempty"` - Init *bool `yaml:",omitempty" json:"init,omitempty"` - Ipc string `yaml:",omitempty" json:"ipc,omitempty"` - Isolation string `mapstructure:"isolation" yaml:"isolation,omitempty" json:"isolation,omitempty"` - Labels Labels `yaml:",omitempty" json:"labels,omitempty"` - CustomLabels Labels `yaml:"-" json:"-"` - Links []string `yaml:",omitempty" json:"links,omitempty"` - Logging *LoggingConfig `yaml:",omitempty" json:"logging,omitempty"` - LogDriver string `mapstructure:"log_driver" yaml:"log_driver,omitempty" json:"log_driver,omitempty"` - LogOpt map[string]string `mapstructure:"log_opt" yaml:"log_opt,omitempty" json:"log_opt,omitempty"` - MemLimit UnitBytes `mapstructure:"mem_limit" yaml:"mem_limit,omitempty" json:"mem_limit,omitempty"` - MemReservation UnitBytes `mapstructure:"mem_reservation" yaml:"mem_reservation,omitempty" json:"mem_reservation,omitempty"` - MemSwapLimit UnitBytes `mapstructure:"memswap_limit" yaml:"memswap_limit,omitempty" json:"memswap_limit,omitempty"` - MemSwappiness UnitBytes `mapstructure:"mem_swappiness" yaml:"mem_swappiness,omitempty" json:"mem_swappiness,omitempty"` - MacAddress string `mapstructure:"mac_address" yaml:"mac_address,omitempty" json:"mac_address,omitempty"` - Net string `yaml:"net,omitempty" json:"net,omitempty"` - NetworkMode string `mapstructure:"network_mode" yaml:"network_mode,omitempty" json:"network_mode,omitempty"` - Networks map[string]*ServiceNetworkConfig 
`yaml:",omitempty" json:"networks,omitempty"` - OomKillDisable bool `mapstructure:"oom_kill_disable" yaml:"oom_kill_disable,omitempty" json:"oom_kill_disable,omitempty"` - OomScoreAdj int64 `mapstructure:"oom_score_adj" yaml:"oom_score_adj,omitempty" json:"oom_score_adj,omitempty"` - Pid string `yaml:",omitempty" json:"pid,omitempty"` - PidsLimit int64 `mapstructure:"pids_limit" yaml:"pids_limit,omitempty" json:"pids_limit,omitempty"` - Platform string `yaml:",omitempty" json:"platform,omitempty"` - Ports []ServicePortConfig `yaml:",omitempty" json:"ports,omitempty"` - Privileged bool `yaml:",omitempty" json:"privileged,omitempty"` - PullPolicy string `mapstructure:"pull_policy" yaml:"pull_policy,omitempty" json:"pull_policy,omitempty"` - ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"` - Restart string `yaml:",omitempty" json:"restart,omitempty"` - Runtime string `yaml:",omitempty" json:"runtime,omitempty"` - Scale int `yaml:"-" json:"-"` - Secrets []ServiceSecretConfig `yaml:",omitempty" json:"secrets,omitempty"` - SecurityOpt []string `mapstructure:"security_opt" yaml:"security_opt,omitempty" json:"security_opt,omitempty"` - ShmSize UnitBytes `mapstructure:"shm_size" yaml:"shm_size,omitempty" json:"shm_size,omitempty"` - StdinOpen bool `mapstructure:"stdin_open" yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"` - StopGracePeriod *Duration `mapstructure:"stop_grace_period" yaml:"stop_grace_period,omitempty" json:"stop_grace_period,omitempty"` - StopSignal string `mapstructure:"stop_signal" yaml:"stop_signal,omitempty" json:"stop_signal,omitempty"` - Sysctls Mapping `yaml:",omitempty" json:"sysctls,omitempty"` - Tmpfs StringList `yaml:",omitempty" json:"tmpfs,omitempty"` - Tty bool `mapstructure:"tty" yaml:"tty,omitempty" json:"tty,omitempty"` - Ulimits map[string]*UlimitsConfig `yaml:",omitempty" json:"ulimits,omitempty"` - User string `yaml:",omitempty" json:"user,omitempty"` - UserNSMode string 
`mapstructure:"userns_mode" yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"` - Uts string `yaml:"uts,omitempty" json:"uts,omitempty"` - VolumeDriver string `mapstructure:"volume_driver" yaml:"volume_driver,omitempty" json:"volume_driver,omitempty"` - Volumes []ServiceVolumeConfig `yaml:",omitempty" json:"volumes,omitempty"` - VolumesFrom []string `mapstructure:"volumes_from" yaml:"volumes_from,omitempty" json:"volumes_from,omitempty"` - WorkingDir string `mapstructure:"working_dir" yaml:"working_dir,omitempty" json:"working_dir,omitempty"` - - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Profiles []string `yaml:"profiles,omitempty" json:"profiles,omitempty"` + + Annotations Mapping `yaml:"annotations,omitempty" json:"annotations,omitempty"` + Attach *bool `yaml:"attach,omitempty" json:"attach,omitempty"` + Build *BuildConfig `yaml:"build,omitempty" json:"build,omitempty"` + Develop *DevelopConfig `yaml:"develop,omitempty" json:"develop,omitempty"` + BlkioConfig *BlkioConfig `yaml:"blkio_config,omitempty" json:"blkio_config,omitempty"` + CapAdd []string `yaml:"cap_add,omitempty" json:"cap_add,omitempty"` + CapDrop []string `yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"` + CgroupParent string `yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"` + Cgroup string `yaml:"cgroup,omitempty" json:"cgroup,omitempty"` + CPUCount int64 `yaml:"cpu_count,omitempty" json:"cpu_count,omitempty"` + CPUPercent float32 `yaml:"cpu_percent,omitempty" json:"cpu_percent,omitempty"` + CPUPeriod int64 `yaml:"cpu_period,omitempty" json:"cpu_period,omitempty"` + CPUQuota int64 `yaml:"cpu_quota,omitempty" json:"cpu_quota,omitempty"` + CPURTPeriod int64 `yaml:"cpu_rt_period,omitempty" json:"cpu_rt_period,omitempty"` + CPURTRuntime int64 `yaml:"cpu_rt_runtime,omitempty" json:"cpu_rt_runtime,omitempty"` + CPUS float32 `yaml:"cpus,omitempty" json:"cpus,omitempty"` + CPUSet string `yaml:"cpuset,omitempty" json:"cpuset,omitempty"` + CPUShares int64 
`yaml:"cpu_shares,omitempty" json:"cpu_shares,omitempty"` + + // Command for the service containers. + // If set, overrides COMMAND from the image. + // + // Set to `[]` or an empty string to clear the command from the image. + Command ShellCommand `yaml:"command,omitempty" json:"command"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details. + + Configs []ServiceConfigObjConfig `yaml:"configs,omitempty" json:"configs,omitempty"` + ContainerName string `yaml:"container_name,omitempty" json:"container_name,omitempty"` + CredentialSpec *CredentialSpecConfig `yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"` + DependsOn DependsOnConfig `yaml:"depends_on,omitempty" json:"depends_on,omitempty"` + Deploy *DeployConfig `yaml:"deploy,omitempty" json:"deploy,omitempty"` + DeviceCgroupRules []string `yaml:"device_cgroup_rules,omitempty" json:"device_cgroup_rules,omitempty"` + Devices []string `yaml:"devices,omitempty" json:"devices,omitempty"` + DNS StringList `yaml:"dns,omitempty" json:"dns,omitempty"` + DNSOpts []string `yaml:"dns_opt,omitempty" json:"dns_opt,omitempty"` + DNSSearch StringList `yaml:"dns_search,omitempty" json:"dns_search,omitempty"` + Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"` + DomainName string `yaml:"domainname,omitempty" json:"domainname,omitempty"` + + // Entrypoint for the service containers. + // If set, overrides ENTRYPOINT from the image. + // + // Set to `[]` or an empty string to clear the entrypoint from the image. + Entrypoint ShellCommand `yaml:"entrypoint,omitempty" json:"entrypoint"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details. 
+ + Environment MappingWithEquals `yaml:"environment,omitempty" json:"environment,omitempty"` + EnvFile StringList `yaml:"env_file,omitempty" json:"env_file,omitempty"` + Expose StringOrNumberList `yaml:"expose,omitempty" json:"expose,omitempty"` + Extends *ExtendsConfig `yaml:"extends,omitempty" json:"extends,omitempty"` + ExternalLinks []string `yaml:"external_links,omitempty" json:"external_links,omitempty"` + ExtraHosts HostsList `yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` + GroupAdd []string `yaml:"group_add,omitempty" json:"group_add,omitempty"` + Hostname string `yaml:"hostname,omitempty" json:"hostname,omitempty"` + HealthCheck *HealthCheckConfig `yaml:"healthcheck,omitempty" json:"healthcheck,omitempty"` + Image string `yaml:"image,omitempty" json:"image,omitempty"` + Init *bool `yaml:"init,omitempty" json:"init,omitempty"` + Ipc string `yaml:"ipc,omitempty" json:"ipc,omitempty"` + Isolation string `yaml:"isolation,omitempty" json:"isolation,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + CustomLabels Labels `yaml:"-" json:"-"` + Links []string `yaml:"links,omitempty" json:"links,omitempty"` + Logging *LoggingConfig `yaml:"logging,omitempty" json:"logging,omitempty"` + LogDriver string `yaml:"log_driver,omitempty" json:"log_driver,omitempty"` + LogOpt map[string]string `yaml:"log_opt,omitempty" json:"log_opt,omitempty"` + MemLimit UnitBytes `yaml:"mem_limit,omitempty" json:"mem_limit,omitempty"` + MemReservation UnitBytes `yaml:"mem_reservation,omitempty" json:"mem_reservation,omitempty"` + MemSwapLimit UnitBytes `yaml:"memswap_limit,omitempty" json:"memswap_limit,omitempty"` + MemSwappiness UnitBytes `yaml:"mem_swappiness,omitempty" json:"mem_swappiness,omitempty"` + MacAddress string `yaml:"mac_address,omitempty" json:"mac_address,omitempty"` + Net string `yaml:"net,omitempty" json:"net,omitempty"` + NetworkMode string `yaml:"network_mode,omitempty" json:"network_mode,omitempty"` + Networks 
map[string]*ServiceNetworkConfig `yaml:"networks,omitempty" json:"networks,omitempty"` + OomKillDisable bool `yaml:"oom_kill_disable,omitempty" json:"oom_kill_disable,omitempty"` + OomScoreAdj int64 `yaml:"oom_score_adj,omitempty" json:"oom_score_adj,omitempty"` + Pid string `yaml:"pid,omitempty" json:"pid,omitempty"` + PidsLimit int64 `yaml:"pids_limit,omitempty" json:"pids_limit,omitempty"` + Platform string `yaml:"platform,omitempty" json:"platform,omitempty"` + Ports []ServicePortConfig `yaml:"ports,omitempty" json:"ports,omitempty"` + Privileged bool `yaml:"privileged,omitempty" json:"privileged,omitempty"` + PullPolicy string `yaml:"pull_policy,omitempty" json:"pull_policy,omitempty"` + ReadOnly bool `yaml:"read_only,omitempty" json:"read_only,omitempty"` + Restart string `yaml:"restart,omitempty" json:"restart,omitempty"` + Runtime string `yaml:"runtime,omitempty" json:"runtime,omitempty"` + Scale int `yaml:"scale,omitempty" json:"scale,omitempty"` + Secrets []ServiceSecretConfig `yaml:"secrets,omitempty" json:"secrets,omitempty"` + SecurityOpt []string `yaml:"security_opt,omitempty" json:"security_opt,omitempty"` + ShmSize UnitBytes `yaml:"shm_size,omitempty" json:"shm_size,omitempty"` + StdinOpen bool `yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"` + StopGracePeriod *Duration `yaml:"stop_grace_period,omitempty" json:"stop_grace_period,omitempty"` + StopSignal string `yaml:"stop_signal,omitempty" json:"stop_signal,omitempty"` + Sysctls Mapping `yaml:"sysctls,omitempty" json:"sysctls,omitempty"` + Tmpfs StringList `yaml:"tmpfs,omitempty" json:"tmpfs,omitempty"` + Tty bool `yaml:"tty,omitempty" json:"tty,omitempty"` + Ulimits map[string]*UlimitsConfig `yaml:"ulimits,omitempty" json:"ulimits,omitempty"` + User string `yaml:"user,omitempty" json:"user,omitempty"` + UserNSMode string `yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"` + Uts string `yaml:"uts,omitempty" json:"uts,omitempty"` + VolumeDriver string `yaml:"volume_driver,omitempty" 
 json:"volume_driver,omitempty"` + Volumes []ServiceVolumeConfig `yaml:"volumes,omitempty" json:"volumes,omitempty"` + VolumesFrom []string `yaml:"volumes_from,omitempty" json:"volumes_from,omitempty"` + WorkingDir string `yaml:"working_dir,omitempty" json:"working_dir,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline" json:"-"` +} + +// MarshalYAML makes ServiceConfig implement yaml.Marshaller +func (s ServiceConfig) MarshalYAML() (interface{}, error) { + type t ServiceConfig + value := t(s) + value.Scale = 0 // deprecated, but default value "1" doesn't match omitempty + return value, nil +} + +// MarshalJSON makes ServiceConfig implement json.Marshaller +func (s ServiceConfig) MarshalJSON() ([]byte, error) { + type t ServiceConfig + value := t(s) + value.Scale = 0 // deprecated, but default value "1" doesn't match omitempty + return json.Marshal(value) } // NetworksByPriority return the service networks IDs sorted according to Priority @@ -204,26 +199,26 @@ func (s *ServiceConfig) NetworksByPriority() []string { } const ( - //PullPolicyAlways always pull images + // PullPolicyAlways always pull images PullPolicyAlways = "always" - //PullPolicyNever never pull images + // PullPolicyNever never pull images PullPolicyNever = "never" - //PullPolicyIfNotPresent pull missing images + // PullPolicyIfNotPresent pull missing images PullPolicyIfNotPresent = "if_not_present" - //PullPolicyMissing pull missing images + // PullPolicyMissing pull missing images PullPolicyMissing = "missing" - //PullPolicyBuild force building images + // PullPolicyBuild force building images PullPolicyBuild = "build" ) const ( - //RestartPolicyAlways always restart the container if it stops + // RestartPolicyAlways always restart the container if it stops RestartPolicyAlways = "always" - //RestartPolicyOnFailure restart the container if it exits due to an error + // RestartPolicyOnFailure restart the container if it exits due to an error RestartPolicyOnFailure = "on-failure" - 
//RestartPolicyNo do not automatically restart the container + // RestartPolicyNo do not automatically restart the container RestartPolicyNo = "no" - //RestartPolicyUnlessStopped always restart the container unless the container is stopped (manually or otherwise) + // RestartPolicyUnlessStopped always restart the container unless the container is stopped (manually or otherwise) RestartPolicyUnlessStopped = "unless-stopped" ) @@ -241,42 +236,32 @@ const ( NetworkModeContainerPrefix = ContainerPrefix ) -// GetDependencies retrieve all services this service depends on +// GetDependencies retrieves all services this service depends on func (s ServiceConfig) GetDependencies() []string { - dependencies := make(set) - for dependency := range s.DependsOn { - dependencies.append(dependency) - } - for _, link := range s.Links { - parts := strings.Split(link, ":") - if len(parts) == 2 { - dependencies.append(parts[0]) - } else { - dependencies.append(link) - } - } - if strings.HasPrefix(s.NetworkMode, ServicePrefix) { - dependencies.append(s.NetworkMode[len(ServicePrefix):]) - } - if strings.HasPrefix(s.Ipc, ServicePrefix) { - dependencies.append(s.Ipc[len(ServicePrefix):]) + var dependencies []string + for service := range s.DependsOn { + dependencies = append(dependencies, service) } - if strings.HasPrefix(s.Pid, ServicePrefix) { - dependencies.append(s.Pid[len(ServicePrefix):]) - } - for _, vol := range s.VolumesFrom { - if !strings.HasPrefix(s.Pid, ContainerPrefix) { - dependencies.append(vol) + return dependencies +} + +// GetDependents retrieves all services which depend on this service +func (s ServiceConfig) GetDependents(p *Project) []string { + var dependent []string + for _, service := range p.Services { + for name := range service.DependsOn { + if name == s.Name { + dependent = append(dependent, service.Name) + } } } - - return dependencies.toSlice() + return dependent } type set map[string]struct{} -func (s set) append(strings ...string) { - for _, str := range 
strings { +func (s set) append(strs ...string) { + for _, str := range strs { s[str] = struct{}{} } } @@ -291,33 +276,39 @@ func (s set) toSlice() []string { // BuildConfig is a type for build type BuildConfig struct { - Context string `yaml:",omitempty" json:"context,omitempty"` - Dockerfile string `yaml:",omitempty" json:"dockerfile,omitempty"` - Args MappingWithEquals `yaml:",omitempty" json:"args,omitempty"` - SSH SSHConfig `yaml:"ssh,omitempty" json:"ssh,omitempty"` - Labels Labels `yaml:",omitempty" json:"labels,omitempty"` - CacheFrom StringList `mapstructure:"cache_from" yaml:"cache_from,omitempty" json:"cache_from,omitempty"` - CacheTo StringList `mapstructure:"cache_to" yaml:"cache_to,omitempty" json:"cache_to,omitempty"` - NoCache bool `mapstructure:"no_cache" yaml:"no_cache,omitempty" json:"no_cache,omitempty"` - Pull bool `mapstructure:"pull" yaml:"pull,omitempty" json:"pull,omitempty"` - ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` - Isolation string `yaml:",omitempty" json:"isolation,omitempty"` - Network string `yaml:",omitempty" json:"network,omitempty"` - Target string `yaml:",omitempty" json:"target,omitempty"` - - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Context string `yaml:"context,omitempty" json:"context,omitempty"` + Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"` + DockerfileInline string `yaml:"dockerfile_inline,omitempty" json:"dockerfile_inline,omitempty"` + Args MappingWithEquals `yaml:"args,omitempty" json:"args,omitempty"` + SSH SSHConfig `yaml:"ssh,omitempty" json:"ssh,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + CacheFrom StringList `yaml:"cache_from,omitempty" json:"cache_from,omitempty"` + CacheTo StringList `yaml:"cache_to,omitempty" json:"cache_to,omitempty"` + NoCache bool `yaml:"no_cache,omitempty" json:"no_cache,omitempty"` + AdditionalContexts Mapping 
`yaml:"additional_contexts,omitempty" json:"additional_contexts,omitempty"` + Pull bool `yaml:"pull,omitempty" json:"pull,omitempty"` + ExtraHosts HostsList `yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` + Isolation string `yaml:"isolation,omitempty" json:"isolation,omitempty"` + Network string `yaml:"network,omitempty" json:"network,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + Secrets []ServiceSecretConfig `yaml:"secrets,omitempty" json:"secrets,omitempty"` + Tags StringList `yaml:"tags,omitempty" json:"tags,omitempty"` + Platforms StringList `yaml:"platforms,omitempty" json:"platforms,omitempty"` + Privileged bool `yaml:"privileged,omitempty" json:"privileged,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // BlkioConfig define blkio config type BlkioConfig struct { - Weight uint16 `yaml:",omitempty" json:"weight,omitempty"` - WeightDevice []WeightDevice `mapstructure:"weight_device" yaml:",omitempty" json:"weight_device,omitempty"` - DeviceReadBps []ThrottleDevice `mapstructure:"device_read_bps" yaml:",omitempty" json:"device_read_bps,omitempty"` - DeviceReadIOps []ThrottleDevice `mapstructure:"device_read_iops" yaml:",omitempty" json:"device_read_iops,omitempty"` - DeviceWriteBps []ThrottleDevice `mapstructure:"device_write_bps" yaml:",omitempty" json:"device_write_bps,omitempty"` - DeviceWriteIOps []ThrottleDevice `mapstructure:"device_write_iops" yaml:",omitempty" json:"device_write_iops,omitempty"` + Weight uint16 `yaml:"weight,omitempty" json:"weight,omitempty"` + WeightDevice []WeightDevice `yaml:"weight_device,omitempty" json:"weight_device,omitempty"` + DeviceReadBps []ThrottleDevice `yaml:"device_read_bps,omitempty" json:"device_read_bps,omitempty"` + DeviceReadIOps []ThrottleDevice `yaml:"device_read_iops,omitempty" json:"device_read_iops,omitempty"` + DeviceWriteBps []ThrottleDevice `yaml:"device_write_bps,omitempty" json:"device_write_bps,omitempty"` + DeviceWriteIOps 
[]ThrottleDevice `yaml:"device_write_iops,omitempty" json:"device_write_iops,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // WeightDevice is a structure that holds device:weight pair @@ -325,27 +316,17 @@ type WeightDevice struct { Path string Weight uint16 - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // ThrottleDevice is a structure that holds device:rate_per_second pair type ThrottleDevice struct { Path string - Rate uint64 + Rate UnitBytes - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } -// ShellCommand is a string or list of string args -type ShellCommand []string - -// StringList is a type for fields that can be a string or list of strings -type StringList []string - -// StringOrNumberList is a type for fields that can be a list of strings or -// numbers -type StringOrNumberList []string - // MappingWithEquals is a mapping type that can be converted from a list of // key[=value] strings. // For the key with an empty value (`key=`), the mapped value is set to a pointer to `""`. 
@@ -418,15 +399,47 @@ func NewMapping(values []string) Mapping { return mapping } -// Labels is a mapping type for labels -type Labels map[string]string +// convert values into a set of KEY=VALUE strings +func (m Mapping) Values() []string { + values := make([]string, 0, len(m)) + for k, v := range m { + values = append(values, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(values) + return values +} -func (l Labels) Add(key, value string) Labels { - if l == nil { - l = Labels{} +// ToMappingWithEquals converts Mapping into a MappingWithEquals with pointer references +func (m Mapping) ToMappingWithEquals() MappingWithEquals { + mapping := MappingWithEquals{} + for k, v := range m { + v := v + mapping[k] = &v } - l[key] = value - return l + return mapping +} + +func (m Mapping) Resolve(s string) (string, bool) { + v, ok := m[s] + return v, ok +} + +func (m Mapping) Clone() Mapping { + clone := Mapping{} + for k, v := range m { + clone[k] = v + } + return clone +} + +// Merge adds all values from second mapping which are not already defined +func (m Mapping) Merge(o Mapping) Mapping { + for k, v := range o { + if _, set := m[k]; !set { + m[k] = v + } + } + return m } type SSHKey struct { @@ -457,9 +470,9 @@ func (s SSHKey) MarshalYAML() (interface{}, error) { // MarshalJSON makes SSHKey implement json.Marshaller func (s SSHKey) MarshalJSON() ([]byte, error) { if s.Path == "" { - return []byte(fmt.Sprintf(`"%s"`, s.ID)), nil + return []byte(fmt.Sprintf(`%q`, s.ID)), nil } - return []byte(fmt.Sprintf(`"%s": %s`, s.ID, s.Path)), nil + return []byte(fmt.Sprintf(`%q: %s`, s.ID, s.Path)), nil } // MappingWithColon is a mapping type that can be converted from a list of @@ -467,91 +480,90 @@ func (s SSHKey) MarshalJSON() ([]byte, error) { type MappingWithColon map[string]string // HostsList is a list of colon-separated host-ip mappings -type HostsList []string +type HostsList map[string]string + +// AsList return host-ip mappings as a list of colon-separated strings +func (h 
HostsList) AsList() []string { + l := make([]string, 0, len(h)) + for k, v := range h { + l = append(l, fmt.Sprintf("%s:%s", k, v)) + } + return l +} + +func (h HostsList) MarshalYAML() (interface{}, error) { + list := h.AsList() + sort.Strings(list) + return list, nil +} + +func (h HostsList) MarshalJSON() ([]byte, error) { + list := h.AsList() + sort.Strings(list) + return json.Marshal(list) +} // LoggingConfig the logging configuration for a service type LoggingConfig struct { - Driver string `yaml:",omitempty" json:"driver,omitempty"` - Options map[string]string `yaml:",omitempty" json:"options,omitempty"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + Options Options `yaml:"options,omitempty" json:"options,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // DeployConfig the deployment configuration for a service type DeployConfig struct { - Mode string `yaml:",omitempty" json:"mode,omitempty"` - Replicas *uint64 `yaml:",omitempty" json:"replicas,omitempty"` - Labels Labels `yaml:",omitempty" json:"labels,omitempty"` - UpdateConfig *UpdateConfig `mapstructure:"update_config" yaml:"update_config,omitempty" json:"update_config,omitempty"` - RollbackConfig *UpdateConfig `mapstructure:"rollback_config" yaml:"rollback_config,omitempty" json:"rollback_config,omitempty"` - Resources Resources `yaml:",omitempty" json:"resources,omitempty"` - RestartPolicy *RestartPolicy `mapstructure:"restart_policy" yaml:"restart_policy,omitempty" json:"restart_policy,omitempty"` - Placement Placement `yaml:",omitempty" json:"placement,omitempty"` - EndpointMode string `mapstructure:"endpoint_mode" yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"` - - Extensions map[string]interface{} `yaml:",inline" json:"-"` -} - -// HealthCheckConfig the healthcheck configuration for a service -type HealthCheckConfig struct { - Test HealthCheckTest `yaml:",omitempty" 
json:"test,omitempty"` - Timeout *Duration `yaml:",omitempty" json:"timeout,omitempty"` - Interval *Duration `yaml:",omitempty" json:"interval,omitempty"` - Retries *uint64 `yaml:",omitempty" json:"retries,omitempty"` - StartPeriod *Duration `mapstructure:"start_period" yaml:"start_period,omitempty" json:"start_period,omitempty"` - Disable bool `yaml:",omitempty" json:"disable,omitempty"` + Mode string `yaml:"mode,omitempty" json:"mode,omitempty"` + Replicas *uint64 `yaml:"replicas,omitempty" json:"replicas,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + UpdateConfig *UpdateConfig `yaml:"update_config,omitempty" json:"update_config,omitempty"` + RollbackConfig *UpdateConfig `yaml:"rollback_config,omitempty" json:"rollback_config,omitempty"` + Resources Resources `yaml:"resources,omitempty" json:"resources,omitempty"` + RestartPolicy *RestartPolicy `yaml:"restart_policy,omitempty" json:"restart_policy,omitempty"` + Placement Placement `yaml:"placement,omitempty" json:"placement,omitempty"` + EndpointMode string `yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } -// HealthCheckTest is the command run to test the health of a service -type HealthCheckTest []string - // UpdateConfig the service update configuration type UpdateConfig struct { - Parallelism *uint64 `yaml:",omitempty" json:"parallelism,omitempty"` - Delay Duration `yaml:",omitempty" json:"delay,omitempty"` - FailureAction string `mapstructure:"failure_action" yaml:"failure_action,omitempty" json:"failure_action,omitempty"` - Monitor Duration `yaml:",omitempty" json:"monitor,omitempty"` - MaxFailureRatio float32 `mapstructure:"max_failure_ratio" yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"` - Order string `yaml:",omitempty" json:"order,omitempty"` + Parallelism *uint64 `yaml:"parallelism,omitempty" 
json:"parallelism,omitempty"` + Delay Duration `yaml:"delay,omitempty" json:"delay,omitempty"` + FailureAction string `yaml:"failure_action,omitempty" json:"failure_action,omitempty"` + Monitor Duration `yaml:"monitor,omitempty" json:"monitor,omitempty"` + MaxFailureRatio float32 `yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"` + Order string `yaml:"order,omitempty" json:"order,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // Resources the resource limits and reservations type Resources struct { - Limits *Resource `yaml:",omitempty" json:"limits,omitempty"` - Reservations *Resource `yaml:",omitempty" json:"reservations,omitempty"` + Limits *Resource `yaml:"limits,omitempty" json:"limits,omitempty"` + Reservations *Resource `yaml:"reservations,omitempty" json:"reservations,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // Resource is a resource to be limited or reserved type Resource struct { // TODO: types to convert from units and ratios - NanoCPUs string `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"` - MemoryBytes UnitBytes `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"` - PIds int64 `mapstructure:"pids" yaml:"pids,omitempty" json:"pids,omitempty"` - Devices []DeviceRequest `mapstructure:"devices" yaml:"devices,omitempty" json:"devices,omitempty"` - GenericResources []GenericResource `mapstructure:"generic_resources" yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"` - - Extensions map[string]interface{} `yaml:",inline" json:"-"` -} + NanoCPUs string `yaml:"cpus,omitempty" json:"cpus,omitempty"` + MemoryBytes UnitBytes `yaml:"memory,omitempty" json:"memory,omitempty"` + Pids int64 `yaml:"pids,omitempty" json:"pids,omitempty"` + Devices []DeviceRequest `yaml:"devices,omitempty" 
json:"devices,omitempty"` + GenericResources []GenericResource `yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"` -type DeviceRequest struct { - Capabilities []string `mapstructure:"capabilities" yaml:"capabilities,omitempty" json:"capabilities,omitempty"` - Driver string `mapstructure:"driver" yaml:"driver,omitempty" json:"driver,omitempty"` - Count int64 `mapstructure:"count" yaml:"count,omitempty" json:"count,omitempty"` - IDs []string `mapstructure:"device_ids" yaml:"device_ids,omitempty" json:"device_ids,omitempty"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // GenericResource represents a "user defined" resource which can // only be an integer (e.g: SSD=3) for a service type GenericResource struct { - DiscreteResourceSpec *DiscreteGenericResource `mapstructure:"discrete_resource_spec" yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // DiscreteGenericResource represents a "user defined" resource which is defined @@ -562,67 +574,55 @@ type DiscreteGenericResource struct { Kind string `json:"kind"` Value int64 `json:"value"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` -} - -// UnitBytes is the bytes type -type UnitBytes int64 - -// MarshalYAML makes UnitBytes implement yaml.Marshaller -func (u UnitBytes) MarshalYAML() (interface{}, error) { - return fmt.Sprintf("%d", u), nil -} - -// MarshalJSON makes UnitBytes implement json.Marshaler -func (u UnitBytes) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%d"`, u)), nil + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // RestartPolicy the service restart policy type RestartPolicy struct { - Condition string `yaml:",omitempty" 
json:"condition,omitempty"` - Delay *Duration `yaml:",omitempty" json:"delay,omitempty"` - MaxAttempts *uint64 `mapstructure:"max_attempts" yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"` - Window *Duration `yaml:",omitempty" json:"window,omitempty"` + Condition string `yaml:"condition,omitempty" json:"condition,omitempty"` + Delay *Duration `yaml:"delay,omitempty" json:"delay,omitempty"` + MaxAttempts *uint64 `yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"` + Window *Duration `yaml:"window,omitempty" json:"window,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // Placement constraints for the service type Placement struct { - Constraints []string `yaml:",omitempty" json:"constraints,omitempty"` - Preferences []PlacementPreferences `yaml:",omitempty" json:"preferences,omitempty"` - MaxReplicas uint64 `mapstructure:"max_replicas_per_node" yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"` + Constraints []string `yaml:"constraints,omitempty" json:"constraints,omitempty"` + Preferences []PlacementPreferences `yaml:"preferences,omitempty" json:"preferences,omitempty"` + MaxReplicas uint64 `yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // PlacementPreferences is the preferences for a service placement type PlacementPreferences struct { - Spread string `yaml:",omitempty" json:"spread,omitempty"` + Spread string `yaml:"spread,omitempty" json:"spread,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // ServiceNetworkConfig is the network configuration for a service type ServiceNetworkConfig struct { - Priority int `yaml:",omitempty" json:"priotirt,omitempty"` - Aliases []string 
`yaml:",omitempty" json:"aliases,omitempty"` - Ipv4Address string `mapstructure:"ipv4_address" yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"` - Ipv6Address string `mapstructure:"ipv6_address" yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"` + Priority int `yaml:"priority,omitempty" json:"priority,omitempty"` + Aliases []string `yaml:"aliases,omitempty" json:"aliases,omitempty"` + Ipv4Address string `yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"` + Ipv6Address string `yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"` + LinkLocalIPs []string `yaml:"link_local_ips,omitempty" json:"link_local_ips,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // ServicePortConfig is the port configuration for a service type ServicePortConfig struct { - Mode string `yaml:",omitempty" json:"mode,omitempty"` - HostIP string `mapstructure:"host_ip" yaml:"host_ip,omitempty" json:"host_ip,omitempty"` - Target uint32 `yaml:",omitempty" json:"target,omitempty"` - Published string `yaml:",omitempty" json:"published,omitempty"` - Protocol string `yaml:",omitempty" json:"protocol,omitempty"` + Mode string `yaml:"mode,omitempty" json:"mode,omitempty"` + HostIP string `yaml:"host_ip,omitempty" json:"host_ip,omitempty"` + Target uint32 `yaml:"target,omitempty" json:"target,omitempty"` + Published string `yaml:"published,omitempty" json:"published,omitempty"` + Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // ParsePortConfig parse short syntax for service port configuration @@ -666,16 +666,35 @@ func convertPortToPortConfig(port nat.Port, portBindings map[nat.Port][]nat.Port // ServiceVolumeConfig are references to a volume used by a service type ServiceVolumeConfig struct { - Type string `yaml:",omitempty" 
json:"type,omitempty"` - Source string `yaml:",omitempty" json:"source,omitempty"` - Target string `yaml:",omitempty" json:"target,omitempty"` - ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"` - Consistency string `yaml:",omitempty" json:"consistency,omitempty"` - Bind *ServiceVolumeBind `yaml:",omitempty" json:"bind,omitempty"` - Volume *ServiceVolumeVolume `yaml:",omitempty" json:"volume,omitempty"` - Tmpfs *ServiceVolumeTmpfs `yaml:",omitempty" json:"tmpfs,omitempty"` + Type string `yaml:"type,omitempty" json:"type,omitempty"` + Source string `yaml:"source,omitempty" json:"source,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + ReadOnly bool `yaml:"read_only,omitempty" json:"read_only,omitempty"` + Consistency string `yaml:"consistency,omitempty" json:"consistency,omitempty"` + Bind *ServiceVolumeBind `yaml:"bind,omitempty" json:"bind,omitempty"` + Volume *ServiceVolumeVolume `yaml:"volume,omitempty" json:"volume,omitempty"` + Tmpfs *ServiceVolumeTmpfs `yaml:"tmpfs,omitempty" json:"tmpfs,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline" json:"-"` +} - Extensions map[string]interface{} `yaml:",inline" json:"-"` +// String render ServiceVolumeConfig as a volume string, one can parse back using loader.ParseVolume +func (s ServiceVolumeConfig) String() string { + access := "rw" + if s.ReadOnly { + access = "ro" + } + options := []string{access} + if s.Bind != nil && s.Bind.SELinux != "" { + options = append(options, s.Bind.SELinux) + } + if s.Bind != nil && s.Bind.Propagation != "" { + options = append(options, s.Bind.Propagation) + } + if s.Volume != nil && s.Volume.NoCopy { + options = append(options, "nocopy") + } + return fmt.Sprintf("%s:%s:%s", s.Source, s.Target, strings.Join(options, ",")) } const ( @@ -687,6 +706,8 @@ const ( VolumeTypeTmpfs = "tmpfs" // VolumeTypeNamedPipe is the type for mounting Windows named pipes VolumeTypeNamedPipe = "npipe" + // 
VolumeTypeCluster is the type for mounting container storage interface (CSI) volumes + VolumeTypeCluster = "cluster" // SElinuxShared share the volume content SElinuxShared = "z" @@ -696,11 +717,11 @@ const ( // ServiceVolumeBind are options for a service volume of type bind type ServiceVolumeBind struct { - SELinux string `mapstructure:"selinux" yaml:",omitempty" json:"selinux,omitempty"` - Propagation string `yaml:",omitempty" json:"propagation,omitempty"` - CreateHostPath bool `mapstructure:"create_host_path" yaml:"create_host_path,omitempty" json:"create_host_path,omitempty"` + SELinux string `yaml:"selinux,omitempty" json:"selinux,omitempty"` + Propagation string `yaml:"propagation,omitempty" json:"propagation,omitempty"` + CreateHostPath bool `yaml:"create_host_path,omitempty" json:"create_host_path,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // SELinux represents the SELinux re-labeling options. 
@@ -729,27 +750,29 @@ const ( // ServiceVolumeVolume are options for a service volume of type volume type ServiceVolumeVolume struct { - NoCopy bool `mapstructure:"nocopy" yaml:"nocopy,omitempty" json:"nocopy,omitempty"` + NoCopy bool `yaml:"nocopy,omitempty" json:"nocopy,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // ServiceVolumeTmpfs are options for a service volume of type tmpfs type ServiceVolumeTmpfs struct { - Size UnitBytes `yaml:",omitempty" json:"size,omitempty"` + Size UnitBytes `yaml:"size,omitempty" json:"size,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Mode uint32 `yaml:"mode,omitempty" json:"mode,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // FileReferenceConfig for a reference to a swarm file object type FileReferenceConfig struct { - Source string `yaml:",omitempty" json:"source,omitempty"` - Target string `yaml:",omitempty" json:"target,omitempty"` - UID string `yaml:",omitempty" json:"uid,omitempty"` - GID string `yaml:",omitempty" json:"gid,omitempty"` - Mode *uint32 `yaml:",omitempty" json:"mode,omitempty"` + Source string `yaml:"source,omitempty" json:"source,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + UID string `yaml:"uid,omitempty" json:"uid,omitempty"` + GID string `yaml:"gid,omitempty" json:"gid,omitempty"` + Mode *uint32 `yaml:"mode,omitempty" json:"mode,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // ServiceConfigObjConfig is the config obj configuration for a service @@ -760,11 +783,11 @@ type ServiceSecretConfig FileReferenceConfig // UlimitsConfig the ulimit configuration type UlimitsConfig struct { - Single int `yaml:",omitempty" json:"single,omitempty"` - Soft int `yaml:",omitempty" json:"soft,omitempty"` - Hard int `yaml:",omitempty" 
json:"hard,omitempty"` + Single int `yaml:"single,omitempty" json:"single,omitempty"` + Soft int `yaml:"soft,omitempty" json:"soft,omitempty"` + Hard int `yaml:"hard,omitempty" json:"hard,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // MarshalYAML makes UlimitsConfig implement yaml.Marshaller @@ -772,7 +795,13 @@ func (u *UlimitsConfig) MarshalYAML() (interface{}, error) { if u.Single != 0 { return u.Single, nil } - return u, nil + return struct { + Soft int + Hard int + }{ + Soft: u.Soft, + Hard: u.Hard, + }, nil } // MarshalJSON makes UlimitsConfig implement json.Marshaller @@ -786,51 +815,51 @@ func (u *UlimitsConfig) MarshalJSON() ([]byte, error) { // NetworkConfig for a network type NetworkConfig struct { - Name string `yaml:",omitempty" json:"name,omitempty"` - Driver string `yaml:",omitempty" json:"driver,omitempty"` - DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` - Ipam IPAMConfig `yaml:",omitempty" json:"ipam,omitempty"` - External External `yaml:",omitempty" json:"external,omitempty"` - Internal bool `yaml:",omitempty" json:"internal,omitempty"` - Attachable bool `yaml:",omitempty" json:"attachable,omitempty"` - Labels Labels `yaml:",omitempty" json:"labels,omitempty"` - EnableIPv6 bool `mapstructure:"enable_ipv6" yaml:"enable_ipv6,omitempty" json:"enable_ipv6,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + DriverOpts Options `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + Ipam IPAMConfig `yaml:"ipam,omitempty" json:"ipam,omitempty"` + External External `yaml:"external,omitempty" json:"external,omitempty"` + Internal bool `yaml:"internal,omitempty" json:"internal,omitempty"` + Attachable bool `yaml:"attachable,omitempty" 
json:"attachable,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + EnableIPv6 bool `yaml:"enable_ipv6,omitempty" json:"enable_ipv6,omitempty"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // IPAMConfig for a network type IPAMConfig struct { - Driver string `yaml:",omitempty" json:"driver,omitempty"` - Config []*IPAMPool `yaml:",omitempty" json:"config,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + Config []*IPAMPool `yaml:"config,omitempty" json:"config,omitempty"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // IPAMPool for a network type IPAMPool struct { - Subnet string `yaml:",omitempty" json:"subnet,omitempty"` - Gateway string `yaml:",omitempty" json:"gateway,omitempty"` - IPRange string `mapstructure:"ip_range" yaml:"ip_range,omitempty" json:"ip_range,omitempty"` - AuxiliaryAddresses map[string]string `mapstructure:"aux_addresses" yaml:"aux_addresses,omitempty" json:"aux_addresses,omitempty"` + Subnet string `yaml:"subnet,omitempty" json:"subnet,omitempty"` + Gateway string `yaml:"gateway,omitempty" json:"gateway,omitempty"` + IPRange string `yaml:"ip_range,omitempty" json:"ip_range,omitempty"` + AuxiliaryAddresses Mapping `yaml:"aux_addresses,omitempty" json:"aux_addresses,omitempty"` Extensions map[string]interface{} `yaml:",inline" json:"-"` } // VolumeConfig for a volume type VolumeConfig struct { - Name string `yaml:",omitempty" json:"name,omitempty"` - Driver string `yaml:",omitempty" json:"driver,omitempty"` - DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` - External External `yaml:",omitempty" json:"external,omitempty"` - Labels Labels `yaml:",omitempty" json:"labels,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Driver string 
`yaml:"driver,omitempty" json:"driver,omitempty"` + DriverOpts Options `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + External External `yaml:"external,omitempty" json:"external,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // External identifies a Volume or Network as a reference to a resource that is // not managed, and should already exist. // External.name is deprecated and replaced by Volume.name type External struct { - Name string `yaml:",omitempty" json:"name,omitempty"` - External bool `yaml:",omitempty" json:"external,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Name string `yaml:"name,omitempty" json:"name,omitempty"` + External bool `yaml:"external,omitempty" json:"external,omitempty"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // MarshalYAML makes External implement yaml.Marshaller @@ -851,22 +880,23 @@ func (e External) MarshalJSON() ([]byte, error) { // CredentialSpecConfig for credential spec on Windows type CredentialSpecConfig struct { - Config string `yaml:",omitempty" json:"config,omitempty"` // Config was added in API v1.40 - File string `yaml:",omitempty" json:"file,omitempty"` - Registry string `yaml:",omitempty" json:"registry,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Config string `yaml:"config,omitempty" json:"config,omitempty"` // Config was added in API v1.40 + File string `yaml:"file,omitempty" json:"file,omitempty"` + Registry string `yaml:"registry,omitempty" json:"registry,omitempty"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } // FileObjectConfig is a config type for a file used by a service type FileObjectConfig struct { - Name string `yaml:",omitempty" json:"name,omitempty"` - File string `yaml:",omitempty" json:"file,omitempty"` - External External `yaml:",omitempty" json:"external,omitempty"` - Labels Labels 
`yaml:",omitempty" json:"labels,omitempty"` - Driver string `yaml:",omitempty" json:"driver,omitempty"` - DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` - TemplateDriver string `mapstructure:"template_driver" yaml:"template_driver,omitempty" json:"template_driver,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Name string `yaml:"name,omitempty" json:"name,omitempty"` + File string `yaml:"file,omitempty" json:"file,omitempty"` + Environment string `yaml:"environment,omitempty" json:"environment,omitempty"` + External External `yaml:"external,omitempty" json:"external,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + DriverOpts map[string]string `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + TemplateDriver string `yaml:"template_driver,omitempty" json:"template_driver,omitempty"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` } const ( @@ -883,14 +913,25 @@ const ( type DependsOnConfig map[string]ServiceDependency type ServiceDependency struct { - Condition string `yaml:",omitempty" json:"condition,omitempty"` - Extensions map[string]interface{} `yaml:",inline" json:"-"` + Condition string `yaml:"condition,omitempty" json:"condition,omitempty"` + Restart bool `yaml:"restart,omitempty" json:"restart,omitempty"` + Extensions Extensions `yaml:"#extensions,inline" json:"-"` + Required bool `yaml:"required" json:"required"` } -type ExtendsConfig MappingWithEquals +type ExtendsConfig struct { + File string `yaml:"file,omitempty" json:"file,omitempty"` + Service string `yaml:"service,omitempty" json:"service,omitempty"` +} // SecretConfig for a secret type SecretConfig FileObjectConfig // ConfigObjConfig is the config for the swarm "Config" object type ConfigObjConfig FileObjectConfig + +type IncludeConfig struct { + Path StringList `yaml:"path,omitempty" 
json:"path,omitempty"` + ProjectDirectory string `yaml:"project_directory,omitempty" json:"project_directory,omitempty"` + EnvFile StringList `yaml:"env_file,omitempty" json:"env_file,omitempty"` +} diff --git a/vendor/github.com/compose-spec/compose-go/utils/collectionutils.go b/vendor/github.com/compose-spec/compose-go/utils/collectionutils.go new file mode 100644 index 0000000000..343692250d --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/utils/collectionutils.go @@ -0,0 +1,51 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package utils + +import "golang.org/x/exp/slices" + +func MapKeys[T comparable, U any](theMap map[T]U) []T { + var result []T + for key := range theMap { + result = append(result, key) + } + return result +} + +func MapsAppend[T comparable, U any](target map[T]U, source map[T]U) map[T]U { + if target == nil { + return source + } + if source == nil { + return target + } + for key, value := range source { + if _, ok := target[key]; !ok { + target[key] = value + } + } + return target +} + +func ArrayContains[T comparable](source []T, toCheck []T) bool { + for _, value := range toCheck { + if !slices.Contains(source, value) { + return false + } + } + return true +} diff --git a/vendor/github.com/compose-spec/compose-go/utils/stringutils.go b/vendor/github.com/compose-spec/compose-go/utils/stringutils.go new file mode 100644 index 0000000000..182ddf8302 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/utils/stringutils.go @@ -0,0 +1,58 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package utils + +import ( + "fmt" + "strconv" + "strings" +) + +// StringContains check if an array contains a specific value +func StringContains(array []string, needle string) bool { + for _, val := range array { + if val == needle { + return true + } + } + return false +} + +// StringToBool converts a string to a boolean ignoring errors +func StringToBool(s string) bool { + b, _ := strconv.ParseBool(strings.ToLower(strings.TrimSpace(s))) + return b +} + +// GetAsEqualsMap split key=value formatted strings into a key : value map +func GetAsEqualsMap(em []string) map[string]string { + m := make(map[string]string) + for _, v := range em { + kv := strings.SplitN(v, "=", 2) + m[kv[0]] = kv[1] + } + return m +} + +// GetAsEqualsMap format a key : value map into key=value strings +func GetAsStringList(em map[string]string) []string { + m := make([]string, 0, len(em)) + for k, v := range em { + m = append(m, fmt.Sprintf("%s=%s", k, v)) + } + return m +} diff --git a/vendor/github.com/distribution/distribution/v3/LICENSE b/vendor/github.com/distribution/distribution/v3/LICENSE deleted file mode 100644 index e06d208186..0000000000 --- a/vendor/github.com/distribution/distribution/v3/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/distribution/distribution/v3/digestset/set.go b/vendor/github.com/distribution/distribution/v3/digestset/set.go deleted file mode 100644 index 71327dca72..0000000000 --- a/vendor/github.com/distribution/distribution/v3/digestset/set.go +++ /dev/null @@ -1,247 +0,0 @@ -package digestset - -import ( - "errors" - "sort" - "strings" - "sync" - - digest "github.com/opencontainers/go-digest" -) - -var ( - // ErrDigestNotFound is used when a matching digest - // could not be found in a set. - ErrDigestNotFound = errors.New("digest not found") - - // ErrDigestAmbiguous is used when multiple digests - // are found in a set. None of the matching digests - // should be considered valid matches. - ErrDigestAmbiguous = errors.New("ambiguous digest string") -) - -// Set is used to hold a unique set of digests which -// may be easily referenced by easily referenced by a string -// representation of the digest as well as short representation. -// The uniqueness of the short representation is based on other -// digests in the set. If digests are omitted from this set, -// collisions in a larger set may not be detected, therefore it -// is important to always do short representation lookups on -// the complete set of digests. To mitigate collisions, an -// appropriately long short code should be used. 
-type Set struct { - mutex sync.RWMutex - entries digestEntries -} - -// NewSet creates an empty set of digests -// which may have digests added. -func NewSet() *Set { - return &Set{ - entries: digestEntries{}, - } -} - -// checkShortMatch checks whether two digests match as either whole -// values or short values. This function does not test equality, -// rather whether the second value could match against the first -// value. -func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { - if len(hex) == len(shortHex) { - if hex != shortHex { - return false - } - if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - } else if !strings.HasPrefix(hex, shortHex) { - return false - } else if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - return true -} - -// Lookup looks for a digest matching the given string representation. -// If no digests could be found ErrDigestNotFound will be returned -// with an empty digest value. If multiple matches are found -// ErrDigestAmbiguous will be returned with an empty digest value. 
-func (dst *Set) Lookup(d string) (digest.Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg digest.Algorithm - hex string - ) - dgst, err := digest.Parse(d) - if err == digest.ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. 
-func (dst *Set) Add(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []digest.Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]digest.Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. 
The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. -func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[digest.Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg digest.Algorithm - val string - digest digest.Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/github.com/distribution/distribution/v3/reference/helpers.go b/vendor/github.com/distribution/distribution/v3/reference/helpers.go deleted file mode 100644 index 978df7eabb..0000000000 --- a/vendor/github.com/distribution/distribution/v3/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. 
-func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/distribution/distribution/v3/reference/normalize.go b/vendor/github.com/distribution/distribution/v3/reference/normalize.go deleted file mode 100644 index e7a1f9b528..0000000000 --- a/vendor/github.com/distribution/distribution/v3/reference/normalize.go +++ /dev/null @@ -1,198 +0,0 @@ -package reference - -import ( - "fmt" - "strings" - - "github.com/distribution/distribution/v3/digestset" - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". 
-type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. -func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remoteName) - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// ParseDockerRef normalizes the image reference following the docker convention. This is added -// mainly for backward compatibility. -// The reference returned can only be either tagged or digested. For reference contains both tag -// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ -// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. -func ParseDockerRef(ref string) (Named, error) { - named, err := ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - if _, ok := named.(NamedTagged); ok { - if canonical, ok := named.(Canonical); ok { - // The reference is both tagged and digested, only - // return digested. 
- newNamed, err := WithName(canonical.Name()) - if err != nil { - return nil, err - } - newCanonical, err := WithDigest(newNamed, canonical.Digest()) - if err != nil { - return nil, err - } - return newCanonical, nil - } - } - return TagNameOnly(named), nil -} - -// splitDockerDomain splits a repository name to domain and remotename string. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost" && strings.ToLower(name[:i]) == name[:i]) { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. 
-func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} - -// ParseAnyReferenceWithSet parses a reference string as a possible short -// identifier to be matched in a digest set, a full digest, or familiar name. 
-func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { - if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { - dgst, err := ds.Lookup(ref) - if err == nil { - return digestReference(dgst), nil - } - } else { - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/distribution/distribution/v3/reference/reference.go b/vendor/github.com/distribution/distribution/v3/reference/reference.go deleted file mode 100644 index 8c0c23b2fe..0000000000 --- a/vendor/github.com/distribution/distribution/v3/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. 
- NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. 
-func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. 
-type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// DEPRECATED: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. 
-func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. 
-func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. 
-func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git 
a/vendor/github.com/distribution/distribution/v3/reference/regexp.go b/vendor/github.com/distribution/distribution/v3/reference/regexp.go deleted file mode 100644 index 78e2f9170e..0000000000 --- a/vendor/github.com/distribution/distribution/v3/reference/regexp.go +++ /dev/null @@ -1,147 +0,0 @@ -package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. Repeated dashes and underscores are intentionally treated - // differently. In order to support valid hostnames as name components, - // supporting repeated dash was added. Additionally double underscore is - // now allowed as a separator to loosen the restriction for previously - // supported names. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. 
- DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. 
A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. 
-func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md new file mode 100644 index 0000000000..0a1ff9f94d --- /dev/null +++ b/vendor/github.com/imdario/mergo/CONTRIBUTING.md @@ -0,0 +1,112 @@ + +# Contributing to mergo + +First off, thanks for taking the time to contribute! ❤️ + +All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 + +> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: +> - Star the project +> - Tweet about it +> - Refer this project in your project's readme +> - Mention the project at local meetups and tell your friends/colleagues + + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [I Have a Question](#i-have-a-question) +- [I Want To Contribute](#i-want-to-contribute) +- [Reporting Bugs](#reporting-bugs) +- [Suggesting Enhancements](#suggesting-enhancements) + +## Code of Conduct + +This project and everyone participating in it is governed by the +[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. Please report unacceptable behavior +to <>. + + +## I Have a Question + +> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). 
+ +Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. + +If you then still feel the need to ask a question and need clarification, we recommend the following: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). +- Provide as much context as you can about what you're running into. +- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. + +We will then take care of the issue as soon as possible. + +## I Want To Contribute + +> ### Legal Notice +> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. + +### Reporting Bugs + + +#### Before Submitting a Bug Report + +A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. + +- Make sure that you are using the latest version. +- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). +- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug). 
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. +- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) +- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue? And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? + +> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. 
+- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. 
You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index aa8cbd7ce6..ffbbb62c70 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,18 +1,20 @@ # Mergo - -[![GoDoc][3]][4] [![GitHub release][5]][6] [![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] +[![Test status][1]][2] +[![OpenSSF Scorecard][21]][22] +[![OpenSSF Best Practices][19]][20] +[![Coverage status][9]][10] [![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] +[![FOSSA status][13]][14] -[![GoCenter Kudos][15]][16] +[![GoDoc][3]][4] +[![Become my sponsor][15]][16] +[![Tidelift][17]][18] -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo +[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master +[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo [5]: https://img.shields.io/github/release/imdario/mergo.svg @@ -25,8 +27,14 @@ [12]: https://sourcegraph.com/github.com/imdario/mergo?badge [13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -[16]: https://search.gocenter.io/github.com/imdario/mergo 
+[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo +[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge +[20]: https://bestpractices.coreinfrastructure.org/projects/7177 +[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge +[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -36,11 +44,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the ## Status -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). ### Important note -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. 
@@ -51,9 +59,8 @@ If you were using Mergo before April 6th, 2015, please check your project works If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) Donate using Liberapay +Become my sponsor ### Mergo in the wild @@ -98,6 +105,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - [containerssh/containerssh](https://github.com/containerssh/containerssh) +- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +- [tjpnz/structbot](https://github.com/tjpnz/structbot) ## Install @@ -168,7 +177,7 @@ func main() { Note: if test are failing due missing package, please execute: - go get gopkg.in/yaml.v2 + go get gopkg.in/yaml.v3 ### Transformers @@ -218,7 +227,6 @@ func main() { } ``` - ## Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) @@ -227,21 +235,8 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). 
-## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). - [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md new file mode 100644 index 0000000000..a5de61f77b --- /dev/null +++ b/vendor/github.com/imdario/mergo/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.3.x | :white_check_mark: | +| < 0.3 | :x: | + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. 
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index a13a7ee46c..b50d5c2a4e 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } } // Remember, remember... - visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } zeroValue := reflect.Value{} switch dst.Kind() { @@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { dstMap[fieldName] = src.Field(i).Interface() } } @@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { func _map(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 8c2a8fcd90..0ef9b2138c 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool { } type Config struct { + Transformers Transformers Overwrite bool + ShouldNotDereference bool AppendSlice bool TypeCheck bool - Transformers Transformers overwriteWithEmptyValue bool overwriteSliceWithEmptyValue bool sliceDeepCopy bool @@ -76,10 +77,10 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } - if config.Transformers != nil && !isEmptyValue(dst) { + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { if fn := config.Transformers.Transformer(dst.Type()); fn != nil { err = fn(dst, src) return @@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { dst.Set(src) } } @@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if src.Kind() != reflect.Map { - if overwrite { + if overwrite && dst.CanSet() { dst.Set(src) } return @@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { if typeCheck && srcSlice.Type() != dstSlice.Type() { return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } @@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue + + if dstElement.IsValid() && !isEmptyValue(dstElement, 
!config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } } - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } + + // Ensure that all keys in dst are deleted if they are not in src. + if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } case reflect.Slice: if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { @@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if src.Kind() != reflect.Interface { if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, 
config); err != nil { + return + } + } else { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } } } else if dst.Elem().Type() == src.Type() { if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { @@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } break @@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co break } default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) if mustSet { if dst.CanSet() { dst.Set(src) @@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) { config.overwriteSliceWithEmptyValue = true } +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + // WithAppendSlice will make merge append slices instead of overwriting it. 
func WithAppendSlice(config *Config) { config.AppendSlice = true @@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) { func merge(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 3cc926c7f6..0a721e2d85 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -17,10 +17,10 @@ import ( var ( ErrNilArguments = errors.New("src and dst must not be nil") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") + ErrNonPointerArgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -28,13 +28,13 @@ var ( // checks in progress are true when it reencounters them. // Visited are stored in a map indexed by 17 * a1 + a2; type visit struct { - ptr uintptr typ reflect.Type next *visit + ptr uintptr } // From src/pkg/encoding/json/encode.go. 
-func isEmptyValue(v reflect.Value) bool { +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 @@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool { if v.IsNil() { return true } - return isEmptyValue(v.Elem()) + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false case reflect.Func: return v.IsNil() case reflect.Invalid: @@ -65,7 +68,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { return } vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { err = ErrNotSupported return } diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 38a099162c..c758234904 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,3 +1,16 @@ +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. 
[GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + ## 1.4.3 * Fix cases where `json.Number` didn't decode properly [GH-261] diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 4d4bbc733b..3a754ca724 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -77,6 +77,28 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { } } +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + // StringToSliceHookFunc returns a DecodeHookFunc that converts // string to []string by splitting on the given sep. func StringToSliceHookFunc(sep string) DecodeHookFunc { diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 6b81b00679..1efb22ac36 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -122,7 +122,7 @@ // field value is zero and a numeric type, the field is empty, and it won't // be encoded into the destination type. // -// type Source { +// type Source struct { // Age int `mapstructure:",omitempty"` // } // @@ -215,6 +215,12 @@ type DecoderConfig struct { // (extra keys). 
ErrorUnused bool + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + // ZeroFields, if set to true, will zero fields before writing them. // For example, a map will be emptied before decoded values are put in // it. If this is false, a map will be merged. @@ -259,6 +265,10 @@ type DecoderConfig struct { // defaults to "mapstructure" TagName string + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + // MatchName is the function used to match the map key to the struct // field name or tag. Defaults to `strings.EqualFold`. This can be used // to implement case-sensitive tag values, support snake casing, etc. @@ -284,6 +294,11 @@ type Metadata struct { // Unused is a slice of keys that were found in the raw value but // weren't decoded since there was no matching field in the result interface Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string } // Decode takes an input structure and uses reflection to translate it to @@ -375,6 +390,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { if config.Metadata.Unused == nil { config.Metadata.Unused = make([]string, 0) } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } } if config.TagName == "" { @@ -906,9 +925,15 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re tagValue := f.Tag.Get(d.config.TagName) keyName := f.Name + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + // If Squash is set in the config, we squash the field down. 
squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + // Determine the name of the key in the map if index := strings.Index(tagValue, ","); index != -1 { if tagValue[:index] == "-" { @@ -920,7 +945,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } // If "squash" is specified in the tag, we squash the field down. - squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1 + squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 if squash { // When squashing, the embedded type can be a pointer to a struct. if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { @@ -932,7 +957,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) } } - keyName = tagValue[:index] + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } } else if len(tagValue) > 0 { if tagValue == "-" { continue @@ -1088,7 +1115,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } // If the input value is nil, then don't allocate since empty != nil - if dataVal.IsNil() { + if dataValKind != reflect.Array && dataVal.IsNil() { return nil } @@ -1250,6 +1277,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e dataValKeysUnused[dataValKey.Interface()] = struct{}{} } + targetValKeysUnused := make(map[interface{}]struct{}) errors := make([]string, 0) // This slice will keep track of all the structs we'll be decoding. @@ -1354,7 +1382,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e if !rawMapVal.IsValid() { // There was no matching key in the map for the value in - // the struct. Just ignore. + // the struct. Remember it for potential errors and metadata. 
+ targetValKeysUnused[fieldName] = struct{}{} continue } } @@ -1414,6 +1443,17 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e errors = appendErrors(errors, err) } + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + if len(errors) > 0 { return &Error{errors} } @@ -1428,6 +1468,14 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } } return nil @@ -1465,3 +1513,28 @@ func getKind(val reflect.Value) reflect.Kind { return kind } } + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. 
All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 0000000000..2c033dff47 --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. 
+type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 0000000000..8a7cf20dbd --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,282 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +// Unless otherwise specified, these functions all apply to the elements +// of a slice at index 0 <= i < len(s). 
+// +// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a +// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), +// or the sorting may fail to sort correctly. A common case is when sorting slices of +// floating-point numbers containing NaN values. +package slices + +import "golang.org/x/exp/constraints" + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[E comparable](s1, s2 []E) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using a comparison +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2. +// The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// Comparisons involving floating point NaNs are ignored. 
+func Compare[E constraints.Ordered](s1, s2 []E) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// CompareFunc is like Compare but uses a comparison function +// on each pair of elements. The elements are compared in increasing +// index order, and the comparisons stop after the first time cmp +// returns non-zero. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[E comparable](s []E, v E) int { + for i := range s { + if v == s[i] { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[E any](s []E, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[E comparable](s []E, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[E any](s []E, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// In the returned slice r, r[i] == v[0]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). 
+func Insert[S ~[]E, E any](s S, i int, v ...E) S { + tot := len(s) + len(v) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[i:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[i:]) + return s2 +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete modifies the contents of the slice s; it does not create a new slice. +// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + + return append(s[:i], s[j:]...) +} + +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// When DeleteFunc removes m elements, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage +// collected. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + // Don't start copying elements until we find one to delete. + for i, v := range s { + if del(v) { + j := i + for i++; i < len(s); i++ { + v = s[i] + if !del(v) { + s[j] = v + j++ + } + } + return s[:j] + } + } + return s +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. 
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[j:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s; it does not create a new slice. +// When Compact discards m elements in total, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage collected. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } + i++ + } + } + return s[:i] +} + +// CompactFunc is like Compact but uses a comparison function. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } + i++ + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. 
+func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 0000000000..231b6448ac --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,128 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// Sort may fail to sort correctly when sorting slices of floating-point +// numbers containing Not-a-number (NaN) values. +// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) +// instead if the input may contain NaNs. +func Sort[E constraints.Ordered](x []E) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the less function. +// This sort is not guaranteed to be stable. +// +// SortFunc requires that less is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[E any](x []E, less func(a, b E) bool) { + n := len(x) + pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using less to compare elements. 
+func SortStableFunc[E any](x []E, less func(a, b E) bool) { + stableLessFunc(x, len(x), less) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[E constraints.Ordered](x []E) bool { + for i := len(x) - 1; i > 0; i-- { + if x[i] < x[i-1] { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with less as the +// comparison function. +func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { + for i := len(x) - 1; i > 0; i-- { + if less(x[i], x[i-1]) { + return false + } + } + return true +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if x[h] < target { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && x[i] == target +} + +// BinarySearchFunc works like BinarySearch, but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. 
+func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go new file mode 100644 index 0000000000..2a632476c5 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortLessFunc sorts data[a:b] using insertion sort. +func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + for i := a + 1; i < b; i++ { + for j := i; j > a && less(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownLessFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && less(data[first+child], data[first+child+1]) { + child++ + } + if !less(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownLessFunc(data, i, hi, first, less) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownLessFunc(data, lo, i, first, less) + } +} + +// pdqsortLessFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortLessFunc(data, a, b, less) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortLessFunc(data, a, b, less) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. 
+ if !wasBalanced { + breakPatternsLessFunc(data, a, b, less) + limit-- + } + + pivot, hint := choosePivotLessFunc(data, a, b, less) + if hint == decreasingHint { + reverseRangeLessFunc(data, a, b, less) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortLessFunc(data, a, b, less) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !less(data[a-1], data[pivot]) { + mid := partitionEqualLessFunc(data, a, b, pivot, less) + a = mid + continue + } + + mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortLessFunc(data, a, mid, limit, less) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortLessFunc(data, mid+1, b, limit, less) + b = mid + } + } +} + +// partitionLessFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]

=p for inewpivot. +// On return, data[newpivot] = p +func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !less(data[a], data[i]) { + i++ + } + for i <= j && less(data[a], data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. 
+func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !less(data[i], data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotLessFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. 
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentLessFunc(data, i, &swaps, less) + j = medianAdjacentLessFunc(data, j, &swaps, less) + k = medianAdjacentLessFunc(data, k, &swaps, less) + } + // Find the median among i, j, k and stores it into j. + j = medianLessFunc(data, i, j, k, &swaps, less) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { + if less(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { + a, b = order2LessFunc(data, a, b, swaps, less) + b, c = order2LessFunc(data, b, c, swaps, less) + a, b = order2LessFunc(data, a, b, swaps, less) + return b +} + +// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
+func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { + return medianLessFunc(data, a-1, a, a+1, swaps, less) +} + +func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortLessFunc(data, a, b, less) + a = b + b += blockSize + } + insertionSortLessFunc(data, a, n, less) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeLessFunc(data, a, a+blockSize, b, less) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeLessFunc(data, a, m, n, less) + } + blockSize *= 2 + } +} + +// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. 
+// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if less(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !less(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. 
+ for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !less(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateLessFunc(data, start, m, end, less) + } + if a < start && start < mid { + symMergeLessFunc(data, a, start, mid, less) + } + if mid < end && end < b { + symMergeLessFunc(data, mid, end, b, less) + } +} + +// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeLessFunc(data, m-i, m, j, less) + i -= j + } else { + swapRangeLessFunc(data, m-i, m+j-i, i, less) + j -= i + } + } + // i == j + swapRangeLessFunc(data, m-i, m, i, less) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 0000000000..efaa1c8b71 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. 
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j] < data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child] < data[first+child+1]) { + child++ + } + if !(data[first+root] < data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. 
func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
	// Slices at or below this length are handled entirely by insertion sort.
	const maxInsertion = 12

	var (
		wasBalanced    = true // whether the last partitioning was reasonably balanced
		wasPartitioned = true // whether the slice was already partitioned
	)

	for {
		length := b - a

		if length <= maxInsertion {
			insertionSortOrdered(data, a, b)
			return
		}

		// Fall back to heapsort if too many bad choices were made.
		if limit == 0 {
			heapSortOrdered(data, a, b)
			return
		}

		// If the last partitioning was imbalanced, we need to break patterns.
		if !wasBalanced {
			breakPatternsOrdered(data, a, b)
			limit--
		}

		pivot, hint := choosePivotOrdered(data, a, b)
		if hint == decreasingHint {
			reverseRangeOrdered(data, a, b)
			// The chosen pivot was pivot-a elements after the start of the array.
			// After reversing it is pivot-a elements before the end of the array.
			// The idea came from Rust's implementation.
			pivot = (b - 1) - (pivot - a)
			hint = increasingHint
		}

		// The slice is likely already sorted.
		if wasBalanced && wasPartitioned && hint == increasingHint {
			if partialInsertionSortOrdered(data, a, b) {
				return
			}
		}

		// Probably the slice contains many duplicate elements, partition the slice into
		// elements equal to and elements greater than the pivot.
		if a > 0 && !(data[a-1] < data[pivot]) {
			mid := partitionEqualOrdered(data, a, b, pivot)
			a = mid
			continue
		}

		mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
		wasPartitioned = alreadyPartitioned

		leftLen, rightLen := mid-a, b-mid
		balanceThreshold := length / 8
		// Recurse into the smaller side and loop on the larger one, which
		// bounds the recursion depth to O(log n).
		if leftLen < rightLen {
			wasBalanced = leftLen >= balanceThreshold
			pdqsortOrdered(data, a, mid, limit)
			a = mid + 1
		} else {
			wasBalanced = rightLen >= balanceThreshold
			pdqsortOrdered(data, mid+1, b, limit)
			b = mid
		}
	}
}

// partitionOrdered does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
	// Stash the pivot at data[a] so data[a+1:b] is what gets partitioned.
	data[a], data[pivot] = data[pivot], data[a]
	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

	// First pass: if i and j never cross, the range was already partitioned.
	for i <= j && (data[i] < data[a]) {
		i++
	}
	for i <= j && !(data[j] < data[a]) {
		j--
	}
	if i > j {
		data[j], data[a] = data[a], data[j]
		return j, true
	}
	data[i], data[j] = data[j], data[i]
	i++
	j--

	for {
		for i <= j && (data[i] < data[a]) {
			i++
		}
		for i <= j && !(data[j] < data[a]) {
			j--
		}
		if i > j {
			break
		}
		data[i], data[j] = data[j], data[i]
		i++
		j--
	}
	// Put the pivot back into its final sorted position.
	data[j], data[a] = data[a], data[j]
	return j, false
}

// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumes that data[a:b] does not contain elements smaller than the data[pivot].
func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
	// Stash the pivot at data[a] so data[a+1:b] is what gets partitioned.
	data[a], data[pivot] = data[pivot], data[a]
	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

	for {
		for i <= j && !(data[a] < data[i]) {
			i++
		}
		for i <= j && (data[a] < data[j]) {
			j--
		}
		if i > j {
			break
		}
		data[i], data[j] = data[j], data[i]
		i++
		j--
	}
	return i
}

// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
	const (
		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
		shortestShifting = 50 // don't shift any elements on short arrays
	)
	i := a + 1
	for j := 0; j < maxSteps; j++ {
		// Advance past the already-sorted prefix.
		for i < b && !(data[i] < data[i-1]) {
			i++
		}

		if i == b {
			return true
		}

		if b-a < shortestShifting {
			return false
		}

		// Swap the found out-of-order adjacent pair.
		data[i], data[i-1] = data[i-1], data[i]

		// Shift the smaller one to the left.
		if i-a >= 2 {
			for j := i - 1; j >= 1; j-- {
				if !(data[j] < data[j-1]) {
					break
				}
				data[j], data[j-1] = data[j-1], data[j]
			}
		}
		// Shift the greater one to the right.
		if b-i >= 2 {
			for j := i + 1; j < b; j++ {
				if !(data[j] < data[j-1]) {
					break
				}
				data[j], data[j-1] = data[j-1], data[j]
			}
		}
	}
	return false
}

// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
	length := b - a
	if length >= 8 {
		random := xorshift(length)
		modulus := nextPowerOfTwo(length)

		// Swap the three elements around the midpoint with pseudo-random
		// positions elsewhere in the range.
		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
			other := int(uint(random.Next()) & (modulus - 1))
			if other >= length {
				other -= length
			}
			data[idx], data[a+other] = data[a+other], data[idx]
		}
	}
}

// choosePivotOrdered chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
	const (
		shortestNinther = 50
		maxSwaps        = 4 * 3
	)

	l := b - a

	var (
		swaps int
		i     = a + l/4*1
		j     = a + l/4*2
		k     = a + l/4*3
	)

	if l >= 8 {
		if l >= shortestNinther {
			// Tukey ninther method, the idea came from Rust's implementation.
			i = medianAdjacentOrdered(data, i, &swaps)
			j = medianAdjacentOrdered(data, j, &swaps)
			k = medianAdjacentOrdered(data, k, &swaps)
		}
		// Find the median among i, j, k and stores it into j.
		j = medianOrdered(data, i, j, k, &swaps)
	}

	// The number of swaps performed while finding the median reveals whether
	// the sampled elements were already in increasing or decreasing order.
	switch swaps {
	case 0:
		return j, increasingHint
	case maxSwaps:
		return j, decreasingHint
	default:
		return j, unknownHint
	}
}

// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
	if data[b] < data[a] {
		*swaps++
		return b, a
	}
	return a, b
}

// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
	a, b = order2Ordered(data, a, b, swaps)
	b, c = order2Ordered(data, b, c, swaps)
	a, b = order2Ordered(data, a, b, swaps)
	return b
}

// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
	return medianOrdered(data, a-1, a, a+1, swaps)
}

// reverseRangeOrdered reverses data[a:b] in place.
func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
	i := a
	j := b - 1
	for i < j {
		data[i], data[j] = data[j], data[i]
		i++
		j--
	}
}

// swapRangeOrdered swaps the n elements data[a:a+n] with data[b:b+n].
func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
	for i := 0; i < n; i++ {
		data[a+i], data[b+i] = data[b+i], data[a+i]
	}
}

// stableOrdered sorts data[:n] in ascending order, keeping equal elements in
// their original order: insertion-sort fixed-size blocks, then repeatedly
// merge adjacent sorted blocks of doubling size with symMergeOrdered.
func stableOrdered[E constraints.Ordered](data []E, n int) {
	blockSize := 20 // must be > 0
	a, b := 0, blockSize
	for b <= n {
		insertionSortOrdered(data, a, b)
		a = b
		b += blockSize
	}
	insertionSortOrdered(data, a, n)

	for blockSize < n {
		a, b = 0, 2*blockSize
		for b <= n {
			symMergeOrdered(data, a, a+blockSize, b)
			a = b
			b += 2 * blockSize
		}
		// Merge the final partial block, if any, into the last full one.
		if m := a + blockSize; m < n {
			symMergeOrdered(data, a, m, n)
		}
		blockSize *= 2
	}
}

// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-m. Wolog M < N.
// The recursion depth is bound by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
// (In this Ordered variant, "Less" and "Swap" are the inlined < comparisons
// and element swaps.)
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
	// Avoid unnecessary recursions of symMerge
	// by direct insertion of data[a] into data[m:b]
	// if data[a:m] only contains one element.
	if m-a == 1 {
		// Use binary search to find the lowest index i
		// such that data[i] >= data[a] for m <= i < b.
		// Exit the search loop with i == b in case no such index exists.
		i := m
		j := b
		for i < j {
			h := int(uint(i+j) >> 1) // unsigned shift avoids overflow in (i+j)/2
			if data[h] < data[a] {
				i = h + 1
			} else {
				j = h
			}
		}
		// Swap values until data[a] reaches the position before i.
		for k := a; k < i-1; k++ {
			data[k], data[k+1] = data[k+1], data[k]
		}
		return
	}

	// Avoid unnecessary recursions of symMerge
	// by direct insertion of data[m] into data[a:m]
	// if data[m:b] only contains one element.
	if b-m == 1 {
		// Use binary search to find the lowest index i
		// such that data[i] > data[m] for a <= i < m.
		// Exit the search loop with i == m in case no such index exists.
		i := a
		j := m
		for i < j {
			h := int(uint(i+j) >> 1) // unsigned shift avoids overflow in (i+j)/2
			if !(data[m] < data[h]) {
				i = h + 1
			} else {
				j = h
			}
		}
		// Swap values until data[m] reaches the position i.
		for k := m; k > i; k-- {
			data[k], data[k-1] = data[k-1], data[k]
		}
		return
	}

	// General case: symmetric binary search for the rotation point, then
	// rotate and recurse on the two halves (see the SymMerge paper).
	mid := int(uint(a+b) >> 1)
	n := mid + m
	var start, r int
	if m > mid {
		start = n - b
		r = mid
	} else {
		start = a
		r = m
	}
	p := n - 1

	for start < r {
		c := int(uint(start+r) >> 1)
		if !(data[p-c] < data[c]) {
			start = c + 1
		} else {
			r = c
		}
	}

	end := n - start
	if start < m && m < end {
		rotateOrdered(data, start, m, end)
	}
	if a < start && start < mid {
		symMergeOrdered(data, a, start, mid)
	}
	if mid < end && end < b {
		symMergeOrdered(data, mid, end, b)
	}
}

// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
// (This is a block-swapping rotation: the smaller of the two blocks is
// repeatedly swapped into place, GCD-style, until the blocks are equal.)
func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
	i := m - a // length of the left block u
	j := b - m // length of the right block v

	for i != j {
		if i > j {
			swapRangeOrdered(data, m-i, m, j)
			i -= j
		} else {
			swapRangeOrdered(data, m-i, m+j-i, i)
			j -= i
		}
	}
	// i == j
	swapRangeOrdered(data, m-i, m, i)
}
diff --git a/vendor/modules.txt b/vendor/modules.txt index c20fb9c9eb..e7fe287a34 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -46,8 +46,8 @@ github.com/chai2010/gettext-go github.com/chai2010/gettext-go/mo github.com/chai2010/gettext-go/plural github.com/chai2010/gettext-go/po -# github.com/compose-spec/compose-go v1.2.2 -## explicit; go 1.17 +# github.com/compose-spec/compose-go v1.20.0 +## explicit; go 1.19 github.com/compose-spec/compose-go/consts github.com/compose-spec/compose-go/dotenv github.com/compose-spec/compose-go/errdefs @@ -55,7 +55,9 @@ github.com/compose-spec/compose-go/interpolation github.com/compose-spec/compose-go/loader github.com/compose-spec/compose-go/schema github.com/compose-spec/compose-go/template +github.com/compose-spec/compose-go/tree github.com/compose-spec/compose-go/types +github.com/compose-spec/compose-go/utils # github.com/containerd/console v1.0.3 ## explicit; go 1.13 github.com/containerd/console @@ -102,10 +104,8 @@ github.com/creack/pty # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e -## explicit; go 1.15 -github.com/distribution/distribution/v3/digestset -github.com/distribution/distribution/v3/reference +# github.com/distribution/distribution/v3 v3.0.0-20221103125252-ebfa2a0ac0a9 +## explicit; go 1.18 # github.com/distribution/reference v0.5.0 ## explicit; go 1.20 github.com/distribution/reference @@ -348,7 +348,7 @@ github.com/grpc-ecosystem/grpc-gateway/v2/utilities #
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 ## explicit github.com/iancoleman/orderedmap -# github.com/imdario/mergo v0.3.12 +# github.com/imdario/mergo v0.3.16 ## explicit; go 1.13 github.com/imdario/mergo # github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf @@ -449,7 +449,7 @@ github.com/mitchellh/go-homedir # github.com/mitchellh/go-wordwrap v1.0.1 ## explicit; go 1.14 github.com/mitchellh/go-wordwrap -# github.com/mitchellh/mapstructure v1.4.3 +# github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure # github.com/moby/buildkit v0.11.4 @@ -757,6 +757,10 @@ golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts +# golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 +## explicit; go 1.20 +golang.org/x/exp/constraints +golang.org/x/exp/slices # golang.org/x/mod v0.12.0 ## explicit; go 1.17 golang.org/x/mod/semver