From 88b62d2c27211fddf920a8d9b7d136ba58bbf544 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 20 Mar 2025 13:14:42 +0100 Subject: [PATCH] vendor: update c/common Signed-off-by: Giuseppe Scrivano --- go.mod | 26 +- go.sum | 54 +- libpod/oci_conmon_linux.go | 5 +- libpod/stats_linux.go | 3 +- .../handlers/compat/containers_stats_linux.go | 2 +- .../plugins/pkg/ns/README.md | 6 +- .../plugins/pkg/ns/ns_linux.go | 54 +- .../common/libnetwork/netavark/ipam.go | 3 +- .../common/pkg/cgroups/blkio_linux.go | 9 +- .../common/pkg/cgroups/cgroups_linux.go | 26 +- .../common/pkg/cgroups/cpu_linux.go | 9 +- .../common/pkg/cgroups/cpuset_linux.go | 9 +- .../common/pkg/cgroups/memory_linux.go | 9 +- .../common/pkg/cgroups/pids_linux.go | 9 +- .../common/pkg/cgroups/systemd_linux.go | 6 +- .../common/pkg/cgroups/utils_linux.go | 5 +- .../common/pkg/config/containers.conf | 24 +- .../common/pkg/config/containers.conf-freebsd | 22 +- .../pkg/secrets/filedriver/filedriver.go | 8 +- .../containers/common/pkg/secrets/secrets.go | 5 +- .../common/pkg/ssh/connection_golang.go | 5 +- .../common/pkg/sysinfo/sysinfo_linux.go | 2 +- .../containers/common/pkg/util/util.go | 2 +- .../image/v5/docker/archive/transport.go | 3 + .../v5/docker/daemon/daemon_transport.go | 9 +- .../image/v5/docker/docker_client.go | 30 +- .../image/v5/docker/docker_image_dest.go | 6 +- .../image/v5/oci/archive/oci_transport.go | 4 +- .../image/v5/oci/layout/oci_transport.go | 8 +- .../github.com/mdlayher/socket/CHANGELOG.md | 5 + vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 40 +- vendor/github.com/onsi/ginkgo/v2/README.md | 2 +- vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 58 +- .../onsi/ginkgo/v2/deprecated_dsl.go | 8 +- .../onsi/ginkgo/v2/formatter/formatter.go | 12 +- .../ginkgo/v2/ginkgo/build/build_command.go | 14 +- .../onsi/ginkgo/v2/ginkgo/command/abort.go | 6 +- .../onsi/ginkgo/v2/ginkgo/command/command.go | 6 +- .../onsi/ginkgo/v2/ginkgo/command/program.go | 2 - .../ginkgo/v2/ginkgo/internal/gocovmerge.go | 2 +- .../onsi/ginkgo/v2/ginkgo/run/run_command.go | 2 +- .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 3 + .../onsi/ginkgo/v2/internal/failer.go | 2 +- .../interrupt_handler/interrupt_handler.go | 22 +- .../onsi/ginkgo/v2/internal/node.go | 34 +- .../ginkgo/v2/internal/output_interceptor.go | 14 +- .../v2/internal/output_interceptor_unix.go | 2 +- .../parallel_support/client_server.go | 2 +- .../internal/parallel_support/http_client.go | 9 +- .../internal/parallel_support/http_server.go | 4 +- .../internal/parallel_support/rpc_client.go | 2 +- .../internal/parallel_support/rpc_server.go | 10 +- .../parallel_support/server_handler.go | 4 +- .../onsi/ginkgo/v2/internal/report_entry.go | 2 +- .../internal/testingtproxy/testing_t_proxy.go | 43 +- .../onsi/ginkgo/v2/internal/writer.go | 6 +- .../ginkgo/v2/reporters/default_reporter.go | 4 +- .../onsi/ginkgo/v2/reporting_dsl.go | 12 +- vendor/github.com/onsi/ginkgo/v2/table_dsl.go | 46 +- .../github.com/onsi/ginkgo/v2/types/config.go | 42 +- .../onsi/ginkgo/v2/types/deprecated_types.go | 2 +- .../github.com/onsi/ginkgo/v2/types/errors.go | 15 +- .../github.com/onsi/ginkgo/v2/types/flags.go | 10 +- .../onsi/ginkgo/v2/types/label_filter.go | 2 +- .../onsi/ginkgo/v2/types/report_entry.go | 8 +- .../onsi/ginkgo/v2/types/version.go | 2 +- .../opencontainers/cgroups/CODEOWNERS | 1 + .../opencontainers/cgroups/CONTRIBUTING.md | 150 + .../opencontainers/cgroups/GOVERNANCE.md | 63 + .../github.com/opencontainers/cgroups/LICENSE | 201 ++ 
.../opencontainers/cgroups/MAINTAINERS | 8 + .../cgroups/MAINTAINERS_GUIDE.md | 92 + .../opencontainers/cgroups/README.md | 11 + .../opencontainers/cgroups/RELEASES.md | 51 + .../libcontainer => }/cgroups/cgroups.go | 16 +- .../config_blkio_device.go} | 2 +- .../config_hugepages.go} | 2 +- .../config_ifprio_map.go} | 2 +- .../config_linux.go} | 4 +- .../rdma.go => cgroups/config_rdma.go} | 2 +- .../config_unsupported.go} | 2 +- .../cgroups/devices/config/device.go | 174 ++ .../cgroups/devices/config/mknod_unix.go | 14 + .../{runc/libcontainer => }/cgroups/file.go | 18 +- .../libcontainer => }/cgroups/fs/blkio.go | 7 +- .../{runc/libcontainer => }/cgroups/fs/cpu.go | 11 +- .../libcontainer => }/cgroups/fs/cpuacct.go | 38 +- .../libcontainer => }/cgroups/fs/cpuset.go | 101 +- .../libcontainer => }/cgroups/fs/devices.go | 7 +- .../libcontainer => }/cgroups/fs/error.go | 2 +- .../libcontainer => }/cgroups/fs/freezer.go | 39 +- .../{runc/libcontainer => }/cgroups/fs/fs.go | 25 +- .../libcontainer => }/cgroups/fs/hugetlb.go | 9 +- .../libcontainer => }/cgroups/fs/memory.go | 11 +- .../libcontainer => }/cgroups/fs/name.go | 7 +- .../libcontainer => }/cgroups/fs/net_cls.go | 7 +- .../libcontainer => }/cgroups/fs/net_prio.go | 7 +- .../libcontainer => }/cgroups/fs/paths.go | 25 +- .../cgroups/fs/perf_event.go | 7 +- .../libcontainer => }/cgroups/fs/pids.go | 9 +- .../libcontainer => }/cgroups/fs/rdma.go | 9 +- .../libcontainer => }/cgroups/fs2/cpu.go | 11 +- .../libcontainer => }/cgroups/fs2/cpuset.go | 7 +- .../libcontainer => }/cgroups/fs2/create.go | 13 +- .../cgroups/fs2/defaultpath.go | 44 +- .../libcontainer => }/cgroups/fs2/freezer.go | 43 +- .../libcontainer => }/cgroups/fs2/fs2.go | 28 +- .../libcontainer => }/cgroups/fs2/hugetlb.go | 9 +- .../{runc/libcontainer => }/cgroups/fs2/io.go | 7 +- .../libcontainer => }/cgroups/fs2/memory.go | 9 +- .../libcontainer => }/cgroups/fs2/misc.go | 4 +- .../libcontainer => }/cgroups/fs2/pids.go | 9 +- .../libcontainer => }/cgroups/fs2/psi.go | 16 +- .../cgroups/fscommon/rdma.go | 19 +- .../cgroups/fscommon/utils.go | 33 +- .../libcontainer => }/cgroups/getallpids.go | 0 .../cgroups/internal/path/path.go | 52 + .../{runc/libcontainer => }/cgroups/stats.go | 0 .../{runc/libcontainer => }/cgroups/utils.go | 4 +- .../libcontainer => }/cgroups/v1_utils.go | 0 .../runc/libcontainer/configs/config.go | 508 --- .../runc/libcontainer/configs/config_linux.go | 97 - .../libcontainer/configs/configs_fuzzer.go | 9 - .../runc/libcontainer/configs/intelrdt.go | 16 - .../runc/libcontainer/configs/mount.go | 7 - .../runc/libcontainer/configs/mount_linux.go | 66 - .../libcontainer/configs/mount_unsupported.go | 9 - .../runc/libcontainer/configs/namespaces.go | 5 - .../libcontainer/configs/namespaces_linux.go | 133 - .../configs/namespaces_syscall.go | 45 - .../configs/namespaces_syscall_unsupported.go | 13 - .../configs/namespaces_unsupported.go | 7 - .../runc/libcontainer/configs/network.go | 75 - vendor/github.com/pkg/sftp/client.go | 4 +- vendor/github.com/pkg/sftp/server.go | 36 +- vendor/github.com/pkg/sftp/server_posix.go | 21 + vendor/github.com/pkg/sftp/server_windows.go | 156 +- vendor/go.etcd.io/bbolt/.gitignore | 2 + vendor/go.etcd.io/bbolt/.go-version | 2 +- vendor/go.etcd.io/bbolt/Makefile | 62 +- vendor/go.etcd.io/bbolt/OWNERS | 10 + vendor/go.etcd.io/bbolt/README.md | 68 +- .../bbolt/{bolt_unix_aix.go => bolt_aix.go} | 1 - vendor/go.etcd.io/bbolt/bolt_android.go | 90 + vendor/go.etcd.io/bbolt/bolt_arm64.go | 1 - vendor/go.etcd.io/bbolt/bolt_loong64.go | 1 
- vendor/go.etcd.io/bbolt/bolt_mips64x.go | 1 - vendor/go.etcd.io/bbolt/bolt_mipsx.go | 1 - vendor/go.etcd.io/bbolt/bolt_ppc.go | 1 - vendor/go.etcd.io/bbolt/bolt_ppc64.go | 1 - vendor/go.etcd.io/bbolt/bolt_ppc64le.go | 1 - vendor/go.etcd.io/bbolt/bolt_riscv64.go | 1 - vendor/go.etcd.io/bbolt/bolt_s390x.go | 1 - .../{bolt_unix_solaris.go => bolt_solaris.go} | 0 vendor/go.etcd.io/bbolt/bolt_unix.go | 7 +- vendor/go.etcd.io/bbolt/bolt_windows.go | 8 +- vendor/go.etcd.io/bbolt/boltsync_unix.go | 1 - vendor/go.etcd.io/bbolt/bucket.go | 484 ++- vendor/go.etcd.io/bbolt/cursor.go | 99 +- vendor/go.etcd.io/bbolt/db.go | 451 +-- vendor/go.etcd.io/bbolt/errors.go | 76 +- vendor/go.etcd.io/bbolt/errors/errors.go | 84 + vendor/go.etcd.io/bbolt/freelist.go | 410 --- vendor/go.etcd.io/bbolt/freelist_hmap.go | 178 -- .../bbolt/internal/common/bucket.go | 54 + .../go.etcd.io/bbolt/internal/common/inode.go | 115 + .../go.etcd.io/bbolt/internal/common/meta.go | 161 + .../go.etcd.io/bbolt/internal/common/page.go | 391 +++ .../go.etcd.io/bbolt/internal/common/types.go | 40 + .../bbolt/{ => internal/common}/unsafe.go | 10 +- .../go.etcd.io/bbolt/internal/common/utils.go | 64 + .../bbolt/internal/common/verify.go | 67 + .../bbolt/internal/freelist/array.go | 108 + .../bbolt/internal/freelist/freelist.go | 82 + .../bbolt/internal/freelist/hashmap.go | 292 ++ .../bbolt/internal/freelist/shared.go | 310 ++ vendor/go.etcd.io/bbolt/logger.go | 113 + vendor/go.etcd.io/bbolt/mlock_unix.go | 1 - vendor/go.etcd.io/bbolt/node.go | 252 +- vendor/go.etcd.io/bbolt/page.go | 212 -- vendor/go.etcd.io/bbolt/tx.go | 234 +- vendor/go.etcd.io/bbolt/tx_check.go | 206 +- .../x/tools/go/ast/inspector/inspector.go | 124 +- .../x/tools/go/ast/inspector/typeof.go | 2 +- .../x/tools/go/ast/inspector/walk.go | 341 +++ .../x/tools/internal/astutil/edge/edge.go | 295 ++ vendor/gopkg.in/yaml.v2/.travis.yml | 17 + vendor/gopkg.in/yaml.v2/LICENSE | 201 ++ vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + vendor/gopkg.in/yaml.v2/NOTICE | 13 + vendor/gopkg.in/yaml.v2/README.md | 133 + vendor/gopkg.in/yaml.v2/apic.go | 744 +++++ vendor/gopkg.in/yaml.v2/decode.go | 815 +++++ vendor/gopkg.in/yaml.v2/emitterc.go | 1685 ++++++++++ vendor/gopkg.in/yaml.v2/encode.go | 390 +++ vendor/gopkg.in/yaml.v2/parserc.go | 1095 +++++++ vendor/gopkg.in/yaml.v2/readerc.go | 412 +++ vendor/gopkg.in/yaml.v2/resolve.go | 258 ++ vendor/gopkg.in/yaml.v2/scannerc.go | 2711 +++++++++++++++++ vendor/gopkg.in/yaml.v2/sorter.go | 113 + vendor/gopkg.in/yaml.v2/writerc.go | 26 + vendor/gopkg.in/yaml.v2/yaml.go | 478 +++ vendor/gopkg.in/yaml.v2/yamlh.go | 739 +++++ vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 ++ vendor/modules.txt | 52 +- .../pkg/cdi/annotations.go | 4 +- .../pkg/cdi/cache.go | 47 +- .../pkg/cdi/cache_test_darwin.go | 26 + .../pkg/cdi/cache_test_unix.go | 4 +- .../pkg/cdi/container-edits.go | 27 +- .../pkg/cdi/device.go | 2 +- .../container-device-interface/pkg/cdi/doc.go | 51 +- .../pkg/cdi/qualified-device.go | 113 - .../pkg/cdi/registry.go | 178 -- .../pkg/cdi/spec-dirs.go | 3 +- .../pkg/cdi/spec.go | 57 +- .../specs-go/config.go | 79 +- .../specs-go/oci.go | 56 - .../{pkg/cdi => specs-go}/version.go | 86 +- 219 files changed, 15959 insertions(+), 3847 deletions(-) create mode 100644 vendor/github.com/opencontainers/cgroups/CODEOWNERS create mode 100644 vendor/github.com/opencontainers/cgroups/CONTRIBUTING.md create mode 100644 vendor/github.com/opencontainers/cgroups/GOVERNANCE.md create mode 100644 vendor/github.com/opencontainers/cgroups/LICENSE create 
mode 100644 vendor/github.com/opencontainers/cgroups/MAINTAINERS create mode 100644 vendor/github.com/opencontainers/cgroups/MAINTAINERS_GUIDE.md create mode 100644 vendor/github.com/opencontainers/cgroups/README.md create mode 100644 vendor/github.com/opencontainers/cgroups/RELEASES.md rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/cgroups.go (84%) rename vendor/github.com/opencontainers/{runc/libcontainer/configs/blkio_device.go => cgroups/config_blkio_device.go} (99%) rename vendor/github.com/opencontainers/{runc/libcontainer/configs/hugepage_limit.go => cgroups/config_hugepages.go} (91%) rename vendor/github.com/opencontainers/{runc/libcontainer/configs/interface_priority_map.go => cgroups/config_ifprio_map.go} (93%) rename vendor/github.com/opencontainers/{runc/libcontainer/configs/cgroup_linux.go => cgroups/config_linux.go} (98%) rename vendor/github.com/opencontainers/{runc/libcontainer/configs/rdma.go => cgroups/config_rdma.go} (95%) rename vendor/github.com/opencontainers/{runc/libcontainer/configs/cgroup_unsupported.go => cgroups/config_unsupported.go} (92%) create mode 100644 vendor/github.com/opencontainers/cgroups/devices/config/device.go create mode 100644 vendor/github.com/opencontainers/cgroups/devices/config/mknod_unix.go rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/file.go (92%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/blkio.go (97%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/cpu.go (92%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/cpuacct.go (74%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/cpuset.go (67%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/devices.go (71%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/error.go (84%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/freezer.go (79%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/fs.go (89%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/hugetlb.go (83%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/memory.go (96%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/name.go (60%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/net_cls.go (64%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/net_prio.go (64%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/paths.go (82%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/perf_event.go (53%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/pids.go (77%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs/rdma.go (50%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/cpu.go (90%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/cpuset.go (64%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/create.go (91%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/defaultpath.go (57%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/freezer.go (71%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/fs2.go (91%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/hugetlb.go 
(81%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/io.go (95%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/memory.go (95%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/misc.go (87%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/pids.go (84%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fs2/psi.go (82%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fscommon/rdma.go (86%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/fscommon/utils.go (79%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/getallpids.go (100%) create mode 100644 vendor/github.com/opencontainers/cgroups/internal/path/path.go rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/stats.go (100%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/utils.go (99%) rename vendor/github.com/opencontainers/{runc/libcontainer => }/cgroups/v1_utils.go (100%) delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/config.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/mount_linux.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/mount_unsupported.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/network.go create mode 100644 vendor/github.com/pkg/sftp/server_posix.go create mode 100644 vendor/go.etcd.io/bbolt/OWNERS rename vendor/go.etcd.io/bbolt/{bolt_unix_aix.go => bolt_aix.go} (99%) create mode 100644 vendor/go.etcd.io/bbolt/bolt_android.go rename vendor/go.etcd.io/bbolt/{bolt_unix_solaris.go => bolt_solaris.go} (100%) create mode 100644 vendor/go.etcd.io/bbolt/errors/errors.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist_hmap.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bucket.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/inode.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/meta.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/page.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/types.go rename vendor/go.etcd.io/bbolt/{ => internal/common}/unsafe.go (74%) create mode 100644 vendor/go.etcd.io/bbolt/internal/common/utils.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/verify.go create mode 100644 vendor/go.etcd.io/bbolt/internal/freelist/array.go create mode 100644 vendor/go.etcd.io/bbolt/internal/freelist/freelist.go create mode 100644 
vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go create mode 100644 vendor/go.etcd.io/bbolt/internal/freelist/shared.go create mode 100644 vendor/go.etcd.io/bbolt/logger.go delete mode 100644 vendor/go.etcd.io/bbolt/page.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/walk.go create mode 100644 vendor/golang.org/x/tools/internal/astutil/edge/edge.go create mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 vendor/gopkg.in/yaml.v2/NOTICE create mode 100644 vendor/gopkg.in/yaml.v2/README.md create mode 100644 vendor/gopkg.in/yaml.v2/apic.go create mode 100644 vendor/gopkg.in/yaml.v2/decode.go create mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v2/encode.go create mode 100644 vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go create mode 100644 vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_darwin.go delete mode 100644 vendor/tags.cncf.io/container-device-interface/pkg/cdi/qualified-device.go delete mode 100644 vendor/tags.cncf.io/container-device-interface/pkg/cdi/registry.go delete mode 100644 vendor/tags.cncf.io/container-device-interface/specs-go/oci.go rename vendor/tags.cncf.io/container-device-interface/{pkg/cdi => specs-go}/version.go (69%) diff --git a/go.mod b/go.mod index 06e7222e240..65679c9fd48 100644 --- a/go.mod +++ b/go.mod @@ -12,12 +12,12 @@ require ( github.com/buger/goterm v1.0.4 github.com/checkpoint-restore/checkpointctl v1.3.0 github.com/checkpoint-restore/go-criu/v7 v7.2.0 - github.com/containernetworking/plugins v1.5.1 + github.com/containernetworking/plugins v1.6.2 github.com/containers/buildah v1.39.2 - github.com/containers/common v0.62.2-0.20250311121556-b27979403716 + github.com/containers/common v0.62.3-0.20250320113334-33bf9345b5ef github.com/containers/conmon v2.0.20+incompatible github.com/containers/gvisor-tap-vsock v0.8.4 - github.com/containers/image/v5 v5.34.2-0.20250306154130-12497efe55ac + github.com/containers/image/v5 v5.34.3-0.20250311194052-d84dbab374e7 github.com/containers/libhvee v0.10.0 github.com/containers/ocicrypt v1.2.1 github.com/containers/psgo v1.9.0 @@ -53,11 +53,11 @@ require ( github.com/moby/sys/user v0.3.0 github.com/moby/term v0.5.2 github.com/nxadm/tail v1.4.11 - github.com/onsi/ginkgo/v2 v2.22.2 + github.com/onsi/ginkgo/v2 v2.23.1 github.com/onsi/gomega v1.36.2 + github.com/opencontainers/cgroups v0.0.1 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 - github.com/opencontainers/runc v1.2.6 github.com/opencontainers/runtime-spec v1.2.1 github.com/opencontainers/runtime-tools v0.9.1-0.20241108202711-f7e3563b0271 github.com/opencontainers/selinux v1.11.1 @@ -70,7 +70,7 @@ require ( github.com/stretchr/testify v1.10.0 github.com/vbauerster/mpb/v8 v8.9.3 github.com/vishvananda/netlink v1.3.1-0.20250221194427-0af32151e72b - go.etcd.io/bbolt v1.3.11 + go.etcd.io/bbolt v1.4.0 golang.org/x/crypto v0.36.0 golang.org/x/net v0.37.0 golang.org/x/sync v0.12.0 @@ -81,7 +81,7 @@ require ( 
gopkg.in/inf.v0 v0.9.1 gopkg.in/yaml.v3 v3.0.1 sigs.k8s.io/yaml v1.4.0 - tags.cncf.io/container-device-interface v0.8.1 + tags.cncf.io/container-device-interface v1.0.0 ) require ( @@ -166,7 +166,7 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/mdlayher/socket v0.5.0 // indirect + github.com/mdlayher/socket v0.5.1 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -180,11 +180,12 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/oklog/ulid v1.3.1 // indirect + github.com/opencontainers/runc v1.2.6 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pkg/sftp v1.13.7 // indirect + github.com/pkg/sftp v1.13.8 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect @@ -222,14 +223,15 @@ require ( go.opentelemetry.io/otel/trace v1.32.0 // indirect golang.org/x/arch v0.8.0 // indirect golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c // indirect - golang.org/x/mod v0.22.0 // indirect + golang.org/x/mod v0.23.0 // indirect golang.org/x/oauth2 v0.26.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.29.0 // indirect + golang.org/x/tools v0.30.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect google.golang.org/grpc v1.70.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - tags.cncf.io/container-device-interface/specs-go v0.8.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + tags.cncf.io/container-device-interface/specs-go v1.0.0 // indirect ) diff --git a/go.sum b/go.sum index 631476a0670..04142ff06ca 100644 --- a/go.sum +++ b/go.sum @@ -74,18 +74,18 @@ github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++ github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM= github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M= -github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ= -github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM= +github.com/containernetworking/plugins v1.6.2 h1:pqP8Mq923TLyef5g97XfJ/xpDeVek4yF8A4mzy9Tc4U= +github.com/containernetworking/plugins v1.6.2/go.mod h1:SP5UG3jDO9LtmfbBJdP+nl3A1atOtbj2MBOYsnaxy64= github.com/containers/buildah v1.39.2 h1:YaFMNnuTr7wKYKQDHkm7yyP9HhWVrNB4DA+DjYUS9k4= github.com/containers/buildah v1.39.2/go.mod h1:Vb4sDbEq06qQqk29mcGw/1qit8dyukpfL4hwNQ5t+z8= -github.com/containers/common v0.62.2-0.20250311121556-b27979403716 h1:4kwkokczKDAeYELVxxjV5Mpys/sA5TZreq6+cTpzX/M= -github.com/containers/common v0.62.2-0.20250311121556-b27979403716/go.mod 
h1:Dta+lCx83XAeGHtWwTPz+UwpWMiC0nMZQu4LWm1mTEg= +github.com/containers/common v0.62.3-0.20250320113334-33bf9345b5ef h1:OpMI1mIyrKWPGAYlJY9VVjsCXigtk+3ckYE1SN7JxYA= +github.com/containers/common v0.62.3-0.20250320113334-33bf9345b5ef/go.mod h1:vGmB+0zxqbh5Gd0kOKDlC6sIeoMqso3l4ZKg6+G3YFc= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/gvisor-tap-vsock v0.8.4 h1:z7MqcldnXYGaU6uTaKVl7RFxTmbhNsd2UL0CyM3fdBs= github.com/containers/gvisor-tap-vsock v0.8.4/go.mod h1:Guh8d/SiuJ9jlnuIyUjcKkFKQ2qpLhNKPGD1jMoIt2Q= -github.com/containers/image/v5 v5.34.2-0.20250306154130-12497efe55ac h1:6MRfoW2H0PBNfL5vsGeD2yCz/y7BoObc8a/drJLTRXE= -github.com/containers/image/v5 v5.34.2-0.20250306154130-12497efe55ac/go.mod h1:Q0vC30+RLFCPdM9Nvm4eaFH5MvWZPkGFPEL1+EzzOHo= +github.com/containers/image/v5 v5.34.3-0.20250311194052-d84dbab374e7 h1:DqNRPDES8oT32Fk2puucIsTnUbPkhKs8Ozo6nuzQjEM= +github.com/containers/image/v5 v5.34.3-0.20250311194052-d84dbab374e7/go.mod h1:hq675fbWkn/bGJwXNXysDu1PQWMOL9P4i2yDvZlSRdA= github.com/containers/libhvee v0.10.0 h1:7VLv8keWZpHuGmWvyY4c1mVH5V1JYb1G78VC+8AlrM0= github.com/containers/libhvee v0.10.0/go.mod h1:at0h8lRcK5jCKfQgU/e6Io0Mw12F36zRLjXVOXRoDTM= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= @@ -347,8 +347,8 @@ github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBW github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4= -github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= -github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= +github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos= +github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ= github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= @@ -388,10 +388,12 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= -github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/ginkgo/v2 v2.23.1 h1:Ox0cOPv/t8RzKJUfDo9ZKtRvBOJY369sFJnl00CjqwY= +github.com/onsi/ginkgo/v2 v2.23.1/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/opencontainers/cgroups v0.0.1 h1:MXjMkkFpKv6kpuirUa4USFBas573sSAY082B4CiHEVA= +github.com/opencontainers/cgroups v0.0.1/go.mod h1:s8lktyhlGUqM7OSRL5P7eAW6Wb+kWPNvt4qvVfzA5vs= github.com/opencontainers/go-digest v1.0.0 
h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -416,8 +418,8 @@ github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.7 h1:uv+I3nNJvlKZIQGSr8JVQLNHFU9YhhNpvC14Y6KgmSM= -github.com/pkg/sftp v1.13.7/go.mod h1:KMKI0t3T6hfA+lTR/ssZdunHo+uwq7ghoN09/FSu3DY= +github.com/pkg/sftp v1.13.8 h1:Xt7eJ/xqXv7s0VuzFw7JXhZj6Oc1zI6l4GK8KP9sFB0= +github.com/pkg/sftp v1.13.8/go.mod h1:DmvEkvKE2lshEeuo2JMp06yqcx9HVnR7e3zqQl42F3U= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -444,6 +446,8 @@ github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWN github.com/rootless-containers/rootlesskit/v2 v2.3.2 h1:QZk7sKU3+B8UHretEeIg6NSTTpj0o4iHGNhNbJBnHOU= github.com/rootless-containers/rootlesskit/v2 v2.3.2/go.mod h1:RL7YzL02nA2d8HAzt5d1nZnuiAeudQ4oym+HF/7sk7U= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY= github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= @@ -546,8 +550,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= +go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= +go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= @@ -580,10 +584,10 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto 
v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -600,8 +604,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -665,7 +669,6 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -677,7 +680,6 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= @@ -709,8 +711,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= -golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= +golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod 
h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -751,6 +753,8 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -766,7 +770,7 @@ sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= src.elv.sh v0.16.0-rc1.0.20220116211855-fda62502ad7f h1:pjVeIo9Ba6K1Wy+rlwX91zT7A+xGEmxiNRBdN04gDTQ= src.elv.sh v0.16.0-rc1.0.20220116211855-fda62502ad7f/go.mod h1:kPbhv5+fBeUh85nET3wWhHGUaUQ64nZMJ8FwA5v5Olg= -tags.cncf.io/container-device-interface v0.8.1 h1:c0jN4Mt6781jD67NdPajmZlD1qrqQyov/Xfoab37lj0= -tags.cncf.io/container-device-interface v0.8.1/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y= -tags.cncf.io/container-device-interface/specs-go v0.8.0 h1:QYGFzGxvYK/ZLMrjhvY0RjpUavIn4KcmRmVP/JjdBTA= -tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws= +tags.cncf.io/container-device-interface v1.0.0 h1:fbwPQiWZNpXUb9Os6t6JW52rsOppTFUbeJOpNtN1TmI= +tags.cncf.io/container-device-interface v1.0.0/go.mod h1:mmi2aRGmOjK/6NR3TXjLpEIarOJ9qwgZjQ3nTIRwAaA= +tags.cncf.io/container-device-interface/specs-go v1.0.0 h1:8gLw29hH1ZQP9K1YtAzpvkHCjjyIxHZYzBAvlQ+0vD8= +tags.cncf.io/container-device-interface/specs-go v1.0.0/go.mod h1:u86hoFWqnh3hWz3esofRFKbI261bUlvUfLKGrDhJkgQ= diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index 5cc5948d8d6..89374120d9f 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -13,15 +13,14 @@ import ( "strings" "sync" - runcconfig "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/devices" - "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/systemd" "github.com/containers/podman/v5/pkg/errorhandling" "github.com/containers/podman/v5/pkg/rootless" pmount "github.com/containers/storage/pkg/mount" + runcconfig "github.com/opencontainers/cgroups" + devices "github.com/opencontainers/cgroups/devices/config" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" diff --git a/libpod/stats_linux.go b/libpod/stats_linux.go index c2f53fc16f6..7f13dc368c3 100644 --- a/libpod/stats_linux.go +++ b/libpod/stats_linux.go @@ -9,10 +9,9 @@ import ( "syscall" "time" - runccgroup "github.com/opencontainers/runc/libcontainer/cgroups" - 
"github.com/containers/common/pkg/cgroups" "github.com/containers/podman/v5/libpod/define" + runccgroup "github.com/opencontainers/cgroups" "golang.org/x/sys/unix" ) diff --git a/pkg/api/handlers/compat/containers_stats_linux.go b/pkg/api/handlers/compat/containers_stats_linux.go index ccb48dc3b43..e08e2bb1f7e 100644 --- a/pkg/api/handlers/compat/containers_stats_linux.go +++ b/pkg/api/handlers/compat/containers_stats_linux.go @@ -15,7 +15,7 @@ import ( api "github.com/containers/podman/v5/pkg/api/types" "github.com/containers/storage/pkg/system" "github.com/docker/docker/api/types/container" - runccgroups "github.com/opencontainers/runc/libcontainer/cgroups" + runccgroups "github.com/opencontainers/cgroups" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md index 1e265c7a011..e5fef2db700 100644 --- a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md +++ b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md @@ -13,10 +13,10 @@ The `ns.Do()` method provides **partial** control over network namespaces for yo ```go err = targetNs.Do(func(hostNs ns.NetNS) error { + linkAttrs := netlink.NewLinkAttrs() + linkAttrs.Name = "dummy0" dummy := &netlink.Dummy{ - LinkAttrs: netlink.LinkAttrs{ - Name: "dummy0", - }, + LinkAttrs: linkAttrs, } return netlink.LinkAdd(dummy) }) diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go index f260f28132e..5a6aaa33398 100644 --- a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go +++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go @@ -31,6 +31,10 @@ func GetCurrentNS() (NetNS, error) { // return an unexpected network namespace. runtime.LockOSThread() defer runtime.UnlockOSThread() + return getCurrentNSNoLock() +} + +func getCurrentNSNoLock() (NetNS, error) { return GetNS(getCurrentThreadNetNSPath()) } @@ -152,6 +156,54 @@ func GetNS(nspath string) (NetNS, error) { return &netNS{file: fd}, nil } +// Returns a new empty NetNS. +// Calling Close() let the kernel garbage collect the network namespace. +func TempNetNS() (NetNS, error) { + var tempNS NetNS + var err error + var wg sync.WaitGroup + wg.Add(1) + + // Create the new namespace in a new goroutine so that if we later fail + // to switch the namespace back to the original one, we can safely + // leave the thread locked to die without a risk of the current thread + // left lingering with incorrect namespace. + go func() { + defer wg.Done() + runtime.LockOSThread() + + var threadNS NetNS + // save a handle to current network namespace + threadNS, err = getCurrentNSNoLock() + if err != nil { + err = fmt.Errorf("failed to open current namespace: %v", err) + return + } + defer threadNS.Close() + + // create the temporary network namespace + err = unix.Unshare(unix.CLONE_NEWNET) + if err != nil { + return + } + + // get a handle to the temporary network namespace + tempNS, err = getCurrentNSNoLock() + + err2 := threadNS.Set() + if err2 == nil { + // Unlock the current thread only when we successfully switched back + // to the original namespace; otherwise leave the thread locked which + // will force the runtime to scrap the current thread, that is maybe + // not as optimal but at least always safe to do. 
+ runtime.UnlockOSThread() + } + }() + + wg.Wait() + return tempNS, err +} + func (ns *netNS) Path() string { return ns.file.Name() } @@ -173,7 +225,7 @@ func (ns *netNS) Do(toRun func(NetNS) error) error { } containedCall := func(hostNS NetNS) error { - threadNS, err := GetCurrentNS() + threadNS, err := getCurrentNSNoLock() if err != nil { return fmt.Errorf("failed to open current netns: %v", err) } diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go index d348ada0b29..54c6603e8f2 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go @@ -11,6 +11,7 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/libnetwork/util" "go.etcd.io/bbolt" + boltErrors "go.etcd.io/bbolt/errors" ) // IPAM boltdb structure @@ -371,7 +372,7 @@ func (n *netavarkNetwork) removeNetworkIPAMBucket(network *types.Network) error return db.Update(func(tx *bbolt.Tx) error { // Ignore ErrBucketNotFound, can happen if the network never allocated any ips, // i.e. because no container was started. - if err := tx.DeleteBucket([]byte(network.Name)); err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) { + if err := tx.DeleteBucket([]byte(network.Name)); err != nil && !errors.Is(err, boltErrors.ErrBucketNotFound) { return err } return nil diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go index 246f2c231ac..a277660f8a4 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go @@ -11,10 +11,9 @@ import ( "strconv" "strings" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/cgroups/fs2" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fs" + "github.com/opencontainers/cgroups/fs2" ) type linuxBlkioHandler struct { @@ -26,7 +25,7 @@ func getBlkioHandler() *linuxBlkioHandler { } // Apply set the specified constraints -func (c *linuxBlkioHandler) Apply(ctr *CgroupControl, res *configs.Resources) error { +func (c *linuxBlkioHandler) Apply(ctr *CgroupControl, res *cgroups.Resources) error { if ctr.cgroup2 { man, err := fs2.NewManager(ctr.config, filepath.Join(cgroupRoot, ctr.config.Path)) if err != nil { diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go index f39a75e63fd..f40d2159667 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go @@ -8,9 +8,11 @@ import ( "context" "errors" "fmt" + "maps" "math" "os" "path/filepath" + "slices" "strconv" "strings" "sync" @@ -21,11 +23,9 @@ import ( "github.com/containers/storage/pkg/unshare" systemdDbus "github.com/coreos/go-systemd/v22/dbus" "github.com/godbus/dbus/v5" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fs2" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fs2" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" "golang.org/x/sys/unix" ) @@ -44,7 +44,7 @@ var ( // 
CgroupControl controls a cgroup hierarchy type CgroupControl struct { cgroup2 bool - config *configs.Cgroup + config *cgroups.Cgroup systemd bool // List of additional cgroup subsystems joined that // do not have a custom handler. @@ -58,7 +58,7 @@ type controller struct { type controllerHandler interface { Create(*CgroupControl) (bool, error) - Apply(*CgroupControl, *configs.Resources) error + Apply(*CgroupControl, *cgroups.Resources) error Destroy(*CgroupControl) error Stat(*CgroupControl, *cgroups.Stats) error } @@ -297,14 +297,14 @@ func readFileByKeyAsUint64(path, key string) (uint64, error) { } // New creates a new cgroup control -func New(path string, resources *configs.Resources) (*CgroupControl, error) { +func New(path string, resources *cgroups.Resources) (*CgroupControl, error) { cgroup2, err := IsCgroup2UnifiedMode() if err != nil { return nil, err } control := &CgroupControl{ cgroup2: cgroup2, - config: &configs.Cgroup{ + config: &cgroups.Cgroup{ Path: path, Resources: resources, }, @@ -326,7 +326,7 @@ func New(path string, resources *configs.Resources) (*CgroupControl, error) { } // NewSystemd creates a new cgroup control -func NewSystemd(path string, resources *configs.Resources) (*CgroupControl, error) { +func NewSystemd(path string, resources *cgroups.Resources) (*CgroupControl, error) { cgroup2, err := IsCgroup2UnifiedMode() if err != nil { return nil, err @@ -334,7 +334,7 @@ func NewSystemd(path string, resources *configs.Resources) (*CgroupControl, erro control := &CgroupControl{ cgroup2: cgroup2, systemd: true, - config: &configs.Cgroup{ + config: &cgroups.Cgroup{ Path: path, Resources: resources, Rootless: unshare.IsRootless(), @@ -353,7 +353,7 @@ func Load(path string) (*CgroupControl, error) { control := &CgroupControl{ cgroup2: cgroup2, systemd: false, - config: &configs.Cgroup{ + config: &cgroups.Cgroup{ Path: path, }, } @@ -485,7 +485,7 @@ func (c *CgroupControl) DeleteByPath(path string) error { } // Update updates the cgroups -func (c *CgroupControl) Update(resources *configs.Resources) error { +func (c *CgroupControl) Update(resources *cgroups.Resources) error { for _, h := range handlers { if err := h.Apply(c, resources); err != nil { return err @@ -503,7 +503,7 @@ func (c *CgroupControl) AddPid(pid int) error { return fs2.CreateCgroupPath(path, c.config) } - names := maps.Keys(handlers) + names := slices.Collect(maps.Keys(handlers)) for _, c := range c.additionalControllers { if !c.symlink { diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go index c10df7968ec..7a8ffb8be65 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go @@ -8,10 +8,9 @@ import ( "path/filepath" "strconv" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/cgroups/fs2" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fs" + "github.com/opencontainers/cgroups/fs2" ) type linuxCPUHandler struct { @@ -23,7 +22,7 @@ func getCPUHandler() *linuxCPUHandler { } // Apply set the specified constraints -func (c *linuxCPUHandler) Apply(ctr *CgroupControl, res *configs.Resources) error { +func (c *linuxCPUHandler) Apply(ctr *CgroupControl, res *cgroups.Resources) error { if ctr.cgroup2 { man, err := fs2.NewManager(ctr.config, 
filepath.Join(cgroupRoot, ctr.config.Path)) if err != nil { diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go index 28b46fb2580..f9719102078 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go @@ -5,10 +5,9 @@ package cgroups import ( "path/filepath" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/cgroups/fs2" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fs" + "github.com/opencontainers/cgroups/fs2" ) type linuxCpusetHandler struct { @@ -20,7 +19,7 @@ func getCpusetHandler() *linuxCpusetHandler { } // Apply set the specified constraints -func (c *linuxCpusetHandler) Apply(ctr *CgroupControl, res *configs.Resources) error { +func (c *linuxCpusetHandler) Apply(ctr *CgroupControl, res *cgroups.Resources) error { if ctr.cgroup2 { man, err := fs2.NewManager(ctr.config, filepath.Join(cgroupRoot, ctr.config.Path)) if err != nil { diff --git a/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go b/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go index b82945c5b4b..f81bf992b31 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go @@ -7,10 +7,9 @@ import ( "path/filepath" "strconv" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/cgroups/fs2" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fs" + "github.com/opencontainers/cgroups/fs2" ) type linuxMemHandler struct { @@ -22,7 +21,7 @@ func getMemoryHandler() *linuxMemHandler { } // Apply set the specified constraints -func (c *linuxMemHandler) Apply(ctr *CgroupControl, res *configs.Resources) error { +func (c *linuxMemHandler) Apply(ctr *CgroupControl, res *cgroups.Resources) error { if ctr.cgroup2 { man, err := fs2.NewManager(ctr.config, filepath.Join(cgroupRoot, ctr.config.Path)) if err != nil { diff --git a/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go b/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go index 61ee85394b9..c87ebfd165a 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go @@ -5,10 +5,9 @@ package cgroups import ( "path/filepath" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/cgroups/fs2" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fs" + "github.com/opencontainers/cgroups/fs2" ) type linuxPidHandler struct { @@ -20,7 +19,7 @@ func getPidsHandler() *linuxPidHandler { } // Apply set the specified constraints -func (c *linuxPidHandler) Apply(ctr *CgroupControl, res *configs.Resources) error { +func (c *linuxPidHandler) Apply(ctr *CgroupControl, res *cgroups.Resources) error { if ctr.cgroup2 { man, err := fs2.NewManager(ctr.config, filepath.Join(cgroupRoot, ctr.config.Path)) if err != nil { diff --git 
a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go index b723b34cc93..4ae3c0af46f 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go @@ -14,7 +14,7 @@ import ( systemdDbus "github.com/coreos/go-systemd/v22/dbus" "github.com/godbus/dbus/v5" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" ) type BlkioDev struct { @@ -22,7 +22,7 @@ type BlkioDev struct { Bytes uint64 } -func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Conn) error { +func systemdCreate(resources *cgroups.Resources, path string, c *systemdDbus.Conn) error { slice, name := filepath.Split(path) slice = strings.TrimSuffix(slice, "/") @@ -150,7 +150,7 @@ func systemdDestroyConn(path string, c *systemdDbus.Conn) error { return nil } -func resourcesToProps(res *configs.Resources, v2 bool) (map[string]uint64, map[string]string, map[string][]byte, map[string]int64, map[string][]BlkioDev, error) { +func resourcesToProps(res *cgroups.Resources, v2 bool) (map[string]uint64, map[string]string, map[string][]byte, map[string]int64, map[string][]BlkioDev, error) { bMap := make(map[string][]byte) // this array is not used but will be once more resource limits are added sMap := make(map[string]string) diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go index 8cd4a052901..848a59d236c 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go @@ -14,8 +14,7 @@ import ( "sync" "github.com/containers/storage/pkg/fileutils" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -122,7 +121,7 @@ func BlkioFiles(cgroupPath string) (wtFile, wtDevFile string) { } // SetBlkioThrottle sets the throttle limits for the cgroup -func SetBlkioThrottle(res *configs.Resources, cgroupPath string) error { +func SetBlkioThrottle(res *cgroups.Resources, cgroupPath string) error { for _, td := range res.BlkioThrottleReadBpsDevice { if err := WriteFile(cgroupPath, "blkio.throttle.read_bps_device", fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)); err != nil { return err diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 0d22bcf3805..5bc6fa1444e 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -236,13 +236,12 @@ default_sysctls = [ # #mounts = [] -# Default way to to create a Network namespace for the container -# Options are: -# `private` Create private Network Namespace for the container. -# `host` Share host Network Namespace with the container. -# `none` Containers do not use the network +# Default way to create a NET namespace for the container. +# The option is mapped to the **--network** argument for the podman commands and accepts the same values as that option. +# For example, it can be set to `bridge`, `host`, `none`, `pasta` and more; see the podman-create(1) +# manual for all available options. # -#netns = "private" +#netns = "" # Do not modify the `/etc/hosts` file in the container.
Podman assumes control # over the container's `/etc/hosts` file by default; refer to the `--add-host` @@ -381,14 +380,17 @@ default_sysctls = [ #firewall_driver = "" -# The network name of the default network to attach pods to. +# The name of the default network as seen in `podman network ls`. This option only affects the network assignment when +# the bridge network mode is selected, i.e. `--network bridge`. It is the default for rootful containers but not for +# rootless ones. To change the default network mode, use the **netns** option under the `[containers]` table. +# +# Note: This should not be changed while you have any containers using this network. # #default_network = "podman" # The default subnet for the default network given in default_network. -# If a network with that name does not exist, a new network using that name and -# this subnet will be created. -# Must be a valid IPv4 CIDR prefix. +# +# Note: This should not be changed if any containers are currently running on the default network. # #default_subnet = "10.88.0.0/16" @@ -897,7 +899,7 @@ default_sysctls = [ # Linux: # qemu - Open source machine emulator and virtualizer. (Default) # Windows: there are currently two options: -# wsl - Windows Subsystem for Linux (Default) +# wsl - Windows Subsystem for Linux (Default) # hyperv - Windows Server Virtualization # Mac: there are currently two options: # applehv - Default Apple Hypervisor (Default) diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd index f5b51dd2262..205ce433f1b 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd +++ b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd @@ -182,13 +182,12 @@ default_sysctls = [ # #log_tag = "" -# Default way to to create a Network namespace for the container -# Options are: -# `private` Create private Network Namespace for the container. -# `host` Share host Network Namespace with the container. -# `none` Containers do not use the network +# Default way to create a NET namespace for the container. +# The option is mapped to the **--network** argument for the podman commands and accepts the same values as that option. +# For example, it can be set to `bridge`, `host`, `none`, `pasta` and more; see the podman-create(1) +# manual for all available options. # -#netns = "private" +#netns = "" # Create /etc/hosts for the container. By default, container engine manage # /etc/hosts, automatically adding the container's own IP address. @@ -292,14 +291,17 @@ default_sysctls = [ # "/usr/lib/netavark", #] -# The network name of the default network to attach pods to. +# The name of the default network as seen in `podman network ls`. This option only affects the network assignment when +# the bridge network mode is selected, i.e. `--network bridge`. It is the default for rootful containers but not for +# rootless ones. To change the default network mode, use the **netns** option under the `[containers]` table. +# +# Note: This should not be changed while you have any containers using this network. # #default_network = "podman" # The default subnet for the default network given in default_network. -# If a network with that name does not exist, a new network using that name and -# this subnet will be created. -# Must be a valid IPv4 CIDR prefix. +# +# Note: This should not be changed if any containers are currently running on the default network.
# #default_subnet = "10.88.0.0/16" diff --git a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go index 54df4a69e32..33481cbbe6f 100644 --- a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go @@ -5,14 +5,14 @@ import ( "errors" "fmt" "io" + "maps" "os" "path/filepath" - "sort" + "slices" "github.com/containers/common/pkg/secrets/define" "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/lockfile" - "golang.org/x/exp/maps" ) // secretsDataFile is the file where secrets data/payload will be stored @@ -53,9 +53,7 @@ func (d *Driver) List() ([]string, error) { if err != nil { return nil, err } - allID := maps.Keys(secretData) - sort.Strings(allID) - return allID, err + return slices.Sorted(maps.Keys(secretData)), nil } // Lookup returns the bytes associated with a secret ID diff --git a/vendor/github.com/containers/common/pkg/secrets/secrets.go b/vendor/github.com/containers/common/pkg/secrets/secrets.go index 763389a566c..113c6c43b34 100644 --- a/vendor/github.com/containers/common/pkg/secrets/secrets.go +++ b/vendor/github.com/containers/common/pkg/secrets/secrets.go @@ -3,8 +3,10 @@ package secrets import ( "errors" "fmt" + "maps" "os" "path/filepath" + "slices" "strings" "time" @@ -14,7 +16,6 @@ import ( "github.com/containers/common/pkg/secrets/shelldriver" "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/stringid" - "golang.org/x/exp/maps" ) // maxSecretSize is the max size for secret data - 512kB @@ -290,7 +291,7 @@ func (s *SecretsManager) List() ([]Secret, error) { if err != nil { return nil, err } - return maps.Values(secrets), nil + return slices.Collect(maps.Values(secrets)), nil } // LookupSecretData returns secret metadata as well as secret data in bytes. 
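The two secrets changes above are the same migration: the deprecated golang.org/x/exp/maps helpers give way to the standard-library maps package, whose Keys and Values return Go 1.23 iterators that slices.Sorted and slices.Collect consume directly. A minimal standalone sketch of the pattern (the map contents are invented for illustration):

package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	// Stand-in for the driver's secretData map; contents are invented.
	secretData := map[string][]byte{"b": nil, "a": nil, "c": nil}

	// Old: allID := maps.Keys(secretData) (x/exp returned a slice), then
	// sort.Strings(allID). New: the stdlib maps.Keys returns an iterator
	// that slices.Sorted drains into a freshly sorted slice in one step.
	allID := slices.Sorted(maps.Keys(secretData))
	fmt.Println(allID) // [a b c]

	// Likewise, slices.Collect gathers iterator values into a slice (in
	// unspecified order), replacing x/exp's maps.Values.
	values := slices.Collect(maps.Values(secretData))
	fmt.Println(len(values)) // 3
}
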
diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go index 0439354c4a2..7749429d517 100644 --- a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go +++ b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go @@ -6,12 +6,14 @@ import ( "errors" "fmt" "io" + "maps" "net" "net/url" "os" "os/user" "path/filepath" "regexp" + "slices" "strings" "time" @@ -29,7 +31,6 @@ import ( "github.com/skeema/knownhosts" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" - "golang.org/x/exp/maps" ) func golangConnectionCreate(options ConnectionCreateOptions) error { @@ -287,7 +288,7 @@ func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection dedup[fp] = s } - uniq := maps.Values(dedup) + uniq := slices.Collect(maps.Values(dedup)) authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) { return uniq, nil })) diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go index 190ec1e4555..67484ad025d 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go @@ -9,7 +9,7 @@ import ( "github.com/containers/common/pkg/cgroupv2" "github.com/containers/storage/pkg/fileutils" - "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/cgroups" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go index 1e516701898..14464927e4b 100644 --- a/vendor/github.com/containers/common/pkg/util/util.go +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -15,7 +15,7 @@ import ( // StringInSlice determines if a string is in a string slice, returns bool. // -// Deprecated: Use [golang.org/x/exp/slices.Contains] instead. +// Deprecated: Use [slices.Contains] instead. func StringInSlice(s string, sl []string) bool { return slices.Contains(sl, s) } diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go index 39e92cac350..2c3926cca9c 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/transport.go +++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go @@ -101,6 +101,9 @@ func NewReference(path string, ref reference.NamedTagged) (types.ImageReference, // NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index. 
func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) { + if sourceIndex < 0 { + return nil, fmt.Errorf("invalid call to NewIndexReference with negative index %d", sourceIndex) + } return newReference(path, nil, sourceIndex, nil, nil) } diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go index 9eedff74567..9fc0a92fd77 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go @@ -87,10 +87,13 @@ func ParseReference(refString string) (types.ImageReference, error) { // NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { - if id != "" && ref != nil { + switch { + case id != "" && ref != nil: return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") - } - if ref != nil { + case id == "" && ref == nil: + return nil, errors.New("docker-daemon: reference must have at least one of an image ID and a reference string") + + case ref != nil: if reference.IsNameOnly(ref) { return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) } diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 45f113ca3dc..5da375a3bf0 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -998,7 +998,12 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R resp.Body.Close() continue } - return resp.Body, getBlobSize(resp), nil + + size, err := getBlobSize(resp) + if err != nil { + size = -1 + } + return resp.Body, size, nil } if remoteErrors == nil { return nil, 0, nil // fallback to non-external blob @@ -1006,12 +1011,20 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors)) } -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 +func getBlobSize(resp *http.Response) (int64, error) { + hdrs := resp.Header.Values("Content-Length") + if len(hdrs) == 0 { + return -1, errors.New(`Missing "Content-Length" header in response`) } - return size + hdr := hdrs[0] // Equivalent to resp.Header.Get(…) + size, err := strconv.ParseInt(hdr, 10, 64) + if err != nil { // Go’s response reader should already reject such values. + return -1, err + } + if size < 0 { // '-' is not a valid character in Content-Length, so negative values are invalid. Go’s response reader should already reject such values. + return -1, fmt.Errorf(`Invalid negative "Content-Length" %q`, hdr) + } + return size, nil } // getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown). 
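getBlobSize now surfaces a missing, unparsable, or negative Content-Length as an error rather than a silent -1; as the hunks below show, callers that can tolerate an unknown size map the error back to -1 themselves, while blobExists propagates it. An illustrative re-implementation of that contract (parseBlobSize is an invented name, not the vendored function):

package main

import (
	"errors"
	"fmt"
	"net/http"
	"strconv"
)

// parseBlobSize mirrors the new contract sketched above: fail loudly on a
// missing or invalid Content-Length instead of returning -1 silently.
func parseBlobSize(resp *http.Response) (int64, error) {
	hdrs := resp.Header.Values("Content-Length")
	if len(hdrs) == 0 {
		return -1, errors.New(`missing "Content-Length" header`)
	}
	size, err := strconv.ParseInt(hdrs[0], 10, 64)
	if err != nil || size < 0 {
		return -1, fmt.Errorf(`invalid "Content-Length" %q`, hdrs[0])
	}
	return size, nil
}

func main() {
	resp := &http.Response{Header: http.Header{"Content-Length": {"1024"}}}
	size, err := parseBlobSize(resp)
	if err != nil {
		size = -1 // tolerant callers degrade to "size unknown", as getBlob does below
	}
	fmt.Println(size) // 1024
}
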
@@ -1042,7 +1055,10 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty return nil, 0, fmt.Errorf("fetching blob: %w", err) } cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref)) - blobSize := getBlobSize(res) + blobSize, err := getBlobSize(res) + if err != nil { + blobSize = -1 + } reconnectingReader, err := newBodyReader(ctx, c, path, res.Body) if err != nil { diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 3ac43cf9fc6..b08ebc6fec7 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -243,8 +243,12 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference. defer res.Body.Close() switch res.StatusCode { case http.StatusOK: + size, err := getBlobSize(res) + if err != nil { + return false, -1, fmt.Errorf("determining size of blob %s in %s: %w", digest, repo.Name(), err) + } logrus.Debugf("... already exists") - return true, getBlobSize(res), nil + return true, size, nil case http.StatusUnauthorized: logrus.Debugf("... not authorized") return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go index d5fee36310f..1cc3fa3ba49 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -52,13 +52,13 @@ func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) erro return internal.ValidateScope(scope) } -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI archive ImageReference. func ParseReference(reference string) (types.ImageReference, error) { file, image := internal.SplitPathAndImage(reference) return NewReference(file, image) } -// NewReference returns an OCI reference for a file and a image. +// NewReference returns an OCI archive reference for a file and an optional image name annotation (if not ""). func NewReference(file, image string) (types.ImageReference, error) { resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) if err != nil { diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index 800d1aa2921..832f89080ea 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -111,13 +111,13 @@ func newReference(dir, image string, sourceIndex int) (types.ImageReference, err // NewIndexReference returns an OCI reference for a path and a zero-based source manifest index. func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) { + if sourceIndex < 0 { + return nil, fmt.Errorf("invalid call to NewIndexReference with negative index %d", sourceIndex) + } return newReference(dir, "", sourceIndex) } -// NewReference returns an OCI reference for a directory and a image. 
-// -// We do not expose an API supplying the resolvedDir; we could, but recomputing it -// is generally cheap enough that we prefer being confident about the properties of resolvedDir. +// NewReference returns an OCI reference for a directory and an optional image name annotation (if not ""). func NewReference(dir, image string) (types.ImageReference, error) { return newReference(dir, image, -1) } diff --git a/vendor/github.com/mdlayher/socket/CHANGELOG.md b/vendor/github.com/mdlayher/socket/CHANGELOG.md index e1a77c4110f..b83418532d0 100644 --- a/vendor/github.com/mdlayher/socket/CHANGELOG.md +++ b/vendor/github.com/mdlayher/socket/CHANGELOG.md @@ -1,5 +1,10 @@ # CHANGELOG +## v0.5.1 + +- [Improvement]: revert `go.mod` to Go 1.20 to [resolve an issue around Go + module version upgrades](https://github.com/mdlayher/socket/issues/13). + ## v0.5.0 **This is the first release of package socket that only supports Go 1.21+. diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index f9d52e58429..50af9d0baad 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,41 @@ +## 2.23.1 + +## 🚨 For users on MacOS 🚨 + +A long-standing Ginkgo performance issue on MacOS seems to be due to mac's antimalware XProtect. You can follow the instructions [here](https://onsi.github.io/ginkgo/#if-you-are-running-on-macos) to disable it in your terminal. Doing so sped up Ginkgo's own test suite from 1m8s to 47s. + +### Fixes + +Ginkgo's CLI is now a bit clearer if you pass flags in incorrectly: + +- make it clearer that you need to pass a filename to the various profile flags, not an absolute directory [a0e52ff] +- emit an error and exit if the ginkgo invocation includes flags after positional arguments [b799d8d] + +This might cause existing CI builds to fail. If so then it's likely that your CI build was misconfigured and should be corrected. Open an issue if you need help. + +## 2.23.0 + +Ginkgo 2.23.0 adds a handful of methods to `GinkgoT()` to make it compatible with the `testing.TB` interface in Go 1.24. `GinkgoT().Context()`, in particular, is a useful shorthand for generating a new context that will clean itself up in a `DeferCleanup()`. This has subtle behavior differences from the golang implementation but should make sense in a Ginkgo... um... context. + +### Features +- bump to go 1.24.0 - support new testing.TB methods and add a test to cover testing.TB regressions [37a511b] + +### Fixes +- fix edge case where build -o is pointing at an explicit file, not a directory [7556a86] +- Fix binary paths when precompiling multiple suites. 
[4df06c6] + +### Maintenance +- Fix: Correct Markdown list rendering in MIGRATING_TO_V2.md [cbcf39a] +- docs: fix test workflow badge (#1512) [9b261ff] +- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1516) [00f19c8] +- Bump golang.org/x/tools from 0.28.0 to 0.30.0 (#1515) [e98a4df] +- Bump activesupport from 6.0.6.1 to 6.1.7.5 in /docs (#1504) [60cc4e2] +- Bump github-pages from 231 to 232 in /docs (#1447) [fea6f2d] +- Bump rexml from 3.2.8 to 3.3.9 in /docs (#1497) [31d7813] +- Bump webrick from 1.8.1 to 1.9.1 in /docs (#1501) [fc3bbd6] +- Code linting (#1500) [aee0d56] +- change interface{} to any (#1502) [809a710] + ## 2.22.2 ### Maintenance @@ -630,7 +668,7 @@ Ginkgo also uses this progress reporting infrastructure under the hood when hand ### Features - `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes. - As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature. + As a result the **signature of these methods has changed** and now includes a trailing `args ...any`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature. ### Maintenance - Modernize the invocation of Ginkgo in github actions [0ffde58] diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md index cb23ffdf6a8..e3d0c13cc6e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/README.md +++ b/vendor/github.com/onsi/ginkgo/v2/README.md @@ -1,6 +1,6 @@ ![Ginkgo](https://onsi.github.io/ginkgo/images/ginkgo.png) -[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/) +[![test](https://github.com/onsi/ginkgo/actions/workflows/test.yml/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/) --- diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index a3e8237e938..d027bdff930 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -83,9 +83,9 @@ func exitIfErrors(errors []error) { type GinkgoWriterInterface interface { io.Writer - Print(a ...interface{}) - Printf(format string, a ...interface{}) - Println(a ...interface{}) + Print(a ...any) + Printf(format string, a ...any) + Println(a ...any) TeeTo(writer io.Writer) ClearTeeWriters() @@ -243,7 +243,7 @@ for more on how specs are parallelized in Ginkgo. You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite. 
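For reference, a minimal suite bootstrap showing where the suite-level Label() decorators mentioned above are passed; the package and suite names are invented:

package books_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestBooks(t *testing.T) {
	RegisterFailHandler(Fail)
	// A suite-level Label applies to every spec in the suite and can be
	// targeted with the ginkgo CLI's --label-filter flag.
	RunSpecs(t, "Books Suite", Label("integration"))
}
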
*/ -func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { +func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { if suiteDidRun { exitIfErr(types.GinkgoErrors.RerunningSuite()) } @@ -316,7 +316,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { return passed } -func extractSuiteConfiguration(args []interface{}) Labels { +func extractSuiteConfiguration(args []any) Labels { suiteLabels := Labels{} configErrors := []error{} for _, arg := range args { @@ -491,14 +491,14 @@ to Describe the behavior of an object or function and, within that Describe, out You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference */ -func Describe(text string, args ...interface{}) bool { +func Describe(text string, args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) } /* FDescribe focuses specs within the Describe block. */ -func FDescribe(text string, args ...interface{}) bool { +func FDescribe(text string, args ...any) bool { args = append(args, internal.Focus) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) } @@ -506,7 +506,7 @@ func FDescribe(text string, args ...interface{}) bool { /* PDescribe marks specs within the Describe block as pending. */ -func PDescribe(text string, args ...interface{}) bool { +func PDescribe(text string, args ...any) bool { args = append(args, internal.Pending) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) } @@ -522,18 +522,18 @@ var XDescribe = PDescribe var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe /* When is an alias for Describe - it generates the exact same kind of Container node */ -func When(text string, args ...interface{}) bool { +func When(text string, args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) } /* When is an alias for Describe - it generates the exact same kind of Container node */ -func FWhen(text string, args ...interface{}) bool { +func FWhen(text string, args ...any) bool { args = append(args, internal.Focus) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) } /* When is an alias for Describe - it generates the exact same kind of Container node */ -func PWhen(text string, args ...interface{}) bool { +func PWhen(text string, args ...any) bool { args = append(args, internal.Pending) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) } @@ -550,14 +550,14 @@ You can pass It nodes bare functions (func() {}) or functions that receive a Spe You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference */ -func It(text string, args ...interface{}) bool { +func It(text string, args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) } /* FIt allows you to focus an individual It. 
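The subject-node docs above note that an It body may receive a SpecContext, which makes the spec interruptible. A brief sketch under that assumption (the timed operation is invented):

package books_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
)

var _ = Describe("a slow operation", func() {
	// Accepting a SpecContext makes the It interruptible: Ginkgo cancels
	// ctx when the SpecTimeout elapses or the run is interrupted.
	It("completes before its deadline", func(ctx SpecContext) {
		select {
		case <-time.After(50 * time.Millisecond): // stand-in for real work
		case <-ctx.Done():
			Fail("spec was interrupted or timed out")
		}
	}, SpecTimeout(time.Second))
})
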
*/ -func FIt(text string, args ...interface{}) bool { +func FIt(text string, args ...any) bool { args = append(args, internal.Focus) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) } @@ -565,7 +565,7 @@ func FIt(text string, args ...interface{}) bool { /* PIt allows you to mark an individual It as pending. */ -func PIt(text string, args ...interface{}) bool { +func PIt(text string, args ...any) bool { args = append(args, internal.Pending) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) } @@ -611,8 +611,8 @@ BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(c You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure. You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite */ -func BeforeSuite(body interface{}, args ...interface{}) bool { - combinedArgs := []interface{}{body} +func BeforeSuite(body any, args ...any) bool { + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...)) } @@ -630,8 +630,8 @@ AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(co You cannot nest any other Ginkgo nodes within an AfterSuite node's closure. You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite */ -func AfterSuite(body interface{}, args ...interface{}) bool { - combinedArgs := []interface{}{body} +func AfterSuite(body any, args ...any) bool { + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...)) } @@ -667,8 +667,8 @@ If either function receives a context.Context/SpecContext it is considered inter You cannot nest any other Ginkgo nodes within an SynchronizedBeforeSuite node's closure. You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite */ -func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool { - combinedArgs := []interface{}{process1Body, allProcessBody} +func SynchronizedBeforeSuite(process1Body any, allProcessBody any, args ...any) bool { + combinedArgs := []any{process1Body, allProcessBody} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...)) @@ -687,8 +687,8 @@ Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accompli You cannot nest any other Ginkgo nodes within an SynchronizedAfterSuite node's closure. You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite */ -func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool { - combinedArgs := []interface{}{allProcessBody, process1Body} +func SynchronizedAfterSuite(allProcessBody any, process1Body any, args ...any) bool { + combinedArgs := []any{allProcessBody, process1Body} combinedArgs = append(combinedArgs, args...) 
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...)) @@ -703,7 +703,7 @@ BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(co You cannot nest any other Ginkgo nodes within a BeforeEach node's closure. You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach */ -func BeforeEach(args ...interface{}) bool { +func BeforeEach(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...)) } @@ -716,7 +716,7 @@ JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/fun You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach */ -func JustBeforeEach(args ...interface{}) bool { +func JustBeforeEach(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...)) } @@ -731,7 +731,7 @@ AfterEach can take a func() body, or an interruptible func(SpecContext)/func(con You cannot nest any other Ginkgo nodes within an AfterEach node's closure. You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup */ -func AfterEach(args ...interface{}) bool { +func AfterEach(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...)) } @@ -743,7 +743,7 @@ JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach */ -func JustAfterEach(args ...interface{}) bool { +func JustAfterEach(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...)) } @@ -758,7 +758,7 @@ You cannot nest any other Ginkgo nodes within a BeforeAll node's closure. You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall */ -func BeforeAll(args ...interface{}) bool { +func BeforeAll(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...)) } @@ -775,7 +775,7 @@ You cannot nest any other Ginkgo nodes within an AfterAll node's closure. You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall */ -func AfterAll(args ...interface{}) bool { +func AfterAll(args ...any) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...)) } @@ -818,7 +818,7 @@ When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node. 
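Because DeferCleanup synthesizes its cleanup node from the calling context, a call inside a BeforeEach behaves like a paired AfterEach. A small illustrative sketch (the temp-directory scenario is invented):

package books_test

import (
	"os"
	"path/filepath"

	. "github.com/onsi/ginkgo/v2"
)

var _ = Describe("scratch space", func() {
	var dir string

	BeforeEach(func() {
		var err error
		dir, err = os.MkdirTemp("", "spec")
		if err != nil {
			Fail(err.Error())
		}
		// Called inside a BeforeEach, DeferCleanup dynamically generates
		// the equivalent of an AfterEach that runs os.RemoveAll(dir).
		DeferCleanup(os.RemoveAll, dir)
	})

	It("writes to a per-spec directory", func() {
		err := os.WriteFile(filepath.Join(dir, "note.txt"), []byte("ok"), 0o600)
		if err != nil {
			Fail(err.Error())
		}
	})
})
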
You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup */ -func DeferCleanup(args ...interface{}) { +func DeferCleanup(args ...any) { fail := func(message string, cl types.CodeLocation) { global.Failer.Fail(message, cl) } diff --git a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go index f912bbec65a..fd45b8beabe 100644 --- a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go @@ -118,9 +118,9 @@ Use Gomega's gmeasure package instead. You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code */ type Benchmarker interface { - Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) - RecordValue(name string, value float64, info ...interface{}) - RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) + Time(name string, body func(), info ...any) (elapsedTime time.Duration) + RecordValue(name string, value float64, info ...any) + RecordValueWithPrecision(name string, value float64, units string, precision int, info ...any) } /* @@ -129,7 +129,7 @@ Deprecated: Measure() has been removed from Ginkgo 2.0 Use Gomega's gmeasure package instead. You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code */ -func Measure(_ ...interface{}) bool { +func Measure(_ ...any) bool { deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1)) return true } diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 4d5749114c0..f61356db19c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -24,15 +24,15 @@ const ( var SingletonFormatter = New(ColorModeTerminal) -func F(format string, args ...interface{}) string { +func F(format string, args ...any) string { return SingletonFormatter.F(format, args...) } -func Fi(indentation uint, format string, args ...interface{}) string { +func Fi(indentation uint, format string, args ...any) string { return SingletonFormatter.Fi(indentation, format, args...) } -func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { +func Fiw(indentation uint, maxWidth uint, format string, args ...any) string { return SingletonFormatter.Fiw(indentation, maxWidth, format, args...) } @@ -115,15 +115,15 @@ func New(colorMode ColorMode) Formatter { return f } -func (f Formatter) F(format string, args ...interface{}) string { +func (f Formatter) F(format string, args ...any) string { return f.Fi(0, format, args...) } -func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string { +func (f Formatter) Fi(indentation uint, format string, args ...any) string { return f.Fiw(indentation, 0, format, args...) } -func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { +func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...any) string { out := f.style(format) if len(args) > 0 { out = fmt.Sprintf(out, args...) 
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index fd172608437..a071b8d0917 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -55,18 +55,22 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go if suite.State.Is(internal.TestSuiteStateFailedToCompile) { fmt.Println(suite.CompilationError.Error()) } else { - if len(goFlagsConfig.O) == 0 { - goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test") - } else { + var testBinPath string + if len(goFlagsConfig.O) != 0 { stat, err := os.Stat(goFlagsConfig.O) if err != nil { panic(err) } if stat.IsDir() { - goFlagsConfig.O += "/" + suite.PackageName + ".test" + testBinPath = goFlagsConfig.O + "/" + suite.PackageName + ".test" + } else { + testBinPath = goFlagsConfig.O } } - fmt.Printf("Compiled %s\n", goFlagsConfig.O) + if len(testBinPath) == 0 { + testBinPath = path.Join(suite.Path, suite.PackageName+".test") + } + fmt.Printf("Compiled %s\n", testBinPath) } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go index 2efd2860886..f0e7331f7df 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -12,7 +12,7 @@ func Abort(details AbortDetails) { panic(details) } -func AbortGracefullyWith(format string, args ...interface{}) { +func AbortGracefullyWith(format string, args ...any) { Abort(AbortDetails{ ExitCode: 0, Error: fmt.Errorf(format, args...), @@ -20,7 +20,7 @@ func AbortGracefullyWith(format string, args ...interface{}) { }) } -func AbortWith(format string, args ...interface{}) { +func AbortWith(format string, args ...any) { Abort(AbortDetails{ ExitCode: 1, Error: fmt.Errorf(format, args...), @@ -28,7 +28,7 @@ func AbortWith(format string, args ...interface{}) { }) } -func AbortWithUsage(format string, args ...interface{}) { +func AbortWithUsage(format string, args ...any) { Abort(AbortDetails{ ExitCode: 1, Error: fmt.Errorf(format, args...), diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go index 12e0e565914..a9cf9fcb2cf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -24,7 +24,11 @@ func (c Command) Run(args []string, additionalArgs []string) { if err != nil { AbortWithUsage(err.Error()) } - + for _, arg := range args { + if strings.HasPrefix(arg, "-") { + AbortWith("Malformed arguments - make sure all flags appear {{bold}}after{{/}} the Ginkgo subcommand and {{bold}}before{{/}} your list of packages.\n{{gray}}e.g. 
'ginkgo run -p my_package' is valid; `ginkgo -p run my_package` is not.{{/}}") + } + } c.Command(args, additionalArgs) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go index 88dd8d6b07e..c3f6d3a11ef 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -68,7 +68,6 @@ func (p Program) RunAndExit(osArgs []string) { fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) } p.Exiter(exitCode) - return }() args, additionalArgs := []string{}, []string{} @@ -157,7 +156,6 @@ func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { p.EmitUsage(writer) Abort(AbortDetails{ExitCode: 1}) } - return } func (p Program) EmitUsage(writer io.Writer) { diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go index 3c5079ff4c7..87cfa111944 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -89,7 +89,7 @@ func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) } i := 0 - if sortFunc(i) != true { + if !sortFunc(i) { i = sort.Search(len(p.Blocks)-startIndex, sortFunc) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go index aaed4d570e9..b7d77390bb2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -142,7 +142,7 @@ OUTER_LOOP: } if !endTime.IsZero() { - r.suiteConfig.Timeout = endTime.Sub(time.Now()) + r.suiteConfig.Timeout = time.Until(endTime) if r.suiteConfig.Timeout <= 0 { suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout opc.StopAndDrain() diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 02c6739e5be..0ca6e235b49 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -1,6 +1,7 @@ package ginkgo import ( + "context" "testing" "github.com/onsi/ginkgo/v2/internal/testingtproxy" @@ -48,6 +49,8 @@ The portion of the interface returned by GinkgoT() that maps onto methods in the */ type GinkgoTInterface interface { Cleanup(func()) + Chdir(dir string) + Context() context.Context Setenv(kev, value string) Error(args ...any) Errorf(format string, args ...any) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/failer.go b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go index e9bd9565fc7..8c5de9c1603 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/failer.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go @@ -32,7 +32,7 @@ func (f *Failer) GetFailure() types.Failure { return f.failure } -func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { +func (f *Failer) Panic(location types.CodeLocation, forwardedPanic any) { f.lock.Lock() defer f.lock.Unlock() diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go index 8ed86111f78..79bfa87db22 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -40,7 +40,7 @@
func (ic InterruptCause) String() string { } type InterruptStatus struct { - Channel chan interface{} + Channel chan any Level InterruptLevel Cause InterruptCause } @@ -62,14 +62,14 @@ type InterruptHandlerInterface interface { } type InterruptHandler struct { - c chan interface{} + c chan any lock *sync.Mutex level InterruptLevel cause InterruptCause client parallel_support.Client - stop chan interface{} + stop chan any signals []os.Signal - requestAbortCheck chan interface{} + requestAbortCheck chan any } func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { @@ -77,10 +77,10 @@ func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) * signals = []os.Signal{os.Interrupt, syscall.SIGTERM} } handler := &InterruptHandler{ - c: make(chan interface{}), + c: make(chan any), lock: &sync.Mutex{}, - stop: make(chan interface{}), - requestAbortCheck: make(chan interface{}), + stop: make(chan any), + requestAbortCheck: make(chan any), client: client, signals: signals, } @@ -98,9 +98,9 @@ func (handler *InterruptHandler) registerForInterrupts() { signal.Notify(signalChannel, handler.signals...) // cross-process abort handling - var abortChannel chan interface{} + var abortChannel chan any if handler.client != nil { - abortChannel = make(chan interface{}) + abortChannel = make(chan any) go func() { pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) for { @@ -125,7 +125,7 @@ func (handler *InterruptHandler) registerForInterrupts() { }() } - go func(abortChannel chan interface{}) { + go func(abortChannel chan any) { var interruptCause InterruptCause for { select { @@ -151,7 +151,7 @@ func (handler *InterruptHandler) registerForInterrupts() { } if handler.level != oldLevel { close(handler.c) - handler.c = make(chan interface{}) + handler.c = make(chan any) } handler.lock.Unlock() } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 0686f741037..8096950b6c6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -84,7 +84,7 @@ const SuppressProgressReporting = suppressProgressReporting(true) type FlakeAttempts uint type MustPassRepeatedly uint type Offset uint -type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing +type Done chan<- any // Deprecated Done Channel for asynchronous testing type Labels []string type PollProgressInterval time.Duration type PollProgressAfter time.Duration @@ -110,9 +110,9 @@ func UnionOfLabels(labels ...Labels) Labels { return out } -func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) { - decorations := []interface{}{} - remainingArgs := []interface{}{} +func PartitionDecorations(args ...any) ([]any, []any) { + decorations := []any{} + remainingArgs := []any{} for _, arg := range args { if isDecoration(arg) { decorations = append(decorations, arg) @@ -123,7 +123,7 @@ func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) { return decorations, remainingArgs } -func isDecoration(arg interface{}) bool { +func isDecoration(arg any) bool { switch t := reflect.TypeOf(arg); { case t == nil: return false @@ -168,7 +168,7 @@ func isDecoration(arg interface{}) bool { } } -func isSliceOfDecorations(slice interface{}) bool { +func isSliceOfDecorations(slice any) bool { vSlice := reflect.ValueOf(slice) if vSlice.Len() == 0 { return false @@ -184,7 +184,7 @@ func isSliceOfDecorations(slice interface{}) bool { var 
contextType = reflect.TypeOf(new(context.Context)).Elem() var specContextType = reflect.TypeOf(new(SpecContext)).Elem() -func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) { +func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...any) (Node, []error) { baseOffset := 2 node := Node{ ID: UniqueNodeID(), @@ -207,7 +207,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy args = unrollInterfaceSlice(args) - remainingArgs := []interface{}{} + remainingArgs := []any{} // First get the CodeLocation up-to-date for _, arg := range args { switch v := arg.(type) { @@ -223,7 +223,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy labelsSeen := map[string]bool{} trackedFunctionError := false args = remainingArgs - remainingArgs = []interface{}{} + remainingArgs = []any{} // now process the rest of the args for _, arg := range args { switch t := reflect.TypeOf(arg); { @@ -451,7 +451,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy var doneType = reflect.TypeOf(make(Done)) -func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) { +func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg any) (func(SpecContext), bool) { t := reflect.TypeOf(arg) if t.NumOut() > 0 || t.NumIn() > 1 { return nil, false @@ -477,7 +477,7 @@ func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types. var byteType = reflect.TypeOf([]byte{}) -func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) { +func extractSynchronizedBeforeSuiteProc1Body(arg any) (func(SpecContext) []byte, bool) { t := reflect.TypeOf(arg) v := reflect.ValueOf(arg) @@ -505,7 +505,7 @@ func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) }, hasContext } -func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) { +func extractSynchronizedBeforeSuiteAllProcsBody(arg any) (func(SpecContext, []byte), bool) { t := reflect.TypeOf(arg) v := reflect.ValueOf(arg) hasContext, hasByte := false, false @@ -536,11 +536,11 @@ func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecConte var errInterface = reflect.TypeOf((*error)(nil)).Elem() -func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { +func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...any) (Node, []error) { decorations, remainingArgs := PartitionDecorations(args...) 
baseOffset := 2 cl := types.NewCodeLocation(baseOffset) - finalArgs := []interface{}{} + finalArgs := []any{} for _, arg := range decorations { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(Offset(0)): @@ -920,12 +920,12 @@ func (n Nodes) GetMaxMustPassRepeatedly() int { return maxMustPassRepeatedly } -func unrollInterfaceSlice(args interface{}) []interface{} { +func unrollInterfaceSlice(args any) []any { v := reflect.ValueOf(args) if v.Kind() != reflect.Slice { - return []interface{}{args} + return []any{args} } - out := []interface{}{} + out := []any{} for i := 0; i < v.Len(); i++ { el := reflect.ValueOf(v.Index(i).Interface()) if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go index 4a1c0946127..5598f15cbbf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go @@ -69,7 +69,7 @@ type pipePair struct { writer *os.File } -func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) { +func startPipeFactory(pipeChannel chan pipePair, shutdown chan any) { for { //make the next pipe... pair := pipePair{} @@ -101,8 +101,8 @@ type genericOutputInterceptor struct { stderrClone *os.File pipe pipePair - shutdown chan interface{} - emergencyBailout chan interface{} + shutdown chan any + emergencyBailout chan any pipeChannel chan pipePair interceptedContent chan string @@ -139,7 +139,7 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() { interceptor.intercepting = true if interceptor.stdoutClone == nil { interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones() - interceptor.shutdown = make(chan interface{}) + interceptor.shutdown = make(chan any) go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown) } @@ -147,13 +147,13 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() { // we get the pipe from our pipe factory. 
it runs in the background so we can request the next pipe while the spec being intercepted is running interceptor.pipe = <-interceptor.pipeChannel - interceptor.emergencyBailout = make(chan interface{}) + interceptor.emergencyBailout = make(chan any) //Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting go func() { buffer := &bytes.Buffer{} destination := io.MultiWriter(buffer, interceptor.forwardTo) - copyFinished := make(chan interface{}) + copyFinished := make(chan any) reader := interceptor.pipe.reader go func() { io.Copy(destination, reader) @@ -224,7 +224,7 @@ func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor { return &genericOutputInterceptor{ interceptedContent: make(chan string), pipeChannel: make(chan pipePair), - shutdown: make(chan interface{}), + shutdown: make(chan any), implementation: &osGlobalReassigningOutputInterceptorImpl{}, } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go index 8a237f44637..e0f1431d512 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go @@ -13,7 +13,7 @@ func NewOutputInterceptor() OutputInterceptor { return &genericOutputInterceptor{ interceptedContent: make(chan string), pipeChannel: make(chan pipePair), - shutdown: make(chan interface{}), + shutdown: make(chan any), implementation: &dupSyscallOutputInterceptorImpl{}, } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go index b3cd64292ab..4234d802cf3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -30,7 +30,7 @@ type Server interface { Close() Address() string RegisterAlive(node int, alive func() bool) - GetSuiteDone() chan interface{} + GetSuiteDone() chan any GetOutputDestination() io.Writer SetOutputDestination(io.Writer) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go index 6547c7a66e5..4aa10ae4f99 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -34,7 +34,7 @@ func (client *httpClient) Close() error { return nil } -func (client *httpClient) post(path string, data interface{}) error { +func (client *httpClient) post(path string, data any) error { var body io.Reader if data != nil { encoded, err := json.Marshal(data) @@ -54,7 +54,7 @@ func (client *httpClient) post(path string, data interface{}) error { return nil } -func (client *httpClient) poll(path string, data interface{}) error { +func (client *httpClient) poll(path string, data any) error { for { resp, err := http.Get(client.serverHost + path) if err != nil { @@ -153,10 +153,7 @@ func (client *httpClient) PostAbort() error { func (client *httpClient) ShouldAbort() bool { err := client.poll("/abort", nil) - if err == ErrorGone { - return true - } - return false + return err == ErrorGone } func (client *httpClient) Write(p []byte) (int, error) { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go 
b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go index d2c71ab1b25..8a1b7a5bbe8 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ -75,7 +75,7 @@ func (server *httpServer) Address() string { return "http://" + server.listener.Addr().String() } -func (server *httpServer) GetSuiteDone() chan interface{} { +func (server *httpServer) GetSuiteDone() chan any { return server.handler.done } @@ -96,7 +96,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) { // // The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` -func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { +func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object any) bool { defer request.Body.Close() if json.NewDecoder(request.Body).Decode(object) != nil { writer.WriteHeader(http.StatusBadRequest) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go index 59e8e6fd0a6..bb4675a02c1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -35,7 +35,7 @@ func (client *rpcClient) Close() error { return client.client.Close() } -func (client *rpcClient) poll(method string, data interface{}) error { +func (client *rpcClient) poll(method string, data any) error { for { err := client.client.Call(method, voidSender, data) if err == nil { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go index 2620fd562d3..1574f99ac4f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go @@ -25,7 +25,7 @@ type RPCServer struct { handler *ServerHandler } -//Create a new server, automatically selecting a port +// Create a new server, automatically selecting a port func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -37,7 +37,7 @@ func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, e }, nil } -//Start the server. You don't need to `go s.Start()`, just `s.Start()` +// Start the server. You don't need to `go s.Start()`, just `s.Start()` func (server *RPCServer) Start() { rpcServer := rpc.NewServer() rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server @@ -48,17 +48,17 @@ go httpServer.Serve(server.listener) } -//Stop the server +// Stop the server func (server *RPCServer) Close() { server.listener.Close() } -//The address the server can be reached it. Pass this into the `ForwardingReporter`. +// The address the server can be reached it. Pass this into the `ForwardingReporter`.
func (server *RPCServer) Address() string { return server.listener.Addr().String() } -func (server *RPCServer) GetSuiteDone() chan interface{} { +func (server *RPCServer) GetSuiteDone() chan any { return server.handler.done } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go index a6d98793e9f..ab9e11372ca 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -18,7 +18,7 @@ var voidSender Void // It handles all the business logic to avoid duplication between the two servers type ServerHandler struct { - done chan interface{} + done chan any outputDestination io.Writer reporter reporters.Reporter alives []func() bool @@ -46,7 +46,7 @@ func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHan parallelTotal: parallelTotal, outputDestination: os.Stdout, - done: make(chan interface{}), + done: make(chan any), } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go index cc351a39bd6..9c18dc8e586 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go @@ -8,7 +8,7 @@ import ( type ReportEntry = types.ReportEntry -func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) { +func NewReportEntry(name string, cl types.CodeLocation, args ...any) (ReportEntry, error) { out := ReportEntry{ Visibility: types.ReportEntryVisibilityAlways, Name: name, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index 73e26556568..b4ecc7cb83d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -1,6 +1,7 @@ package testingtproxy import ( + "context" "fmt" "io" "os" @@ -19,9 +20,9 @@ type addReportEntryFunc func(names string, args ...any) type ginkgoWriterInterface interface { io.Writer - Print(a ...interface{}) - Printf(format string, a ...interface{}) - Println(a ...interface{}) + Print(a ...any) + Printf(format string, a ...any) + Println(a ...any) } type ginkgoRecoverFunc func() type attachProgressReporterFunc func(func() string) func() @@ -80,11 +81,31 @@ func (t *ginkgoTestingTProxy) Setenv(key, value string) { } } -func (t *ginkgoTestingTProxy) Error(args ...interface{}) { +func (t *ginkgoTestingTProxy) Chdir(dir string) { + currentDir, err := os.Getwd() + if err != nil { + t.fail(fmt.Sprintf("Failed to get current directory: %v", err), 1) + } + + t.cleanup(os.Chdir, currentDir, internal.Offset(1)) + + err = os.Chdir(dir) + if err != nil { + t.fail(fmt.Sprintf("Failed to change directory: %v", err), 1) + } +} + +func (t *ginkgoTestingTProxy) Context() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + t.cleanup(cancel, internal.Offset(1)) + return ctx +} + +func (t *ginkgoTestingTProxy) Error(args ...any) { t.fail(fmt.Sprintln(args...), t.offset) } -func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) { +func (t *ginkgoTestingTProxy) Errorf(format string, args ...any) { t.fail(fmt.Sprintf(format, args...), t.offset) } @@ -100,11 +121,11 @@ func (t *ginkgoTestingTProxy) Failed() bool { 
return t.report().Failed() } -func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { +func (t *ginkgoTestingTProxy) Fatal(args ...any) { t.fail(fmt.Sprintln(args...), t.offset) } -func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { +func (t *ginkgoTestingTProxy) Fatalf(format string, args ...any) { t.fail(fmt.Sprintf(format, args...), t.offset) } @@ -112,11 +133,11 @@ func (t *ginkgoTestingTProxy) Helper() { types.MarkAsHelper(1) } -func (t *ginkgoTestingTProxy) Log(args ...interface{}) { +func (t *ginkgoTestingTProxy) Log(args ...any) { fmt.Fprintln(t.writer, args...) } -func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { +func (t *ginkgoTestingTProxy) Logf(format string, args ...any) { t.Log(fmt.Sprintf(format, args...)) } @@ -128,7 +149,7 @@ func (t *ginkgoTestingTProxy) Parallel() { // No-op } -func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { +func (t *ginkgoTestingTProxy) Skip(args ...any) { t.skip(fmt.Sprintln(args...), t.offset) } @@ -136,7 +157,7 @@ func (t *ginkgoTestingTProxy) SkipNow() { t.skip("skip", t.offset) } -func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { +func (t *ginkgoTestingTProxy) Skipf(format string, args ...any) { t.skip(fmt.Sprintf(format, args...), t.offset) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go index aab42d5fb3c..1c4e0534e4c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -121,15 +121,15 @@ func (w *Writer) ClearTeeWriters() { w.teeWriters = []io.Writer{} } -func (w *Writer) Print(a ...interface{}) { +func (w *Writer) Print(a ...any) { fmt.Fprint(w, a...) } -func (w *Writer) Printf(format string, a ...interface{}) { +func (w *Writer) Printf(format string, a ...any) { fmt.Fprintf(w, format, a...) } -func (w *Writer) Println(a ...interface{}) { +func (w *Writer) Println(a ...any) { fmt.Fprintln(w, a...) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 480730486af..74ad0768b74 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -685,11 +685,11 @@ func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) { } /* Rendering text */ -func (r *DefaultReporter) f(format string, args ...interface{}) string { +func (r *DefaultReporter) f(format string, args ...any) string { return r.formatter.F(format, args...) } -func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string { +func (r *DefaultReporter) fi(indentation uint, format string, args ...any) string { return r.formatter.Fi(indentation, format, args...) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index aa1a35176aa..5bf2e62e90c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -60,7 +60,7 @@ AddReportEntry() must be called within a Subject or Setup node - not in a Contai You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports */ -func AddReportEntry(name string, args ...interface{}) { +func AddReportEntry(name string, args ...any) { cl := types.NewCodeLocation(1) reportEntry, err := internal.NewReportEntry(name, cl, args...) 
if err != nil { @@ -89,7 +89,7 @@ You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#g You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ func ReportBeforeEach(body any, args ...any) bool { - combinedArgs := []interface{}{body} + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...)) @@ -113,7 +113,7 @@ You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#ge You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ func ReportAfterEach(body any, args ...any) bool { - combinedArgs := []interface{}{body} + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...)) @@ -143,7 +143,7 @@ You can learn more about Ginkgo's reporting infrastructure, including generating You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ func ReportBeforeSuite(body any, args ...any) bool { - combinedArgs := []interface{}{body} + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) } @@ -174,8 +174,8 @@ You can learn more about Ginkgo's reporting infrastructure, including generating You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterSuite(text string, body any, args ...interface{}) bool { - combinedArgs := []interface{}{body} +func ReportAfterSuite(text string, body any, args ...any) bool { + combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index 9074a57ac90..b9e0ca9ef76 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -23,7 +23,7 @@ You can learn more about generating EntryDescriptions here: https://onsi.github. */ type EntryDescription string -func (ed EntryDescription) render(args ...interface{}) string { +func (ed EntryDescription) render(args ...any) string { return fmt.Sprintf(string(ed), args...) } @@ -44,7 +44,7 @@ For example: You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns */ -func DescribeTable(description string, args ...interface{}) bool { +func DescribeTable(description string, args ...any) bool { GinkgoHelper() generateTable(description, false, args...) return true @@ -53,7 +53,7 @@ func DescribeTable(description string, args ...interface{}) bool { /* You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. */ -func FDescribeTable(description string, args ...interface{}) bool { +func FDescribeTable(description string, args ...any) bool { GinkgoHelper() args = append(args, internal.Focus) generateTable(description, false, args...) 
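The `interface{}` to `any` conversions here are purely mechanical: `any` has been an alias for `interface{}` since Go 1.18, so the table DSL's variadic signatures keep their behavior. A minimal table spec exercising the `DescribeTable`/`Entry` API touched above might look like the following sketch (illustrative only; the Gomega matchers are an assumed companion dependency, not part of this patch):

```go
package table_test

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Each Entry's trailing values are collected as ...any and passed,
// in order, to the table body function.
var _ = DescribeTable("integer addition",
	func(a, b, sum int) {
		Expect(a + b).To(Equal(sum))
	},
	Entry("positive operands", 1, 2, 3),
	Entry("negative operands", -1, -2, -3),
)
```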
@@ -63,7 +63,7 @@ func FDescribeTable(description string, args ...interface{}) bool { /* You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. */ -func PDescribeTable(description string, args ...interface{}) bool { +func PDescribeTable(description string, args ...any) bool { GinkgoHelper() args = append(args, internal.Pending) generateTable(description, false, args...) @@ -109,7 +109,7 @@ Note that you **must** place define an It inside the body function. You can learn more about DescribeTableSubtree here: https://onsi.github.io/ginkgo/#table-specs And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns */ -func DescribeTableSubtree(description string, args ...interface{}) bool { +func DescribeTableSubtree(description string, args ...any) bool { GinkgoHelper() generateTable(description, true, args...) return true @@ -118,7 +118,7 @@ func DescribeTableSubtree(description string, args ...interface{}) bool { /* You can focus a table with `FDescribeTableSubtree`. This is equivalent to `FDescribe`. */ -func FDescribeTableSubtree(description string, args ...interface{}) bool { +func FDescribeTableSubtree(description string, args ...any) bool { GinkgoHelper() args = append(args, internal.Focus) generateTable(description, true, args...) @@ -128,7 +128,7 @@ func FDescribeTableSubtree(description string, args ...interface{}) bool { /* You can mark a table as pending with `PDescribeTableSubtree`. This is equivalent to `PDescribe`. */ -func PDescribeTableSubtree(description string, args ...interface{}) bool { +func PDescribeTableSubtree(description string, args ...any) bool { GinkgoHelper() args = append(args, internal.Pending) generateTable(description, true, args...) @@ -144,9 +144,9 @@ var XDescribeTableSubtree = PDescribeTableSubtree TableEntry represents an entry in a table test. You generally use the `Entry` constructor. */ type TableEntry struct { - description interface{} - decorations []interface{} - parameters []interface{} + description any + decorations []any + parameters []any codeLocation types.CodeLocation } @@ -162,7 +162,7 @@ If you want to generate interruptible specs simply write a Table function that a You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs */ -func Entry(description interface{}, args ...interface{}) TableEntry { +func Entry(description any, args ...any) TableEntry { GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)} @@ -171,7 +171,7 @@ func Entry(description interface{}, args ...interface{}) TableEntry { /* You can focus a particular entry with FEntry. This is equivalent to FIt. */ -func FEntry(description interface{}, args ...interface{}) TableEntry { +func FEntry(description any, args ...any) TableEntry { GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) decorations = append(decorations, internal.Focus) @@ -181,7 +181,7 @@ func FEntry(description interface{}, args ...interface{}) TableEntry { /* You can mark a particular entry as pending with PEntry. This is equivalent to PIt. */ -func PEntry(description interface{}, args ...interface{}) TableEntry { +func PEntry(description any, args ...any) TableEntry { GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) 
decorations = append(decorations, internal.Pending) @@ -196,17 +196,17 @@ var XEntry = PEntry var contextType = reflect.TypeOf(new(context.Context)).Elem() var specContextType = reflect.TypeOf(new(SpecContext)).Elem() -func generateTable(description string, isSubtree bool, args ...interface{}) { +func generateTable(description string, isSubtree bool, args ...any) { GinkgoHelper() cl := types.NewCodeLocation(0) - containerNodeArgs := []interface{}{cl} + containerNodeArgs := []any{cl} entries := []TableEntry{} - var internalBody interface{} + var internalBody any var internalBodyType reflect.Type - var tableLevelEntryDescription interface{} - tableLevelEntryDescription = func(args ...interface{}) string { + var tableLevelEntryDescription any + tableLevelEntryDescription = func(args ...any) string { out := []string{} for _, arg := range args { out = append(out, fmt.Sprint(arg)) @@ -265,7 +265,7 @@ func generateTable(description string, isSubtree bool, args ...interface{}) { err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation) } - internalNodeArgs := []interface{}{entry.codeLocation} + internalNodeArgs := []any{entry.codeLocation} internalNodeArgs = append(internalNodeArgs, entry.decorations...) hasContext := false @@ -290,7 +290,7 @@ func generateTable(description string, isSubtree bool, args ...interface{}) { if err != nil { panic(err) } - invokeFunction(internalBody, append([]interface{}{c}, entry.parameters...)) + invokeFunction(internalBody, append([]any{c}, entry.parameters...)) }) if isSubtree { exitIfErr(types.GinkgoErrors.ContextsCannotBeUsedInSubtreeTables(cl)) @@ -316,7 +316,7 @@ func generateTable(description string, isSubtree bool, args ...interface{}) { pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...)) } -func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value { +func invokeFunction(function any, parameters []any) []reflect.Value { inValues := make([]reflect.Value, len(parameters)) funcType := reflect.TypeOf(function) @@ -339,7 +339,7 @@ func invokeFunction(function interface{}, parameters []interface{}) []reflect.Va return reflect.ValueOf(function).Call(inValues) } -func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error { +func validateParameters(function any, parameters []any, kind string, cl types.CodeLocation, hasContext bool) error { funcType := reflect.TypeOf(function) limit := funcType.NumIn() offset := 0 @@ -377,7 +377,7 @@ func validateParameters(function interface{}, parameters []interface{}, kind str return nil } -func computeValue(parameter interface{}, t reflect.Type) reflect.Value { +func computeValue(parameter any, t reflect.Type) reflect.Value { if parameter == nil { return reflect.Zero(t) } else { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 8c0dfab8c0e..3d543c1219b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -257,8 +257,12 @@ var FlagSections = GinkgoFlagSections{ {Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"}, {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"}, {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"}, - {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"}, - {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance 
Analysis"}, + {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis", + Description: "When generating a cover files, please pass a filename {{bold}}not{{/}} a path. To specify a different directory use {{magenta}}--output-dir{{/}}.", + }, + {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis", + Description: "When generating profile files, please pass filenames {{bold}}not{{/}} a path. Ginkgo will generate a profile file with the given name in the package's directory. To specify a different directory use {{magenta}}--output-dir{{/}}.", + }, {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests", Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."}, {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"}, @@ -365,7 +369,7 @@ var ReporterConfigFlags = GinkgoFlags{ func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) { flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...) flags = flags.WithPrefix("ginkgo") - bindings := map[string]interface{}{ + bindings := map[string]any{ "S": suiteConfig, "R": reporterConfig, "D": &deprecatedConfig{}, @@ -572,7 +576,7 @@ var GoBuildFlags = GinkgoFlags{ // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI var GoRunFlags = GinkgoFlags{ {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis", - Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`}, + Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover. Must be passed a filename, not a path. Use output-dir to control the location of the output.`}, {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis", Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. 
Preserves test binary.`}, {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", @@ -600,6 +604,22 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails()) } + if strings.ContainsRune(goFlagsConfig.CoverProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--coverprofile", goFlagsConfig.CoverProfile)) + } + if strings.ContainsRune(goFlagsConfig.CPUProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--cpuprofile", goFlagsConfig.CPUProfile)) + } + if strings.ContainsRune(goFlagsConfig.MemProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--memprofile", goFlagsConfig.MemProfile)) + } + if strings.ContainsRune(goFlagsConfig.BlockProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--blockprofile", goFlagsConfig.BlockProfile)) + } + if strings.ContainsRune(goFlagsConfig.MutexProfile, os.PathSeparator) { + errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--mutexprofile", goFlagsConfig.MutexProfile)) + } + //initialize the output directory if cliConfig.OutputDir != "" { err := os.MkdirAll(cliConfig.OutputDir, 0777) @@ -646,7 +666,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, - map[string]interface{}{ + map[string]any{ "Go": &goFlagsConfig, }, ) @@ -665,7 +685,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...) flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...) flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...) - bindings := map[string]interface{}{ + bindings := map[string]any{ "S": &suiteConfig, "R": &reporterConfig, "Go": &goFlagsConfig, @@ -677,7 +697,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC // GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) { flags := GoRunFlags.WithPrefix("test") - bindings := map[string]interface{}{ + bindings := map[string]any{ "Go": &goFlagsConfig, } @@ -699,7 +719,7 @@ func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterCo flags = flags.CopyAppend(GoBuildFlags...) flags = flags.CopyAppend(GoRunFlags...) - bindings := map[string]interface{}{ + bindings := map[string]any{ "S": suiteConfig, "R": reporterConfig, "C": cliConfig, @@ -720,7 +740,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter flags = flags.CopyAppend(GoBuildFlags...) flags = flags.CopyAppend(GoRunFlags...) - bindings := map[string]interface{}{ + bindings := map[string]any{ "S": suiteConfig, "R": reporterConfig, "C": cliConfig, @@ -736,7 +756,7 @@ func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig flags := GinkgoCLISharedFlags flags = flags.CopyAppend(GoBuildFlags...) 
- bindings := map[string]interface{}{ + bindings := map[string]any{ "C": cliConfig, "Go": goFlagsConfig, "D": &deprecatedConfig{}, @@ -760,7 +780,7 @@ func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) { flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package") - bindings := map[string]interface{}{ + bindings := map[string]any{ "C": cliConfig, } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go index 17922304b63..518989a8440 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go @@ -113,7 +113,7 @@ type DeprecatedSpecFailure struct { type DeprecatedSpecMeasurement struct { Name string - Info interface{} + Info any Order int Results []float64 diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 6bb72d00ccd..854252ac2a6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -88,7 +88,7 @@ body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, n } } -func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error { +func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic any, cl CodeLocation) error { return GinkgoError{ Heading: "Assertion or Panic detected during tree construction", Message: formatter.F( @@ -189,7 +189,7 @@ func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl } } -func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { +func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator any) error { return GinkgoError{ Heading: "Unknown Decorator", Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), @@ -345,7 +345,7 @@ func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error { } /* ReportEntry errors */ -func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error { +func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg any) error { return GinkgoError{ Heading: "Too Many ReportEntry Values", Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg), @@ -539,7 +539,7 @@ func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { /* Configuration errors */ -func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error { +func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value any) error { return GinkgoError{ Heading: "Unknown Type passed to RunSpecs", Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), @@ -629,6 +629,13 @@ func (g ginkgoErrors) BothRepeatAndUntilItFails() error { } } +func (g ginkgoErrors) ExpectFilenameNotPath(flag string, path string) error { + return GinkgoError{ + Heading: fmt.Sprintf("%s expects a filename but was given a path: %s", flag, path), + Message: fmt.Sprintf("%s takes a filename, not a path. 
Use --output-dir to specify a directory to collect all test outputs.", flag), + } +} + /* Stack-Trace parsing errors */ func (g ginkgoErrors) FailedToParseStackTrace(message string) error { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index de69f3022de..8409653f971 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -92,7 +92,7 @@ func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) { type GinkgoFlagSet struct { flags GinkgoFlags - bindings interface{} + bindings any sections GinkgoFlagSections extraGoFlagsSection GinkgoFlagSection @@ -101,7 +101,7 @@ type GinkgoFlagSet struct { } // Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet -func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) { +func NewGinkgoFlagSet(flags GinkgoFlags, bindings any, sections GinkgoFlagSections) (GinkgoFlagSet, error) { return bindFlagSet(GinkgoFlagSet{ flags: flags, bindings: bindings, @@ -110,7 +110,7 @@ func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFl } // Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet -func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { +func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings any, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { return bindFlagSet(GinkgoFlagSet{ flags: flags, bindings: bindings, @@ -335,7 +335,7 @@ func (f GinkgoFlagSet) substituteUsage() { fmt.Fprintln(f.flagSet.Output(), f.Usage()) } -func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) { +func valueAtKeyPath(root any, keyPath string) (reflect.Value, bool) { if len(keyPath) == 0 { return reflect.Value{}, false } @@ -433,7 +433,7 @@ func (ssv stringSliceVar) Set(s string) error { } // given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
-func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { +func GenerateFlagArgs(flags GinkgoFlags, bindings any) ([]string, error) { result := []string{} for _, flag := range flags { name := flag.ExportAs diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index 7fdc8aa23f3..40a909b6d54 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -343,7 +343,7 @@ func tokenize(input string) func() (*treeNode, error) { consumeUntil := func(cutset string) (string, int) { j := i for ; j < len(runes); j++ { - if strings.IndexRune(cutset, runes[j]) >= 0 { + if strings.ContainsRune(cutset, runes[j]) { break } } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go index 7b1524b52e8..63f7a9f6dad 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -9,18 +9,18 @@ import ( // ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports // and across the network connection when running in parallel type ReportEntryValue struct { - raw interface{} //unexported to prevent gob from freaking out about unregistered structs + raw any //unexported to prevent gob from freaking out about unregistered structs AsJSON string Representation string } -func WrapEntryValue(value interface{}) ReportEntryValue { +func WrapEntryValue(value any) ReportEntryValue { return ReportEntryValue{ raw: value, } } -func (rev ReportEntryValue) GetRawValue() interface{} { +func (rev ReportEntryValue) GetRawValue() any { return rev.raw } @@ -118,7 +118,7 @@ func (entry ReportEntry) StringRepresentation() string { // If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be // a JSON-decoded {}interface. If you want to reconstitute your original object you can decode the entry.Value.AsJSON // field yourself. -func (entry ReportEntry) GetRawValue() interface{} { +func (entry ReportEntry) GetRawValue() any { return entry.Value.GetRawValue() } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 879e1d86cb8..8d38790bf25 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.22.2" +const VERSION = "2.23.1" diff --git a/vendor/github.com/opencontainers/cgroups/CODEOWNERS b/vendor/github.com/opencontainers/cgroups/CODEOWNERS new file mode 100644 index 00000000000..7201e35ac92 --- /dev/null +++ b/vendor/github.com/opencontainers/cgroups/CODEOWNERS @@ -0,0 +1 @@ +* @maintainer1 @maintainer2 @maintainer3 diff --git a/vendor/github.com/opencontainers/cgroups/CONTRIBUTING.md b/vendor/github.com/opencontainers/cgroups/CONTRIBUTING.md new file mode 100644 index 00000000000..135abcf02fd --- /dev/null +++ b/vendor/github.com/opencontainers/cgroups/CONTRIBUTING.md @@ -0,0 +1,150 @@ +# Contribution Guidelines + +Development happens on GitHub. +Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list). + +The content of this repository is licensed under the [Apache License, Version 2.0](LICENSE). 
+ +## Code of Conduct + +Participation in the Open Container community is governed by [Open Container Code of Conduct][code-of-conduct]. + +## Meetings + +The contributors and maintainers of all OCI projects have monthly meetings at 2:00 PM (USA Pacific) on the first Wednesday of every month. +There is an [iCalendar][rfc5545] format for the meetings [here][meeting.ics]. +Everyone is welcome to participate via [UberConference web][UberConference] or audio-only: +1 415 968 0849 (no PIN needed). +An initial agenda will be posted to the [mailing list](#mailing-list) in the week before each meeting, and everyone is welcome to propose additional topics or suggest other agenda alterations there. +Minutes from past meetings are archived [here][minutes]. + +## Mailing list + +You can subscribe and browse the mailing list on [Google Groups][mailing-list]. + +## IRC + +OCI discussion happens on #opencontainers on [Freenode][] ([logs][irc-logs]). + +## Git + +### Security issues + +If you are reporting a security issue, do not create an issue or file a pull +request on GitHub. Instead, disclose the issue responsibly by sending an email +to security@opencontainers.org (which is inhabited only by the maintainers of +the various OCI projects). + +### Pull requests are always welcome + +We are always thrilled to receive pull requests, and do our best to +process them as fast as possible. Not sure if that typo is worth a pull +request? Do it! We will appreciate it. + +If your pull request is not accepted on the first try, don't be +discouraged! If there's a problem with the implementation, hopefully you +received feedback on what to improve. + +We're trying very hard to keep the project lean and focused. We don't want it +to do everything for everybody. This means that we might decide against +incorporating a new feature. + +### Conventions + +Fork the repo and make changes on your fork in a feature branch. +For larger bugs and enhancements, consider filing a leader issue or mailing-list thread for discussion that is independent of the implementation. +Small changes or changes that have been discussed on the [project mailing list](#mailing-list) may be submitted without a leader issue. + +If the project has a test suite, submit unit tests for your changes. Take a +look at existing tests for inspiration. Run the full test suite on your branch +before submitting a pull request. + +Update the documentation when creating or modifying features. Test +your documentation changes for clarity, concision, and correctness, as +well as a clean documentation build. + +Pull request descriptions should be as clear as possible and include a +reference to all the issues that they address. + +Commit messages must start with a capitalized and short summary +written in the imperative, followed by an optional, more detailed +explanatory text which is separated from the summary by an empty line. + +Code review comments may be added to your pull request. Discuss, then make the +suggested modifications and push additional commits to your feature branch. Be +sure to post a comment after pushing. The new commits will show up in the pull +request automatically, but the reviewers will not be notified unless you +comment. + +Before the pull request is merged, make sure that you squash your commits into +logical units of work using `git rebase -i` and `git push -f`. After every +commit the test suite (if any) should be passing.
Include documentation changes +in the same commit so that a revert would remove all traces of the feature or +fix. + +Commits that fix or close an issue should include a reference like `Closes #XXX` +or `Fixes #XXX`, which will automatically close the issue when merged. + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the +patch, which certifies that you wrote it or otherwise have the right to +pass it on as an open-source patch. The rules are pretty simple: if you +can certify the below (from [developercertificate.org][]): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. + +[code-of-conduct]: https://github.com/opencontainers/tob/blob/d2f9d68c1332870e40693fe077d311e0742bc73d/code-of-conduct.md +[developercertificate.org]: http://developercertificate.org/ +[Freenode]: https://freenode.net/ +[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/ +[mailing-list]: https://groups.google.com/a/opencontainers.org/forum/#!forum/dev +[meeting.ics]: https://github.com/opencontainers/runtime-spec/blob/master/meeting.ics +[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/ +[rfc5545]: https://tools.ietf.org/html/rfc5545 +[UberConference]: https://www.uberconference.com/opencontainers diff --git a/vendor/github.com/opencontainers/cgroups/GOVERNANCE.md b/vendor/github.com/opencontainers/cgroups/GOVERNANCE.md new file mode 100644 index 00000000000..3b7b32f788c --- /dev/null +++ b/vendor/github.com/opencontainers/cgroups/GOVERNANCE.md @@ -0,0 +1,63 @@ +# Project governance + +The [OCI charter][charter] §5.b.viii tasks an OCI Project's maintainers (listed in the repository's MAINTAINERS file and sometimes referred to as "the TDC", [§5.e][charter]) with: + +> Creating, maintaining and enforcing governance guidelines for the TDC, approved by the maintainers, and which shall be posted visibly for the TDC. 
+ +This section describes generic rules and procedures for fulfilling that mandate. + +## Proposing a motion + +A maintainer SHOULD propose a motion on the dev@opencontainers.org mailing list (except [security issues](#security-issues)) with another maintainer as a co-sponsor. + +## Voting + +Voting on a proposed motion SHOULD happen on the dev@opencontainers.org mailing list (except [security issues](#security-issues)) with maintainers posting LGTM or REJECT. +Maintainers MAY also explicitly not vote by posting ABSTAIN (which is useful to revert a previous vote). +Maintainers MAY post multiple times (e.g. as they revise their position based on feedback), but only their final post counts in the tally. +A proposed motion is adopted if two-thirds of votes cast, a quorum having voted, are in favor of the release. + +Voting SHOULD remain open for a week to collect feedback from the wider community and allow the maintainers to digest the proposed motion. +Under exceptional conditions (e.g. non-major security fix releases) proposals which reach quorum with unanimous support MAY be adopted earlier. + +A maintainer MAY choose to reply with REJECT. +A maintainer posting a REJECT MUST include a list of concerns or links to written documentation for those concerns (e.g. GitHub issues or mailing-list threads). +The maintainers SHOULD try to resolve the concerns and wait for the rejecting maintainer to change their opinion to LGTM. +However, a motion MAY be adopted with REJECTs, as outlined in the previous paragraphs. + +## Quorum + +A quorum is established when at least two-thirds of maintainers have voted. + +For projects that are not specifications, a [motion to release](#release-approval) MAY be adopted if the tally is at least three LGTMs and no REJECTs, even if three votes does not meet the usual two-thirds quorum. + +## Amendments + +The [project governance](#project-governance) rules and procedures MAY be amended or replaced using the procedures themselves. +The MAINTAINERS of this project governance document is the total set of MAINTAINERS from all Open Containers projects (go-digest, image-spec, image-tools, runC, runtime-spec, runtime-tools, and selinux). + +## Subject templates + +Maintainers are busy and get lots of email. +To make project proposals recognizable, proposed motions SHOULD use the following subject templates. + +### Proposing a motion + +> [{project} VOTE]: {motion description} (closes {end of voting window}) + +For example: + +> [runtime-spec VOTE]: Tag 0647920 as 1.0.0-rc (closes 2016-06-03 20:00 UTC) + +### Tallying results + +After voting closes, a maintainer SHOULD post a tally to the motion thread with a subject template like: + +> [{project} {status}]: {motion description} (+{LGTMs} -{REJECTs} #{ABSTAINs}) + +Where `{status}` is either `adopted` or `rejected`. +For example: + +> [runtime-spec adopted]: Tag 0647920 as 1.0.0-rc (+6 -0 #3) + +[charter]: https://www.opencontainers.org/about/governance diff --git a/vendor/github.com/opencontainers/cgroups/LICENSE b/vendor/github.com/opencontainers/cgroups/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/opencontainers/cgroups/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/opencontainers/cgroups/MAINTAINERS b/vendor/github.com/opencontainers/cgroups/MAINTAINERS new file mode 100644 index 00000000000..413edcb7d33 --- /dev/null +++ b/vendor/github.com/opencontainers/cgroups/MAINTAINERS @@ -0,0 +1,8 @@ +Akihiro Suda (@AkihiroSuda) +Aleksa Sarai (@cyphar) +Kir Kolyshkin (@kolyshkin) +Mrunal Patel (@mrunalp) +Sebastiaan van Stijn (@thaJeztah) +Odin Ugedal (@odinuge) +Peter Hunt (@haircommander) +Davanum Srinivas (@dims) diff --git a/vendor/github.com/opencontainers/cgroups/MAINTAINERS_GUIDE.md b/vendor/github.com/opencontainers/cgroups/MAINTAINERS_GUIDE.md new file mode 100644 index 00000000000..8e96917473f --- /dev/null +++ b/vendor/github.com/opencontainers/cgroups/MAINTAINERS_GUIDE.md @@ -0,0 +1,92 @@ +## Introduction + +Dear maintainer. Thank you for investing the time and energy to help +make this project as useful as possible. Maintaining a project is difficult, +sometimes unrewarding work. Sure, you will get to contribute cool +features to the project. But most of your time will be spent reviewing, +cleaning up, documenting, answering questions, justifying design +decisions - while everyone has all the fun! But remember - the quality +of the maintainers' work is what distinguishes the good projects from the +great. So please be proud of your work, even the unglamourous parts, +and encourage a culture of appreciation and respect for *every* aspect +of improving the project - not just the hot new features. + +This document is a manual for maintainers old and new. It explains what +is expected of maintainers, how they should work, and what tools are +available to them. + +This is a living document - if you see something out of date or missing, +speak up! + +## What are a maintainer's responsibilities? + +It is every maintainer's responsibility to: + +* Expose a clear roadmap for improving their component. +* Deliver prompt feedback and decisions on pull requests. +* Be available to anyone with questions, bug reports, criticism etc. on their component. + This includes IRC and GitHub issues and pull requests. +* Make sure their component respects the philosophy, design and roadmap of the project. + +## How are decisions made? + +This project is an open-source project with an open design philosophy. This +means that the repository is the source of truth for EVERY aspect of the +project, including its philosophy, design, roadmap and APIs. *If it's +part of the project, it's in the repo. If it's in the repo, it's part of +the project.* + +As a result, all decisions can be expressed as changes to the +repository. An implementation change is a change to the source code. An +API change is a change to the API specification. A philosophy change is +a change to the philosophy manifesto. And so on. + +All decisions affecting this project, big and small, follow the same procedure: + +1. Discuss a proposal on the [mailing list](CONTRIBUTING.md#mailing-list). + Anyone can do this. +2. Open a pull request. + Anyone can do this. +3. Discuss the pull request. + Anyone can do this. +4. Endorse (`LGTM`) or oppose (`Rejected`) the pull request. + The relevant maintainers do this (see below [Who decides what?](#who-decides-what)). + Changes that affect project management (changing policy, cutting releases, etc.) are [proposed and voted on the mailing list](GOVERNANCE.md). +5. Merge or close the pull request. + The relevant maintainers do this. + +### I'm a maintainer, should I make pull requests too? + +Yes. Nobody should ever push to master directly.
All changes should be +made through a pull request. + +## Who decides what? + +All decisions are pull requests, and the relevant maintainers make +decisions by accepting or refusing the pull request. Review and acceptance +by anyone is denoted by adding a comment in the pull request: `LGTM`. +However, only currently listed `MAINTAINERS` are counted towards the required +two LGTMs. In addition, if a maintainer has created a pull request, they cannot +count toward the two LGTM rule (to ensure equal amounts of review for every pull +request, no matter who wrote it). + +Overall the maintainer system works because of mutual respect. +The maintainers trust one another to act in the best interests of the project. +Sometimes maintainers can disagree and this is part of a healthy project to represent the points of view of various people. +In the case where maintainers cannot find agreement on a specific change, maintainers should use the [governance procedure](GOVERNANCE.md) to attempt to reach a consensus. + +### How are maintainers added? + +The best maintainers have a vested interest in the project. Maintainers +are first and foremost contributors that have shown they are committed to +the long term success of the project. Contributors wanting to become +maintainers are expected to be deeply involved in contributing code, +pull request review, and triage of issues in the project for more than two months. + +Just contributing does not make you a maintainer, it is about building trust with the current maintainers of the project and being a person that they can depend on to act in the best interest of the project. +The final vote to add a new maintainer should be approved by the [governance procedure](GOVERNANCE.md). + +### How are maintainers removed? + +When a maintainer is unable to perform the [required duties](#what-are-a-maintainers-responsibilities) they can be removed by the [governance procedure](GOVERNANCE.md). +Issues related to a maintainer's performance should be discussed with them among the other maintainers so that they are not surprised by a pull request removing them. diff --git a/vendor/github.com/opencontainers/cgroups/README.md b/vendor/github.com/opencontainers/cgroups/README.md new file mode 100644 index 00000000000..a8187da1e8c --- /dev/null +++ b/vendor/github.com/opencontainers/cgroups/README.md @@ -0,0 +1,11 @@ +# OCI Project Template + +Useful boilerplate and organizational information for all OCI projects. + +* README (this file) +* [The Apache License, Version 2.0](LICENSE) +* [A list of maintainers](MAINTAINERS) +* [Maintainer guidelines](MAINTAINERS_GUIDE.md) +* [Contributor guidelines](CONTRIBUTING.md) +* [Project governance](GOVERNANCE.md) +* [Release procedures](RELEASES.md) diff --git a/vendor/github.com/opencontainers/cgroups/RELEASES.md b/vendor/github.com/opencontainers/cgroups/RELEASES.md new file mode 100644 index 00000000000..028ec265df9 --- /dev/null +++ b/vendor/github.com/opencontainers/cgroups/RELEASES.md @@ -0,0 +1,51 @@ +# Releases + +The release process hopes to encourage early, consistent consensus-building during project development. +The mechanisms used are regular community communication on the mailing list about progress, scheduled meetings for issue resolution and release triage, and regularly paced and communicated releases. +Releases are proposed and adopted or rejected using the usual [project governance](GOVERNANCE.md) rules and procedures. 
+ +An anti-pattern that we want to avoid is heavy development or discussions "late cycle" around major releases. +We want to build a community that is involved and communicates consistently through all releases instead of relying on "silent periods" as a judge of stability. + +## Parallel releases + +A single project MAY consider several motions to release in parallel. +However each motion to release after the initial 0.1.0 MUST be based on a previous release that has already landed. + +For example, runtime-spec maintainers may propose a v1.0.0-rc2 on the 1st of the month and a v0.9.1 bugfix on the 2nd of the month. +They may not propose a v1.0.0-rc3 until the v1.0.0-rc2 is accepted (on the 7th if the vote initiated on the 1st passes). + +## Specifications + +The OCI maintains three categories of projects: specifications, applications, and conformance-testing tools. +However, specification releases have special restrictions in the [OCI charter][charter]: + +* They are the target of backwards compatibility (§7.g), and +* They are subject to the OFWa patent grant (§8.d and e). + +To avoid unfortunate side effects (onerous backwards compatibility requirements or Member resignations), the following additional procedures apply to specification releases: + +### Planning a release + +Every OCI specification project SHOULD hold meetings that involve maintainers reviewing pull requests, debating outstanding issues, and planning releases. +This meeting MUST be advertised on the project README and MAY happen on a phone call, video conference, or on IRC. +Maintainers MUST send updates to the dev@opencontainers.org with results of these meetings. + +Before the specification reaches v1.0.0, the meetings SHOULD be weekly. +Once a specification has reached v1.0.0, the maintainers may alter the cadence, but a meeting MUST be held within four weeks of the previous meeting. + +The release plans, corresponding milestones and estimated due dates MUST be published on GitHub (e.g. https://github.com/opencontainers/runtime-spec/milestones). +GitHub milestones and issues are only used for community organization and all releases MUST follow the [project governance](GOVERNANCE.md) rules and procedures. + +### Timelines + +Specifications have a variety of different timelines in their lifecycle. + +* Pre-v1.0.0 specifications SHOULD release on a monthly cadence to garner feedback. +* Major specification releases MUST release at least three release candidates spaced a minimum of one week apart. + This means a major release like a v1.0.0 or v2.0.0 release will take 1 month at minimum: one week for rc1, one week for rc2, one week for rc3, and one week for the major release itself. + Maintainers SHOULD strive to make zero breaking changes during this cycle of release candidates and SHOULD restart the three-candidate count when a breaking change is introduced. + For example if a breaking change is introduced in v1.0.0-rc2 then the series would end with v1.0.0-rc4 and v1.0.0. +* Minor and patch releases SHOULD be made on an as-needed basis.
+ +[charter]: https://www.opencontainers.org/about/governance diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/cgroups/cgroups.go similarity index 84% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go rename to vendor/github.com/opencontainers/cgroups/cgroups.go index 53e194c74dc..1f127550c08 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go +++ b/vendor/github.com/opencontainers/cgroups/cgroups.go @@ -2,8 +2,6 @@ package cgroups import ( "errors" - - "github.com/opencontainers/runc/libcontainer/configs" ) var ( @@ -18,11 +16,11 @@ var ( // DevicesSetV1 and DevicesSetV2 are functions to set devices for // cgroup v1 and v2, respectively. Unless - // [github.com/opencontainers/runc/libcontainer/cgroups/devices] + // [github.com/opencontainers/cgroups/devices] // package is imported, it is set to nil, so cgroup managers can't // manage devices. - DevicesSetV1 func(path string, r *configs.Resources) error - DevicesSetV2 func(path string, r *configs.Resources) error + DevicesSetV1 func(path string, r *Resources) error + DevicesSetV2 func(path string, r *Resources) error ) type Manager interface { @@ -42,7 +40,7 @@ type Manager interface { GetStats() (*Stats, error) // Freeze sets the freezer cgroup to the specified state. - Freeze(state configs.FreezerState) error + Freeze(state FreezerState) error // Destroy removes cgroup. Destroy() error @@ -54,7 +52,7 @@ type Manager interface { // Set sets cgroup resources parameters/limits. If the argument is nil, // the resources specified during Manager creation (or the previous call // to Set) are used. - Set(r *configs.Resources) error + Set(r *Resources) error // GetPaths returns cgroup path(s) to save in a state file in order to // restore later. @@ -67,10 +65,10 @@ type Manager interface { GetPaths() map[string]string // GetCgroups returns the cgroup data as configured. - GetCgroups() (*configs.Cgroup, error) + GetCgroups() (*Cgroup, error) // GetFreezerState retrieves the current FreezerState of the cgroup. - GetFreezerState() (configs.FreezerState, error) + GetFreezerState() (FreezerState, error) // Exists returns whether the cgroup path exists or not. Exists() bool diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/vendor/github.com/opencontainers/cgroups/config_blkio_device.go similarity index 99% rename from vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go rename to vendor/github.com/opencontainers/cgroups/config_blkio_device.go index 865344f99c4..9dc2a034c6b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go +++ b/vendor/github.com/opencontainers/cgroups/config_blkio_device.go @@ -1,4 +1,4 @@ -package configs +package cgroups import "fmt" diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go b/vendor/github.com/opencontainers/cgroups/config_hugepages.go similarity index 91% rename from vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go rename to vendor/github.com/opencontainers/cgroups/config_hugepages.go index d30216380b5..5357dd090f4 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go +++ b/vendor/github.com/opencontainers/cgroups/config_hugepages.go @@ -1,4 +1,4 @@ -package configs +package cgroups type HugepageLimit struct { // which type of hugepage to limit. 
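The renames above, and those that follow, are the mechanical half of the cgroups code moving out of runc into the standalone `github.com/opencontainers/cgroups` module, with the former `configs` package types folded into the module root. For consumers the change is mostly an import-path and package-name swap; a rough sketch of the after-state (field values are illustrative assumptions, not taken from this patch):

```go
package main

import (
	"fmt"

	// Previously: "github.com/opencontainers/runc/libcontainer/configs"
	"github.com/opencontainers/cgroups"
)

func main() {
	// What was configs.Resources is now cgroups.Resources; the same
	// applies to Cgroup and FreezerState used by the Manager interface.
	// Linux-only, since these types live in config_linux.go.
	r := &cgroups.Resources{
		Memory:    256 * 1024 * 1024, // memory limit in bytes
		CpuShares: 512,
	}
	fmt.Printf("%+v\n", r)
}
```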
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go b/vendor/github.com/opencontainers/cgroups/config_ifprio_map.go similarity index 93% rename from vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go rename to vendor/github.com/opencontainers/cgroups/config_ifprio_map.go index 9a0395eaf5f..d771603a773 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go +++ b/vendor/github.com/opencontainers/cgroups/config_ifprio_map.go @@ -1,4 +1,4 @@ -package configs +package cgroups import ( "fmt" diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/cgroups/config_linux.go similarity index 98% rename from vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go rename to vendor/github.com/opencontainers/cgroups/config_linux.go index 4a34cf76fc5..ce98b3d7847 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go +++ b/vendor/github.com/opencontainers/cgroups/config_linux.go @@ -1,8 +1,8 @@ -package configs +package cgroups import ( systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/opencontainers/runc/libcontainer/devices" + devices "github.com/opencontainers/cgroups/devices/config" ) type FreezerState string diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go b/vendor/github.com/opencontainers/cgroups/config_rdma.go similarity index 95% rename from vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go rename to vendor/github.com/opencontainers/cgroups/config_rdma.go index c69f2c8024b..a0bd54f04f5 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go +++ b/vendor/github.com/opencontainers/cgroups/config_rdma.go @@ -1,4 +1,4 @@ -package configs +package cgroups // LinuxRdma for Linux cgroup 'rdma' resource management (Linux 4.11) type LinuxRdma struct { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/vendor/github.com/opencontainers/cgroups/config_unsupported.go similarity index 92% rename from vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go rename to vendor/github.com/opencontainers/cgroups/config_unsupported.go index 53f5ec5a0da..db32ec48325 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go +++ b/vendor/github.com/opencontainers/cgroups/config_unsupported.go @@ -1,6 +1,6 @@ //go:build !linux -package configs +package cgroups // Cgroup holds properties of a cgroup on Linux // TODO Windows: This can ultimately be entirely factored out on Windows as diff --git a/vendor/github.com/opencontainers/cgroups/devices/config/device.go b/vendor/github.com/opencontainers/cgroups/devices/config/device.go new file mode 100644 index 00000000000..05ad3ef8cab --- /dev/null +++ b/vendor/github.com/opencontainers/cgroups/devices/config/device.go @@ -0,0 +1,174 @@ +package config + +import ( + "fmt" + "os" + "strconv" +) + +const ( + Wildcard = -1 +) + +type Device struct { + Rule + + // Path to the device. + Path string `json:"path"` + + // FileMode permission bits for the device. + FileMode os.FileMode `json:"file_mode"` + + // Uid of the device. + Uid uint32 `json:"uid"` + + // Gid of the device. + Gid uint32 `json:"gid"` +} + +// Permissions is a cgroupv1-style string to represent device access. 
It +// has to be a string for backward compatibility reasons, hence why it has +// methods to do set operations. +type Permissions string + +const ( + deviceRead uint = (1 << iota) + deviceWrite + deviceMknod +) + +func (p Permissions) toSet() uint { + var set uint + for _, perm := range p { + switch perm { + case 'r': + set |= deviceRead + case 'w': + set |= deviceWrite + case 'm': + set |= deviceMknod + } + } + return set +} + +func fromSet(set uint) Permissions { + var perm string + if set&deviceRead == deviceRead { + perm += "r" + } + if set&deviceWrite == deviceWrite { + perm += "w" + } + if set&deviceMknod == deviceMknod { + perm += "m" + } + return Permissions(perm) +} + +// Union returns the union of the two sets of Permissions. +func (p Permissions) Union(o Permissions) Permissions { + lhs := p.toSet() + rhs := o.toSet() + return fromSet(lhs | rhs) +} + +// Difference returns the set difference of the two sets of Permissions. +// In set notation, A.Difference(B) gives you A\B. +func (p Permissions) Difference(o Permissions) Permissions { + lhs := p.toSet() + rhs := o.toSet() + return fromSet(lhs &^ rhs) +} + +// Intersection computes the intersection of the two sets of Permissions. +func (p Permissions) Intersection(o Permissions) Permissions { + lhs := p.toSet() + rhs := o.toSet() + return fromSet(lhs & rhs) +} + +// IsEmpty returns whether the set of permissions in a Permissions is +// empty. +func (p Permissions) IsEmpty() bool { + return p == Permissions("") +} + +// IsValid returns whether the set of permissions is a subset of valid +// permissions (namely, {r,w,m}). +func (p Permissions) IsValid() bool { + return p == fromSet(p.toSet()) +} + +type Type rune + +const ( + WildcardDevice Type = 'a' + BlockDevice Type = 'b' + CharDevice Type = 'c' // or 'u' + FifoDevice Type = 'p' +) + +func (t Type) IsValid() bool { + switch t { + case WildcardDevice, BlockDevice, CharDevice, FifoDevice: + return true + default: + return false + } +} + +func (t Type) CanMknod() bool { + switch t { + case BlockDevice, CharDevice, FifoDevice: + return true + default: + return false + } +} + +func (t Type) CanCgroup() bool { + switch t { + case WildcardDevice, BlockDevice, CharDevice: + return true + default: + return false + } +} + +type Rule struct { + // Type of device ('c' for char, 'b' for block). If set to 'a', this rule + // acts as a wildcard and all fields other than Allow are ignored. + Type Type `json:"type"` + + // Major is the device's major number. + Major int64 `json:"major"` + + // Minor is the device's minor number. + Minor int64 `json:"minor"` + + // Permissions is the set of permissions that this rule applies to (in the + // cgroupv1 format -- any combination of "rwm"). + Permissions Permissions `json:"permissions"` + + // Allow specifies whether this rule is allowed. 
+ Allow bool `json:"allow"` +} + +func (d *Rule) CgroupString() string { + var ( + major = strconv.FormatInt(d.Major, 10) + minor = strconv.FormatInt(d.Minor, 10) + ) + if d.Major == Wildcard { + major = "*" + } + if d.Minor == Wildcard { + minor = "*" + } + return fmt.Sprintf("%c %s:%s %s", d.Type, major, minor, d.Permissions) +} + +func (d *Rule) Mkdev() (uint64, error) { + return mkDev(d) +} diff --git a/vendor/github.com/opencontainers/cgroups/devices/config/mknod_unix.go b/vendor/github.com/opencontainers/cgroups/devices/config/mknod_unix.go new file mode 100644 index 00000000000..98cdc6e2fa6 --- /dev/null +++ b/vendor/github.com/opencontainers/cgroups/devices/config/mknod_unix.go @@ -0,0 +1,14 @@ +package config + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +func mkDev(d *Rule) (uint64, error) { + if d.Major == Wildcard || d.Minor == Wildcard { + return 0, errors.New("cannot mkdev() device with wildcards") + } + return unix.Mkdev(uint32(d.Major), uint32(d.Minor)), nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go b/vendor/github.com/opencontainers/cgroups/file.go similarity index 92% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go rename to vendor/github.com/opencontainers/cgroups/file.go index 78c5bcf0d37..c1b8f5c15f8 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go +++ b/vendor/github.com/opencontainers/cgroups/file.go @@ -5,12 +5,11 @@ import ( "errors" "fmt" "os" - "path" + "path/filepath" "strconv" "strings" "sync" - "github.com/opencontainers/runc/libcontainer/utils" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -147,12 +146,15 @@ func openFile(dir, file string, flags int) (*os.File, error) { flags |= os.O_TRUNC | os.O_CREATE mode = 0o600 } - path := path.Join(dir, utils.CleanPath(file)) + // NOTE it is important to use filepath.Clean("/"+file) here + // (see https://github.com/opencontainers/runc/issues/4103)! + path := filepath.Join(dir, filepath.Clean("/"+file)) + if prepareOpenat2() != nil { return openFallback(path, flags, mode) } - relPath := strings.TrimPrefix(path, cgroupfsPrefix) - if len(relPath) == len(path) { // non-standard path, old system? + relPath, ok := strings.CutPrefix(path, cgroupfsPrefix) + if !ok { // Non-standard path, old system? return openFallback(path, flags, mode) } @@ -171,10 +173,8 @@ func openFile(dir, file string, flags int) (*os.File, error) { // // TODO: if such usage will ever be common, amend this // to reopen cgroupRootHandle and retry openat2. - fdPath, closer := utils.ProcThreadSelf("fd/" + strconv.Itoa(int(cgroupRootHandle.Fd()))) - defer closer() - fdDest, _ := os.Readlink(fdPath) - if fdDest != cgroupfsDir { + fdDest, fdErr := os.Readlink("/proc/thread-self/fd/" + strconv.Itoa(int(cgroupRootHandle.Fd()))) + if fdErr == nil && fdDest != cgroupfsDir { // Wrap the error so it is clear that cgroupRootHandle // is opened to an unexpected/wrong directory. 
err = fmt.Errorf("cgroupRootHandle %d unexpectedly opened to %s != %s: %w", diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go b/vendor/github.com/opencontainers/cgroups/fs/blkio.go similarity index 97% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go rename to vendor/github.com/opencontainers/cgroups/fs/blkio.go index c81b6562ac5..f3c4c5cf816 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go +++ b/vendor/github.com/opencontainers/cgroups/fs/blkio.go @@ -7,8 +7,7 @@ import ( "strconv" "strings" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" ) type BlkioGroup struct { @@ -20,11 +19,11 @@ func (s *BlkioGroup) Name() string { return "blkio" } -func (s *BlkioGroup) Apply(path string, _ *configs.Resources, pid int) error { +func (s *BlkioGroup) Apply(path string, _ *cgroups.Resources, pid int) error { return apply(path, pid) } -func (s *BlkioGroup) Set(path string, r *configs.Resources) error { +func (s *BlkioGroup) Set(path string, r *cgroups.Resources) error { s.detectWeightFilenames(path) if r.BlkioWeight != 0 { if err := cgroups.WriteFile(path, s.weightFilename, strconv.FormatUint(uint64(r.BlkioWeight), 10)); err != nil { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go b/vendor/github.com/opencontainers/cgroups/fs/cpu.go similarity index 92% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go rename to vendor/github.com/opencontainers/cgroups/fs/cpu.go index 62574b53c59..3e05788a3f6 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go +++ b/vendor/github.com/opencontainers/cgroups/fs/cpu.go @@ -7,9 +7,8 @@ import ( "os" "strconv" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" "golang.org/x/sys/unix" ) @@ -19,7 +18,7 @@ func (s *CpuGroup) Name() string { return "cpu" } -func (s *CpuGroup) Apply(path string, r *configs.Resources, pid int) error { +func (s *CpuGroup) Apply(path string, r *cgroups.Resources, pid int) error { if err := os.MkdirAll(path, 0o755); err != nil { return err } @@ -34,7 +33,7 @@ func (s *CpuGroup) Apply(path string, r *configs.Resources, pid int) error { return cgroups.WriteCgroupProc(path, pid) } -func (s *CpuGroup) SetRtSched(path string, r *configs.Resources) error { +func (s *CpuGroup) SetRtSched(path string, r *cgroups.Resources) error { var period string if r.CpuRtPeriod != 0 { period = strconv.FormatUint(r.CpuRtPeriod, 10) @@ -64,7 +63,7 @@ func (s *CpuGroup) SetRtSched(path string, r *configs.Resources) error { return nil } -func (s *CpuGroup) Set(path string, r *configs.Resources) error { +func (s *CpuGroup) Set(path string, r *cgroups.Resources) error { if r.CpuShares != 0 { shares := r.CpuShares if err := cgroups.WriteFile(path, "cpu.shares", strconv.FormatUint(shares, 10)); err != nil { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go b/vendor/github.com/opencontainers/cgroups/fs/cpuacct.go similarity index 74% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go rename to vendor/github.com/opencontainers/cgroups/fs/cpuacct.go index 69f8f9d8cdd..391a023c751 100644 --- 
a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go +++ b/vendor/github.com/opencontainers/cgroups/fs/cpuacct.go @@ -6,20 +6,12 @@ import ( "strconv" "strings" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) const ( - cgroupCpuacctStat = "cpuacct.stat" - cgroupCpuacctUsageAll = "cpuacct.usage_all" - - nanosecondsInSecond = 1000000000 - - userModeColumn = 1 - kernelModeColumn = 2 - cuacctUsageAllColumnsNumber = 3 + nsInSec = 1000000000 // The value comes from `C.sysconf(C._SC_CLK_TCK)`, and // on Linux it's a constant which is safe to be hard coded, @@ -34,11 +26,11 @@ func (s *CpuacctGroup) Name() string { return "cpuacct" } -func (s *CpuacctGroup) Apply(path string, _ *configs.Resources, pid int) error { +func (s *CpuacctGroup) Apply(path string, _ *cgroups.Resources, pid int) error { return apply(path, pid) } -func (s *CpuacctGroup) Set(_ string, _ *configs.Resources) error { +func (s *CpuacctGroup) Set(_ string, _ *cgroups.Resources) error { return nil } @@ -81,7 +73,7 @@ func getCpuUsageBreakdown(path string) (uint64, uint64, error) { const ( userField = "user" systemField = "system" - file = cgroupCpuacctStat + file = "cpuacct.stat" ) // Expected format: @@ -103,7 +95,7 @@ func getCpuUsageBreakdown(path string) (uint64, uint64, error) { return 0, 0, &parseError{Path: path, File: file, Err: err} } - return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil + return (userModeUsage * nsInSec) / clockTicks, (kernelModeUsage * nsInSec) / clockTicks, nil } func getPercpuUsage(path string) ([]uint64, error) { @@ -113,7 +105,6 @@ func getPercpuUsage(path string) ([]uint64, error) { if err != nil { return percpuUsage, err } - // TODO: use strings.SplitN instead. 
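// (The per-CPU usage file read above holds one cumulative nanosecond
// counter per CPU on a single space-separated line, e.g. "236 4066 0",
// which is why the plain strings.Fields split below suffices.)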
for _, value := range strings.Fields(data) { value, err := strconv.ParseUint(value, 10, 64) if err != nil { @@ -127,7 +118,7 @@ func getPercpuUsage(path string) ([]uint64, error) { func getPercpuUsageInModes(path string) ([]uint64, []uint64, error) { usageKernelMode := []uint64{} usageUserMode := []uint64{} - const file = cgroupCpuacctUsageAll + const file = "cpuacct.usage_all" fd, err := cgroups.OpenFile(path, file, os.O_RDONLY) if os.IsNotExist(err) { @@ -141,22 +132,23 @@ func getPercpuUsageInModes(path string) ([]uint64, []uint64, error) { scanner.Scan() // skipping header line for scanner.Scan() { - lineFields := strings.SplitN(scanner.Text(), " ", cuacctUsageAllColumnsNumber+1) - if len(lineFields) != cuacctUsageAllColumnsNumber { + // Each line is: cpu user system + fields := strings.SplitN(scanner.Text(), " ", 3) + if len(fields) != 3 { continue } - usageInKernelMode, err := strconv.ParseUint(lineFields[kernelModeColumn], 10, 64) + user, err := strconv.ParseUint(fields[1], 10, 64) if err != nil { return nil, nil, &parseError{Path: path, File: file, Err: err} } - usageKernelMode = append(usageKernelMode, usageInKernelMode) + usageUserMode = append(usageUserMode, user) - usageInUserMode, err := strconv.ParseUint(lineFields[userModeColumn], 10, 64) + kernel, err := strconv.ParseUint(fields[2], 10, 64) if err != nil { return nil, nil, &parseError{Path: path, File: file, Err: err} } - usageUserMode = append(usageUserMode, usageInUserMode) + usageKernelMode = append(usageKernelMode, kernel) } if err := scanner.Err(); err != nil { return nil, nil, &parseError{Path: path, File: file, Err: err} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go b/vendor/github.com/opencontainers/cgroups/fs/cpuset.go similarity index 67% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go rename to vendor/github.com/opencontainers/cgroups/fs/cpuset.go index fe01ba98408..ef6ff7da303 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go +++ b/vendor/github.com/opencontainers/cgroups/fs/cpuset.go @@ -6,32 +6,66 @@ import ( "path/filepath" "strconv" "strings" + "sync" "golang.org/x/sys/unix" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) +var ( + cpusetLock sync.Mutex + cpusetPrefix = "cpuset." + cpusetFastPath bool +) + +func cpusetFile(path string, name string) string { + cpusetLock.Lock() + defer cpusetLock.Unlock() + + // Only the v1 cpuset cgroup is allowed to mount with noprefix. + // See kernel source: https://github.com/torvalds/linux/blob/2e1b3cc9d7f790145a80cb705b168f05dab65df2/kernel/cgroup/cgroup-v1.c#L1070 + // Cpuset cannot be mounted with and without prefix simultaneously. + // Commonly used in Android environments. 
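// Probe-and-cache: the first successful call detects which naming the
// mounted hierarchy uses ("cpuset.cpus" vs. plain "cpus" under noprefix)
// and latches cpusetFastPath, so later calls reduce to a string
// concatenation under cpusetLock. If neither file is accessible (e.g.
// the directory does not exist yet), nothing is cached and the next
// call probes again.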
+ + if cpusetFastPath { + return cpusetPrefix + name + } + + err := unix.Access(filepath.Join(path, cpusetPrefix+name), unix.F_OK) + if err == nil { + // Use the fast path only if we can access one type of mount for cpuset already + cpusetFastPath = true + } else { + err = unix.Access(filepath.Join(path, name), unix.F_OK) + if err == nil { + cpusetPrefix = "" + cpusetFastPath = true + } + } + + return cpusetPrefix + name +} + type CpusetGroup struct{} func (s *CpusetGroup) Name() string { return "cpuset" } -func (s *CpusetGroup) Apply(path string, r *configs.Resources, pid int) error { +func (s *CpusetGroup) Apply(path string, r *cgroups.Resources, pid int) error { return s.ApplyDir(path, r, pid) } -func (s *CpusetGroup) Set(path string, r *configs.Resources) error { +func (s *CpusetGroup) Set(path string, r *cgroups.Resources) error { if r.CpusetCpus != "" { - if err := cgroups.WriteFile(path, "cpuset.cpus", r.CpusetCpus); err != nil { + if err := cgroups.WriteFile(path, cpusetFile(path, "cpus"), r.CpusetCpus); err != nil { return err } } if r.CpusetMems != "" { - if err := cgroups.WriteFile(path, "cpuset.mems", r.CpusetMems); err != nil { + if err := cgroups.WriteFile(path, cpusetFile(path, "mems"), r.CpusetMems); err != nil { return err } } @@ -49,26 +83,23 @@ func getCpusetStat(path string, file string) ([]uint16, error) { } for _, s := range strings.Split(fileContent, ",") { - sp := strings.SplitN(s, "-", 3) - switch len(sp) { - case 3: - return extracted, &parseError{Path: path, File: file, Err: errors.New("extra dash")} - case 2: - min, err := strconv.ParseUint(sp[0], 10, 16) + fromStr, toStr, ok := strings.Cut(s, "-") + if ok { + from, err := strconv.ParseUint(fromStr, 10, 16) if err != nil { return extracted, &parseError{Path: path, File: file, Err: err} } - max, err := strconv.ParseUint(sp[1], 10, 16) + to, err := strconv.ParseUint(toStr, 10, 16) if err != nil { return extracted, &parseError{Path: path, File: file, Err: err} } - if min > max { - return extracted, &parseError{Path: path, File: file, Err: errors.New("invalid values, min > max")} + if from > to { + return extracted, &parseError{Path: path, File: file, Err: errors.New("invalid values, from > to")} } - for i := min; i <= max; i++ { + for i := from; i <= to; i++ { extracted = append(extracted, uint16(i)) } - case 1: + } else { value, err := strconv.ParseUint(s, 10, 16) if err != nil { return extracted, &parseError{Path: path, File: file, Err: err} @@ -83,57 +114,57 @@ func getCpusetStat(path string, file string) ([]uint16, error) { func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error { var err error - stats.CPUSetStats.CPUs, err = getCpusetStat(path, "cpuset.cpus") + stats.CPUSetStats.CPUs, err = getCpusetStat(path, cpusetFile(path, "cpus")) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - stats.CPUSetStats.CPUExclusive, err = fscommon.GetCgroupParamUint(path, "cpuset.cpu_exclusive") + stats.CPUSetStats.CPUExclusive, err = fscommon.GetCgroupParamUint(path, cpusetFile(path, "cpu_exclusive")) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - stats.CPUSetStats.Mems, err = getCpusetStat(path, "cpuset.mems") + stats.CPUSetStats.Mems, err = getCpusetStat(path, cpusetFile(path, "mems")) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - stats.CPUSetStats.MemHardwall, err = fscommon.GetCgroupParamUint(path, "cpuset.mem_hardwall") + stats.CPUSetStats.MemHardwall, err = fscommon.GetCgroupParamUint(path, cpusetFile(path, "mem_hardwall")) if err != nil && 
!errors.Is(err, os.ErrNotExist) { return err } - stats.CPUSetStats.MemExclusive, err = fscommon.GetCgroupParamUint(path, "cpuset.mem_exclusive") + stats.CPUSetStats.MemExclusive, err = fscommon.GetCgroupParamUint(path, cpusetFile(path, "mem_exclusive")) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - stats.CPUSetStats.MemoryMigrate, err = fscommon.GetCgroupParamUint(path, "cpuset.memory_migrate") + stats.CPUSetStats.MemoryMigrate, err = fscommon.GetCgroupParamUint(path, cpusetFile(path, "memory_migrate")) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - stats.CPUSetStats.MemorySpreadPage, err = fscommon.GetCgroupParamUint(path, "cpuset.memory_spread_page") + stats.CPUSetStats.MemorySpreadPage, err = fscommon.GetCgroupParamUint(path, cpusetFile(path, "memory_spread_page")) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - stats.CPUSetStats.MemorySpreadSlab, err = fscommon.GetCgroupParamUint(path, "cpuset.memory_spread_slab") + stats.CPUSetStats.MemorySpreadSlab, err = fscommon.GetCgroupParamUint(path, cpusetFile(path, "memory_spread_slab")) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - stats.CPUSetStats.MemoryPressure, err = fscommon.GetCgroupParamUint(path, "cpuset.memory_pressure") + stats.CPUSetStats.MemoryPressure, err = fscommon.GetCgroupParamUint(path, cpusetFile(path, "memory_pressure")) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - stats.CPUSetStats.SchedLoadBalance, err = fscommon.GetCgroupParamUint(path, "cpuset.sched_load_balance") + stats.CPUSetStats.SchedLoadBalance, err = fscommon.GetCgroupParamUint(path, cpusetFile(path, "sched_load_balance")) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - stats.CPUSetStats.SchedRelaxDomainLevel, err = fscommon.GetCgroupParamInt(path, "cpuset.sched_relax_domain_level") + stats.CPUSetStats.SchedRelaxDomainLevel, err = fscommon.GetCgroupParamInt(path, cpusetFile(path, "sched_relax_domain_level")) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } @@ -141,7 +172,7 @@ func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error { return nil } -func (s *CpusetGroup) ApplyDir(dir string, r *configs.Resources, pid int) error { +func (s *CpusetGroup) ApplyDir(dir string, r *cgroups.Resources, pid int) error { // This might happen if we have no cpuset cgroup mounted. // Just do nothing and don't fail. 
if dir == "" { @@ -172,10 +203,10 @@ func (s *CpusetGroup) ApplyDir(dir string, r *configs.Resources, pid int) error } func getCpusetSubsystemSettings(parent string) (cpus, mems string, err error) { - if cpus, err = cgroups.ReadFile(parent, "cpuset.cpus"); err != nil { + if cpus, err = cgroups.ReadFile(parent, cpusetFile(parent, "cpus")); err != nil { return } - if mems, err = cgroups.ReadFile(parent, "cpuset.mems"); err != nil { + if mems, err = cgroups.ReadFile(parent, cpusetFile(parent, "mems")); err != nil { return } return cpus, mems, nil @@ -221,12 +252,12 @@ func cpusetCopyIfNeeded(current, parent string) error { } if isEmptyCpuset(currentCpus) { - if err := cgroups.WriteFile(current, "cpuset.cpus", parentCpus); err != nil { + if err := cgroups.WriteFile(current, cpusetFile(current, "cpus"), parentCpus); err != nil { return err } } if isEmptyCpuset(currentMems) { - if err := cgroups.WriteFile(current, "cpuset.mems", parentMems); err != nil { + if err := cgroups.WriteFile(current, cpusetFile(current, "mems"), parentMems); err != nil { return err } } @@ -237,7 +268,7 @@ func isEmptyCpuset(str string) bool { return str == "" || str == "\n" } -func (s *CpusetGroup) ensureCpusAndMems(path string, r *configs.Resources) error { +func (s *CpusetGroup) ensureCpusAndMems(path string, r *cgroups.Resources) error { if err := s.Set(path, r); err != nil { return err } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go b/vendor/github.com/opencontainers/cgroups/fs/devices.go similarity index 71% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go rename to vendor/github.com/opencontainers/cgroups/fs/devices.go index 0bf3d9debb9..26483ecb7dd 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go +++ b/vendor/github.com/opencontainers/cgroups/fs/devices.go @@ -1,8 +1,7 @@ package fs import ( - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" ) type DevicesGroup struct{} @@ -11,7 +10,7 @@ func (s *DevicesGroup) Name() string { return "devices" } -func (s *DevicesGroup) Apply(path string, r *configs.Resources, pid int) error { +func (s *DevicesGroup) Apply(path string, r *cgroups.Resources, pid int) error { if r.SkipDevices { return nil } @@ -24,7 +23,7 @@ func (s *DevicesGroup) Apply(path string, r *configs.Resources, pid int) error { return apply(path, pid) } -func (s *DevicesGroup) Set(path string, r *configs.Resources) error { +func (s *DevicesGroup) Set(path string, r *cgroups.Resources) error { if cgroups.DevicesSetV1 == nil { if len(r.Devices) == 0 { return nil diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/error.go b/vendor/github.com/opencontainers/cgroups/fs/error.go similarity index 84% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/error.go rename to vendor/github.com/opencontainers/cgroups/fs/error.go index f2ab6f130e3..f13033e3d8b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/error.go +++ b/vendor/github.com/opencontainers/cgroups/fs/error.go @@ -3,7 +3,7 @@ package fs import ( "fmt" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" + "github.com/opencontainers/cgroups/fscommon" ) type parseError = fscommon.ParseError diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/vendor/github.com/opencontainers/cgroups/fs/freezer.go similarity index 79% rename from 
vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go rename to vendor/github.com/opencontainers/cgroups/fs/freezer.go index 987f1bf5e74..dae4a605894 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go +++ b/vendor/github.com/opencontainers/cgroups/fs/freezer.go @@ -7,8 +7,7 @@ import ( "strings" "time" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -19,19 +18,19 @@ func (s *FreezerGroup) Name() string { return "freezer" } -func (s *FreezerGroup) Apply(path string, _ *configs.Resources, pid int) error { +func (s *FreezerGroup) Apply(path string, _ *cgroups.Resources, pid int) error { return apply(path, pid) } -func (s *FreezerGroup) Set(path string, r *configs.Resources) (Err error) { +func (s *FreezerGroup) Set(path string, r *cgroups.Resources) (Err error) { switch r.Freezer { - case configs.Frozen: + case cgroups.Frozen: defer func() { if Err != nil { // Freezing failed, and it is bad and dangerous // to leave the cgroup in FROZEN or FREEZING // state, so (try to) thaw it back. - _ = cgroups.WriteFile(path, "freezer.state", string(configs.Thawed)) + _ = cgroups.WriteFile(path, "freezer.state", string(cgroups.Thawed)) } }() @@ -64,11 +63,11 @@ func (s *FreezerGroup) Set(path string, r *configs.Resources) (Err error) { // the chances to succeed in freezing // in case new processes keep appearing // in the cgroup. - _ = cgroups.WriteFile(path, "freezer.state", string(configs.Thawed)) + _ = cgroups.WriteFile(path, "freezer.state", string(cgroups.Thawed)) time.Sleep(10 * time.Millisecond) } - if err := cgroups.WriteFile(path, "freezer.state", string(configs.Frozen)); err != nil { + if err := cgroups.WriteFile(path, "freezer.state", string(cgroups.Frozen)); err != nil { return err } @@ -87,7 +86,7 @@ func (s *FreezerGroup) Set(path string, r *configs.Resources) (Err error) { switch state { case "FREEZING": continue - case string(configs.Frozen): + case string(cgroups.Frozen): if i > 1 { logrus.Debugf("frozen after %d retries", i) } @@ -99,9 +98,9 @@ func (s *FreezerGroup) Set(path string, r *configs.Resources) (Err error) { } // Despite our best efforts, it got stuck in FREEZING. return errors.New("unable to freeze") - case configs.Thawed: - return cgroups.WriteFile(path, "freezer.state", string(configs.Thawed)) - case configs.Undefined: + case cgroups.Thawed: + return cgroups.WriteFile(path, "freezer.state", string(cgroups.Thawed)) + case cgroups.Undefined: return nil default: return fmt.Errorf("Invalid argument '%s' to freezer.state", string(r.Freezer)) @@ -112,7 +111,7 @@ func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error { return nil } -func (s *FreezerGroup) GetState(path string) (configs.FreezerState, error) { +func (s *FreezerGroup) GetState(path string) (cgroups.FreezerState, error) { for { state, err := cgroups.ReadFile(path, "freezer.state") if err != nil { @@ -121,11 +120,11 @@ func (s *FreezerGroup) GetState(path string) (configs.FreezerState, error) { if os.IsNotExist(err) || errors.Is(err, unix.ENODEV) { err = nil } - return configs.Undefined, err + return cgroups.Undefined, err } switch strings.TrimSpace(state) { case "THAWED": - return configs.Thawed, nil + return cgroups.Thawed, nil case "FROZEN": // Find out whether the cgroup is frozen directly, // or indirectly via an ancestor. 
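// (The hunk below distinguishes the two cases via the v1 freezer's
// freezer.self_freezing file, which reads "1" only when this cgroup was
// frozen directly and "0" when FROZEN was inherited from an ancestor.)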
@@ -136,15 +135,15 @@ func (s *FreezerGroup) GetState(path string) (configs.FreezerState, error) { if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.ENODEV) { err = nil } - return configs.Frozen, err + return cgroups.Frozen, err } switch self { case "0\n": - return configs.Thawed, nil + return cgroups.Thawed, nil case "1\n": - return configs.Frozen, nil + return cgroups.Frozen, nil default: - return configs.Undefined, fmt.Errorf(`unknown "freezer.self_freezing" state: %q`, self) + return cgroups.Undefined, fmt.Errorf(`unknown "freezer.self_freezing" state: %q`, self) } case "FREEZING": // Make sure we get a stable freezer state, so retry if the cgroup @@ -152,7 +151,7 @@ func (s *FreezerGroup) GetState(path string) (configs.FreezerState, error) { time.Sleep(1 * time.Millisecond) continue default: - return configs.Undefined, fmt.Errorf("unknown freezer.state %q", state) + return cgroups.Undefined, fmt.Errorf("unknown freezer.state %q", state) } } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go b/vendor/github.com/opencontainers/cgroups/fs/fs.go similarity index 89% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go rename to vendor/github.com/opencontainers/cgroups/fs/fs.go index ba15bfc4077..23a8fb8742f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go +++ b/vendor/github.com/opencontainers/cgroups/fs/fs.go @@ -8,9 +8,8 @@ import ( "golang.org/x/sys/unix" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) var subsystems = []subsystem{ @@ -49,22 +48,22 @@ type subsystem interface { // Apply creates and joins a cgroup, adding pid into it. Some // subsystems use resources to pre-configure the cgroup parents // before creating or joining it. - Apply(path string, r *configs.Resources, pid int) error + Apply(path string, r *cgroups.Resources, pid int) error // Set sets the cgroup resources. - Set(path string, r *configs.Resources) error + Set(path string, r *cgroups.Resources) error } type Manager struct { mu sync.Mutex - cgroups *configs.Cgroup + cgroups *cgroups.Cgroup paths map[string]string } -func NewManager(cg *configs.Cgroup, paths map[string]string) (*Manager, error) { +func NewManager(cg *cgroups.Cgroup, paths map[string]string) (*Manager, error) { // Some v1 controllers (cpu, cpuset, and devices) expect // cgroups.Resources to not be nil in Apply. 
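// A hypothetical caller therefore always passes a non-nil Resources,
// even an empty one, e.g.
// NewManager(&cgroups.Cgroup{Path: "/mycg", Resources: &cgroups.Resources{}}, nil).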
if cg.Resources == nil { - return nil, errors.New("cgroup v1 manager needs configs.Resources to be set during manager creation") + return nil, errors.New("cgroup v1 manager needs cgroups.Resources to be set during manager creation") } if cg.Resources.Unified != nil { return nil, cgroups.ErrV1NoUnified @@ -168,7 +167,7 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) { return stats, nil } -func (m *Manager) Set(r *configs.Resources) error { +func (m *Manager) Set(r *cgroups.Resources) error { if r == nil { return nil } @@ -203,7 +202,7 @@ func (m *Manager) Set(r *configs.Resources) error { // Freeze toggles the container's freezer cgroup depending on the state // provided -func (m *Manager) Freeze(state configs.FreezerState) error { +func (m *Manager) Freeze(state cgroups.FreezerState) error { path := m.Path("freezer") if path == "" { return errors.New("cannot toggle freezer: cgroups not configured for container") @@ -233,15 +232,15 @@ func (m *Manager) GetPaths() map[string]string { return m.paths } -func (m *Manager) GetCgroups() (*configs.Cgroup, error) { +func (m *Manager) GetCgroups() (*cgroups.Cgroup, error) { return m.cgroups, nil } -func (m *Manager) GetFreezerState() (configs.FreezerState, error) { +func (m *Manager) GetFreezerState() (cgroups.FreezerState, error) { dir := m.Path("freezer") // If the container doesn't have the freezer cgroup, say it's undefined. if dir == "" { - return configs.Undefined, nil + return cgroups.Undefined, nil } freezer := &FreezerGroup{} return freezer.GetState(dir) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go b/vendor/github.com/opencontainers/cgroups/fs/hugetlb.go similarity index 83% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go rename to vendor/github.com/opencontainers/cgroups/fs/hugetlb.go index 50f8f30cd39..698fd691e10 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go +++ b/vendor/github.com/opencontainers/cgroups/fs/hugetlb.go @@ -5,9 +5,8 @@ import ( "os" "strconv" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) type HugetlbGroup struct{} @@ -16,11 +15,11 @@ func (s *HugetlbGroup) Name() string { return "hugetlb" } -func (s *HugetlbGroup) Apply(path string, _ *configs.Resources, pid int) error { +func (s *HugetlbGroup) Apply(path string, _ *cgroups.Resources, pid int) error { return apply(path, pid) } -func (s *HugetlbGroup) Set(path string, r *configs.Resources) error { +func (s *HugetlbGroup) Set(path string, r *cgroups.Resources) error { const suffix = ".limit_in_bytes" skipRsvd := false diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go b/vendor/github.com/opencontainers/cgroups/fs/memory.go similarity index 96% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go rename to vendor/github.com/opencontainers/cgroups/fs/memory.go index 0abea63f92a..d92f2322beb 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go +++ b/vendor/github.com/opencontainers/cgroups/fs/memory.go @@ -12,9 +12,8 @@ import ( "golang.org/x/sys/unix" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + 
"github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) const ( @@ -30,7 +29,7 @@ func (s *MemoryGroup) Name() string { return "memory" } -func (s *MemoryGroup) Apply(path string, _ *configs.Resources, pid int) error { +func (s *MemoryGroup) Apply(path string, _ *cgroups.Resources, pid int) error { return apply(path, pid) } @@ -66,7 +65,7 @@ func setSwap(path string, val int64) error { return cgroups.WriteFile(path, cgroupMemorySwapLimit, strconv.FormatInt(val, 10)) } -func setMemoryAndSwap(path string, r *configs.Resources) error { +func setMemoryAndSwap(path string, r *cgroups.Resources) error { // If the memory update is set to -1 and the swap is not explicitly // set, we should also set swap to -1, it means unlimited memory. if r.Memory == -1 && r.MemorySwap == 0 { @@ -108,7 +107,7 @@ func setMemoryAndSwap(path string, r *configs.Resources) error { return nil } -func (s *MemoryGroup) Set(path string, r *configs.Resources) error { +func (s *MemoryGroup) Set(path string, r *cgroups.Resources) error { if err := setMemoryAndSwap(path, r); err != nil { return err } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go b/vendor/github.com/opencontainers/cgroups/fs/name.go similarity index 60% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go rename to vendor/github.com/opencontainers/cgroups/fs/name.go index b8d5d849c52..28643519b58 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go +++ b/vendor/github.com/opencontainers/cgroups/fs/name.go @@ -1,8 +1,7 @@ package fs import ( - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" ) type NameGroup struct { @@ -14,7 +13,7 @@ func (s *NameGroup) Name() string { return s.GroupName } -func (s *NameGroup) Apply(path string, _ *configs.Resources, pid int) error { +func (s *NameGroup) Apply(path string, _ *cgroups.Resources, pid int) error { if s.Join { // Ignore errors if the named cgroup does not exist. 
_ = apply(path, pid) @@ -22,7 +21,7 @@ func (s *NameGroup) Apply(path string, _ *configs.Resources, pid int) error { return nil } -func (s *NameGroup) Set(_ string, _ *configs.Resources) error { +func (s *NameGroup) Set(_ string, _ *cgroups.Resources) error { return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go b/vendor/github.com/opencontainers/cgroups/fs/net_cls.go similarity index 64% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go rename to vendor/github.com/opencontainers/cgroups/fs/net_cls.go index abfd09ce83a..2bd6c5ab218 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go +++ b/vendor/github.com/opencontainers/cgroups/fs/net_cls.go @@ -3,8 +3,7 @@ package fs import ( "strconv" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" ) type NetClsGroup struct{} @@ -13,11 +12,11 @@ func (s *NetClsGroup) Name() string { return "net_cls" } -func (s *NetClsGroup) Apply(path string, _ *configs.Resources, pid int) error { +func (s *NetClsGroup) Apply(path string, _ *cgroups.Resources, pid int) error { return apply(path, pid) } -func (s *NetClsGroup) Set(path string, r *configs.Resources) error { +func (s *NetClsGroup) Set(path string, r *cgroups.Resources) error { if r.NetClsClassid != 0 { if err := cgroups.WriteFile(path, "net_cls.classid", strconv.FormatUint(uint64(r.NetClsClassid), 10)); err != nil { return err diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go b/vendor/github.com/opencontainers/cgroups/fs/net_prio.go similarity index 64% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go rename to vendor/github.com/opencontainers/cgroups/fs/net_prio.go index da74d377989..b51682b6da0 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go +++ b/vendor/github.com/opencontainers/cgroups/fs/net_prio.go @@ -1,8 +1,7 @@ package fs import ( - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" ) type NetPrioGroup struct{} @@ -11,11 +10,11 @@ func (s *NetPrioGroup) Name() string { return "net_prio" } -func (s *NetPrioGroup) Apply(path string, _ *configs.Resources, pid int) error { +func (s *NetPrioGroup) Apply(path string, _ *cgroups.Resources, pid int) error { return apply(path, pid) } -func (s *NetPrioGroup) Set(path string, r *configs.Resources) error { +func (s *NetPrioGroup) Set(path string, r *cgroups.Resources) error { for _, prioMap := range r.NetPrioIfpriomap { if err := cgroups.WriteFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil { return err diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go b/vendor/github.com/opencontainers/cgroups/fs/paths.go similarity index 82% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go rename to vendor/github.com/opencontainers/cgroups/fs/paths.go index 5f119bac31b..edbe041ea8d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go +++ b/vendor/github.com/opencontainers/cgroups/fs/paths.go @@ -8,9 +8,8 @@ import ( "golang.org/x/sys/unix" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/utils" + "github.com/opencontainers/cgroups" 
+ "github.com/opencontainers/cgroups/internal/path" ) // The absolute path to the root of the cgroup hierarchies. @@ -21,13 +20,13 @@ var ( const defaultCgroupRoot = "/sys/fs/cgroup" -func initPaths(cg *configs.Cgroup) (map[string]string, error) { +func initPaths(cg *cgroups.Cgroup) (map[string]string, error) { root, err := rootPath() if err != nil { return nil, err } - inner, err := innerPath(cg) + inner, err := path.Inner(cg) if err != nil { return nil, err } @@ -136,22 +135,6 @@ func rootPath() (string, error) { return cgroupRoot, nil } -func innerPath(c *configs.Cgroup) (string, error) { - if (c.Name != "" || c.Parent != "") && c.Path != "" { - return "", errors.New("cgroup: either Path or Name and Parent should be used") - } - - // XXX: Do not remove CleanPath. Path safety is important! -- cyphar - innerPath := utils.CleanPath(c.Path) - if innerPath == "" { - cgParent := utils.CleanPath(c.Parent) - cgName := utils.CleanPath(c.Name) - innerPath = filepath.Join(cgParent, cgName) - } - - return innerPath, nil -} - func subsysPath(root, inner, subsystem string) (string, error) { // If the cgroup name/path is absolute do not look relative to the cgroup of the init process. if filepath.IsAbs(inner) { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go b/vendor/github.com/opencontainers/cgroups/fs/perf_event.go similarity index 53% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go rename to vendor/github.com/opencontainers/cgroups/fs/perf_event.go index b86955c8f31..929c412a3a7 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go +++ b/vendor/github.com/opencontainers/cgroups/fs/perf_event.go @@ -1,8 +1,7 @@ package fs import ( - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" ) type PerfEventGroup struct{} @@ -11,11 +10,11 @@ func (s *PerfEventGroup) Name() string { return "perf_event" } -func (s *PerfEventGroup) Apply(path string, _ *configs.Resources, pid int) error { +func (s *PerfEventGroup) Apply(path string, _ *cgroups.Resources, pid int) error { return apply(path, pid) } -func (s *PerfEventGroup) Set(_ string, _ *configs.Resources) error { +func (s *PerfEventGroup) Set(_ string, _ *cgroups.Resources) error { return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go b/vendor/github.com/opencontainers/cgroups/fs/pids.go similarity index 77% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go rename to vendor/github.com/opencontainers/cgroups/fs/pids.go index 1f13532a5ad..9319761e6ad 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go +++ b/vendor/github.com/opencontainers/cgroups/fs/pids.go @@ -4,9 +4,8 @@ import ( "math" "strconv" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) type PidsGroup struct{} @@ -15,11 +14,11 @@ func (s *PidsGroup) Name() string { return "pids" } -func (s *PidsGroup) Apply(path string, _ *configs.Resources, pid int) error { +func (s *PidsGroup) Apply(path string, _ *cgroups.Resources, pid int) error { return apply(path, pid) } -func (s *PidsGroup) Set(path string, r *configs.Resources) error { +func (s *PidsGroup) Set(path string, r 
*cgroups.Resources) error { if r.PidsLimit != 0 { // "max" is the fallback value. limit := "max" diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/rdma.go b/vendor/github.com/opencontainers/cgroups/fs/rdma.go similarity index 50% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/rdma.go rename to vendor/github.com/opencontainers/cgroups/fs/rdma.go index 5bbe0f35f81..4b175365f27 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/rdma.go +++ b/vendor/github.com/opencontainers/cgroups/fs/rdma.go @@ -1,9 +1,8 @@ package fs import ( - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) type RdmaGroup struct{} @@ -12,11 +11,11 @@ func (s *RdmaGroup) Name() string { return "rdma" } -func (s *RdmaGroup) Apply(path string, _ *configs.Resources, pid int) error { +func (s *RdmaGroup) Apply(path string, _ *cgroups.Resources, pid int) error { return apply(path, pid) } -func (s *RdmaGroup) Set(path string, r *configs.Resources) error { +func (s *RdmaGroup) Set(path string, r *cgroups.Resources) error { return fscommon.RdmaSet(path, r) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go b/vendor/github.com/opencontainers/cgroups/fs2/cpu.go similarity index 90% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go rename to vendor/github.com/opencontainers/cgroups/fs2/cpu.go index 8ee49d499f1..8eae67351cf 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/cpu.go @@ -8,17 +8,16 @@ import ( "golang.org/x/sys/unix" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) -func isCpuSet(r *configs.Resources) bool { +func isCPUSet(r *cgroups.Resources) bool { return r.CpuWeight != 0 || r.CpuQuota != 0 || r.CpuPeriod != 0 || r.CPUIdle != nil || r.CpuBurst != nil } -func setCpu(dirPath string, r *configs.Resources) error { - if !isCpuSet(r) { +func setCPU(dirPath string, r *cgroups.Resources) error { + if !isCPUSet(r) { return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go b/vendor/github.com/opencontainers/cgroups/fs2/cpuset.go similarity index 64% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go rename to vendor/github.com/opencontainers/cgroups/fs2/cpuset.go index 16c45bad8f0..9399919a062 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/cpuset.go @@ -1,15 +1,14 @@ package fs2 import ( - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" ) -func isCpusetSet(r *configs.Resources) bool { +func isCpusetSet(r *cgroups.Resources) bool { return r.CpusetCpus != "" || r.CpusetMems != "" } -func setCpuset(dirPath string, r *configs.Resources) error { +func setCpuset(dirPath string, r *cgroups.Resources) error { if !isCpusetSet(r) { return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/create.go 
b/vendor/github.com/opencontainers/cgroups/fs2/create.go similarity index 91% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/create.go rename to vendor/github.com/opencontainers/cgroups/fs2/create.go index 641123a4d84..565ca883079 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/create.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/create.go @@ -6,8 +6,7 @@ import ( "path/filepath" "strings" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" ) func supportedControllers() (string, error) { @@ -18,7 +17,7 @@ func supportedControllers() (string, error) { // based on (1) controllers available and (2) resources that are being set. // We don't check "pseudo" controllers such as // "freezer" and "devices". -func needAnyControllers(r *configs.Resources) (bool, error) { +func needAnyControllers(r *cgroups.Resources) (bool, error) { if r == nil { return false, nil } @@ -48,7 +47,7 @@ func needAnyControllers(r *configs.Resources) (bool, error) { if isIoSet(r) && have("io") { return true, nil } - if isCpuSet(r) && have("cpu") { + if isCPUSet(r) && have("cpu") { return true, nil } if isCpusetSet(r) && have("cpuset") { @@ -64,12 +63,12 @@ func needAnyControllers(r *configs.Resources) (bool, error) { // containsDomainController returns whether the current config contains domain controller or not. // Refer to: http://man7.org/linux/man-pages/man7/cgroups.7.html // As at Linux 4.19, the following controllers are threaded: cpu, perf_event, and pids. -func containsDomainController(r *configs.Resources) bool { - return isMemorySet(r) || isIoSet(r) || isCpuSet(r) || isHugeTlbSet(r) +func containsDomainController(r *cgroups.Resources) bool { + return isMemorySet(r) || isIoSet(r) || isCPUSet(r) || isHugeTlbSet(r) } // CreateCgroupPath creates cgroupv2 path, enabling all the supported controllers. 
-func CreateCgroupPath(path string, c *configs.Cgroup) (Err error) { +func CreateCgroupPath(path string, c *cgroups.Cgroup) (Err error) { if !strings.HasPrefix(path, UnifiedMountpoint) { return fmt.Errorf("invalid cgroup path %s", path) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go b/vendor/github.com/opencontainers/cgroups/fs2/defaultpath.go similarity index 57% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go rename to vendor/github.com/opencontainers/cgroups/fs2/defaultpath.go index 8ac8312017b..0bc479de3a5 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/defaultpath.go @@ -19,40 +19,25 @@ package fs2 import ( "bufio" "errors" - "fmt" "io" "os" "path/filepath" "strings" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/utils" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/internal/path" ) const UnifiedMountpoint = "/sys/fs/cgroup" -func defaultDirPath(c *configs.Cgroup) (string, error) { - if (c.Name != "" || c.Parent != "") && c.Path != "" { - return "", fmt.Errorf("cgroup: either Path or Name and Parent should be used, got %+v", c) - } - - return _defaultDirPath(UnifiedMountpoint, c.Path, c.Parent, c.Name) -} - -func _defaultDirPath(root, cgPath, cgParent, cgName string) (string, error) { - if (cgName != "" || cgParent != "") && cgPath != "" { - return "", errors.New("cgroup: either Path or Name and Parent should be used") +func defaultDirPath(c *cgroups.Cgroup) (string, error) { + innerPath, err := path.Inner(c) + if err != nil { + return "", err } - // XXX: Do not remove CleanPath. Path safety is important! -- cyphar - innerPath := utils.CleanPath(cgPath) - if innerPath == "" { - cgParent := utils.CleanPath(cgParent) - cgName := utils.CleanPath(cgName) - innerPath = filepath.Join(cgParent, cgName) - } if filepath.IsAbs(innerPath) { - return filepath.Join(root, innerPath), nil + return filepath.Join(UnifiedMountpoint, innerPath), nil } // we don't need to use /proc/thread-self here because runc always runs @@ -67,7 +52,7 @@ func _defaultDirPath(root, cgPath, cgParent, cgName string) (string, error) { // A parent cgroup (with no tasks in it) is what we need. 
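// Worked example with hypothetical values: if /proc/self/cgroup reports
// "0::/user.slice/user-1001.slice/session-1.scope" and innerPath is
// "mycontainer", the scope is stripped to its parent and the result is
// "/sys/fs/cgroup/user.slice/user-1001.slice/mycontainer".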
ownCgroup = filepath.Dir(ownCgroup) - return filepath.Join(root, ownCgroup, innerPath), nil + return filepath.Join(UnifiedMountpoint, ownCgroup, innerPath), nil } // parseCgroupFile parses /proc/PID/cgroup file and return string @@ -83,16 +68,9 @@ func parseCgroupFile(path string) (string, error) { func parseCgroupFromReader(r io.Reader) (string, error) { s := bufio.NewScanner(r) for s.Scan() { - var ( - text = s.Text() - parts = strings.SplitN(text, ":", 3) - ) - if len(parts) < 3 { - return "", fmt.Errorf("invalid cgroup entry: %q", text) - } - // text is like "0::/user.slice/user-1001.slice/session-1.scope" - if parts[0] == "0" && parts[1] == "" { - return parts[2], nil + // "0::/user.slice/user-1001.slice/session-1.scope" + if path, ok := strings.CutPrefix(s.Text(), "0::"); ok { + return path, nil } } if err := s.Err(); err != nil { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go b/vendor/github.com/opencontainers/cgroups/fs2/freezer.go similarity index 71% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go rename to vendor/github.com/opencontainers/cgroups/fs2/freezer.go index 8917a6411d6..f0192f0955d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/freezer.go @@ -10,18 +10,17 @@ import ( "golang.org/x/sys/unix" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" ) -func setFreezer(dirPath string, state configs.FreezerState) error { +func setFreezer(dirPath string, state cgroups.FreezerState) error { var stateStr string switch state { - case configs.Undefined: + case cgroups.Undefined: return nil - case configs.Frozen: + case cgroups.Frozen: stateStr = "1" - case configs.Thawed: + case cgroups.Thawed: stateStr = "0" default: return fmt.Errorf("invalid freezer state %q requested", state) @@ -32,7 +31,7 @@ func setFreezer(dirPath string, state configs.FreezerState) error { // We can ignore this request as long as the user didn't ask us to // freeze the container (since without the freezer cgroup, that's a // no-op). 
- if state != configs.Frozen { + if state != cgroups.Frozen { return nil } return fmt.Errorf("freezer not supported: %w", err) @@ -51,7 +50,7 @@ func setFreezer(dirPath string, state configs.FreezerState) error { return nil } -func getFreezer(dirPath string) (configs.FreezerState, error) { +func getFreezer(dirPath string) (cgroups.FreezerState, error) { fd, err := cgroups.OpenFile(dirPath, "cgroup.freeze", unix.O_RDONLY) if err != nil { // If the kernel is too old, then we just treat the freezer as being in @@ -59,36 +58,36 @@ func getFreezer(dirPath string) (configs.FreezerState, error) { if os.IsNotExist(err) || errors.Is(err, unix.ENODEV) { err = nil } - return configs.Undefined, err + return cgroups.Undefined, err } defer fd.Close() return readFreezer(dirPath, fd) } -func readFreezer(dirPath string, fd *os.File) (configs.FreezerState, error) { +func readFreezer(dirPath string, fd *os.File) (cgroups.FreezerState, error) { if _, err := fd.Seek(0, 0); err != nil { - return configs.Undefined, err + return cgroups.Undefined, err } state := make([]byte, 2) if _, err := fd.Read(state); err != nil { - return configs.Undefined, err + return cgroups.Undefined, err } switch string(state) { case "0\n": - return configs.Thawed, nil + return cgroups.Thawed, nil case "1\n": return waitFrozen(dirPath) default: - return configs.Undefined, fmt.Errorf(`unknown "cgroup.freeze" state: %q`, state) + return cgroups.Undefined, fmt.Errorf(`unknown "cgroup.freeze" state: %q`, state) } } // waitFrozen polls cgroup.events until it sees "frozen 1" in it. -func waitFrozen(dirPath string) (configs.FreezerState, error) { +func waitFrozen(dirPath string) (cgroups.FreezerState, error) { fd, err := cgroups.OpenFile(dirPath, "cgroup.events", unix.O_RDONLY) if err != nil { - return configs.Undefined, err + return cgroups.Undefined, err } defer fd.Close() @@ -103,13 +102,11 @@ func waitFrozen(dirPath string) (configs.FreezerState, error) { scanner := bufio.NewScanner(fd) for i := 0; scanner.Scan(); { if i == maxIter { - return configs.Undefined, fmt.Errorf("timeout of %s reached waiting for the cgroup to freeze", waitTime*maxIter) + return cgroups.Undefined, fmt.Errorf("timeout of %s reached waiting for the cgroup to freeze", waitTime*maxIter) } - line := scanner.Text() - val := strings.TrimPrefix(line, "frozen ") - if val != line { // got prefix + if val, ok := strings.CutPrefix(scanner.Text(), "frozen "); ok { if val[0] == '1' { - return configs.Frozen, nil + return cgroups.Frozen, nil } i++ @@ -117,11 +114,11 @@ func waitFrozen(dirPath string) (configs.FreezerState, error) { time.Sleep(waitTime) _, err := fd.Seek(0, 0) if err != nil { - return configs.Undefined, err + return cgroups.Undefined, err } } } // Should only reach here either on read error, // or if the file does not contain "frozen " line. 
- return configs.Undefined, scanner.Err() + return cgroups.Undefined, scanner.Err() } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go b/vendor/github.com/opencontainers/cgroups/fs2/fs2.go similarity index 91% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go rename to vendor/github.com/opencontainers/cgroups/fs2/fs2.go index 93f81bf8dae..c5d5a1f8ec3 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/fs2.go @@ -6,15 +6,14 @@ import ( "os" "strings" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) type parseError = fscommon.ParseError type Manager struct { - config *configs.Cgroup + config *cgroups.Cgroup // dirPath is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope" dirPath string // controllers is content of "cgroup.controllers" file. @@ -25,7 +24,7 @@ type Manager struct { // NewManager creates a manager for cgroup v2 unified hierarchy. // dirPath is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope". // If dirPath is empty, it is automatically set using config. -func NewManager(config *configs.Cgroup, dirPath string) (*Manager, error) { +func NewManager(config *cgroups.Cgroup, dirPath string) (*Manager, error) { if dirPath == "" { var err error dirPath, err = defaultDirPath(config) @@ -143,7 +142,7 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) { return st, nil } -func (m *Manager) Freeze(state configs.FreezerState) error { +func (m *Manager) Freeze(state cgroups.FreezerState) error { if m.config.Resources == nil { return errors.New("cannot toggle freezer: cgroups not configured for container") } @@ -162,7 +161,7 @@ func (m *Manager) Path(_ string) string { return m.dirPath } -func (m *Manager) Set(r *configs.Resources) error { +func (m *Manager) Set(r *cgroups.Resources) error { if r == nil { return nil } @@ -182,7 +181,7 @@ func (m *Manager) Set(r *configs.Resources) error { return err } // cpu (since kernel 4.15) - if err := setCpu(m.dirPath, r); err != nil { + if err := setCPU(m.dirPath, r); err != nil { return err } // devices (since kernel 4.15, pseudo-controller) @@ -218,7 +217,7 @@ func (m *Manager) Set(r *configs.Resources) error { return nil } -func setDevices(dirPath string, r *configs.Resources) error { +func setDevices(dirPath string, r *cgroups.Resources) error { if cgroups.DevicesSetV2 == nil { if len(r.Devices) > 0 { return cgroups.ErrDevicesUnsupported @@ -238,11 +237,10 @@ func (m *Manager) setUnified(res map[string]string) error { if errors.Is(err, os.ErrPermission) || errors.Is(err, os.ErrNotExist) { // Check if a controller is available, // to give more specific error if not. 
- sk := strings.SplitN(k, ".", 2) - if len(sk) != 2 { + c, _, ok := strings.Cut(k, ".") + if !ok { return fmt.Errorf("unified resource %q must be in the form CONTROLLER.PARAMETER", k) } - c := sk[0] if _, ok := m.controllers[c]; !ok && c != "cgroup" { return fmt.Errorf("unified resource %q can't be set: controller %q not available", k, c) } @@ -260,11 +258,11 @@ func (m *Manager) GetPaths() map[string]string { return paths } -func (m *Manager) GetCgroups() (*configs.Cgroup, error) { +func (m *Manager) GetCgroups() (*cgroups.Cgroup, error) { return m.config, nil } -func (m *Manager) GetFreezerState() (configs.FreezerState, error) { +func (m *Manager) GetFreezerState() (cgroups.FreezerState, error) { return getFreezer(m.dirPath) } @@ -285,7 +283,7 @@ func (m *Manager) OOMKillCount() (uint64, error) { return c, err } -func CheckMemoryUsage(dirPath string, r *configs.Resources) error { +func CheckMemoryUsage(dirPath string, r *cgroups.Resources) error { if !r.MemoryCheckBeforeUpdate { return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go b/vendor/github.com/opencontainers/cgroups/fs2/hugetlb.go similarity index 81% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go rename to vendor/github.com/opencontainers/cgroups/fs2/hugetlb.go index 2ce2631e187..8e1ac874a58 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/hugetlb.go @@ -5,16 +5,15 @@ import ( "os" "strconv" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) -func isHugeTlbSet(r *configs.Resources) bool { +func isHugeTlbSet(r *cgroups.Resources) bool { return len(r.HugetlbLimit) > 0 } -func setHugeTlb(dirPath string, r *configs.Resources) error { +func setHugeTlb(dirPath string, r *cgroups.Resources) error { if !isHugeTlbSet(r) { return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go b/vendor/github.com/opencontainers/cgroups/fs2/io.go similarity index 95% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go rename to vendor/github.com/opencontainers/cgroups/fs2/io.go index b2ff7d34086..0f6ef7fea55 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/io.go @@ -10,11 +10,10 @@ import ( "github.com/sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" ) -func isIoSet(r *configs.Resources) bool { +func isIoSet(r *cgroups.Resources) bool { return r.BlkioWeight != 0 || len(r.BlkioWeightDevice) > 0 || len(r.BlkioThrottleReadBpsDevice) > 0 || @@ -37,7 +36,7 @@ func bfqDeviceWeightSupported(bfq *os.File) bool { return err != nil } -func setIo(dirPath string, r *configs.Resources) error { +func setIo(dirPath string, r *cgroups.Resources) error { if !isIoSet(r) { return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go b/vendor/github.com/opencontainers/cgroups/fs2/memory.go similarity index 95% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go rename to vendor/github.com/opencontainers/cgroups/fs2/memory.go index df8336ba0f8..d67fd8aba55 
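// setUnified earlier in this hunk now validates unified keys with
// strings.Cut rather than SplitN. A sketch of the split, assuming "strings"
// is imported; splitUnifiedKey is a hypothetical helper, not part of this
// patch:
func splitUnifiedKey(k string) (controller, param string, ok bool) {
	// "memory.high" -> ("memory", "high", true)
	// "bogus"       -> ("bogus", "", false)
	return strings.Cut(k, ".")
}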
100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/memory.go @@ -10,9 +10,8 @@ import ( "golang.org/x/sys/unix" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) // numToStr converts an int64 value to a string for writing to a @@ -32,11 +31,11 @@ func numToStr(value int64) (ret string) { return ret } -func isMemorySet(r *configs.Resources) bool { +func isMemorySet(r *cgroups.Resources) bool { return r.MemoryReservation != 0 || r.Memory != 0 || r.MemorySwap != 0 } -func setMemory(dirPath string, r *configs.Resources) error { +func setMemory(dirPath string, r *cgroups.Resources) error { if !isMemorySet(r) { return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/misc.go b/vendor/github.com/opencontainers/cgroups/fs2/misc.go similarity index 87% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/misc.go rename to vendor/github.com/opencontainers/cgroups/fs2/misc.go index f0b292aa015..f20136b66dc 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/misc.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/misc.go @@ -5,8 +5,8 @@ import ( "os" "strings" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) func statMisc(dirPath string, stats *cgroups.Stats) error { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go b/vendor/github.com/opencontainers/cgroups/fs2/pids.go similarity index 84% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go rename to vendor/github.com/opencontainers/cgroups/fs2/pids.go index c8c4a365894..9b82b90115e 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/pids.go @@ -8,16 +8,15 @@ import ( "golang.org/x/sys/unix" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" - "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/cgroups" + "github.com/opencontainers/cgroups/fscommon" ) -func isPidsSet(r *configs.Resources) bool { +func isPidsSet(r *cgroups.Resources) bool { return r.PidsLimit != 0 } -func setPids(dirPath string, r *configs.Resources) error { +func setPids(dirPath string, r *cgroups.Resources) error { if !isPidsSet(r) { return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/psi.go b/vendor/github.com/opencontainers/cgroups/fs2/psi.go similarity index 82% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/psi.go rename to vendor/github.com/opencontainers/cgroups/fs2/psi.go index 09f34888516..010fe0bff27 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/psi.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/psi.go @@ -10,7 +10,7 @@ import ( "golang.org/x/sys/unix" - "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/cgroups" ) func statPSI(dirPath string, file string) (*cgroups.PSIStats, error) { @@ -58,12 +58,12 @@ func statPSI(dirPath string, file string) (*cgroups.PSIStats, 
error) { func parsePSIData(psi []string) (cgroups.PSIData, error) { data := cgroups.PSIData{} for _, f := range psi { - kv := strings.SplitN(f, "=", 2) - if len(kv) != 2 { + key, val, ok := strings.Cut(f, "=") + if !ok { return data, fmt.Errorf("invalid psi data: %q", f) } var pv *float64 - switch kv[0] { + switch key { case "avg10": pv = &data.Avg10 case "avg60": @@ -71,16 +71,16 @@ func parsePSIData(psi []string) (cgroups.PSIData, error) { case "avg300": pv = &data.Avg300 case "total": - v, err := strconv.ParseUint(kv[1], 10, 64) + v, err := strconv.ParseUint(val, 10, 64) if err != nil { - return data, fmt.Errorf("invalid %s PSI value: %w", kv[0], err) + return data, fmt.Errorf("invalid %s PSI value: %w", key, err) } data.Total = v } if pv != nil { - v, err := strconv.ParseFloat(kv[1], 64) + v, err := strconv.ParseFloat(val, 64) if err != nil { - return data, fmt.Errorf("invalid %s PSI value: %w", kv[0], err) + return data, fmt.Errorf("invalid %s PSI value: %w", key, err) } *pv = v } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/rdma.go b/vendor/github.com/opencontainers/cgroups/fscommon/rdma.go similarity index 86% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/rdma.go rename to vendor/github.com/opencontainers/cgroups/fscommon/rdma.go index d463d15ee4e..86e38fdcbaa 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/rdma.go +++ b/vendor/github.com/opencontainers/cgroups/fscommon/rdma.go @@ -8,23 +8,21 @@ import ( "strconv" "strings" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" "golang.org/x/sys/unix" + + "github.com/opencontainers/cgroups" ) // parseRdmaKV parses raw string to RdmaEntry. func parseRdmaKV(raw string, entry *cgroups.RdmaEntry) error { var value uint32 - parts := strings.SplitN(raw, "=", 3) + k, v, ok := strings.Cut(raw, "=") - if len(parts) != 2 { + if !ok { return errors.New("Unable to parse RDMA entry") } - k, v := parts[0], parts[1] - if v == "max" { value = math.MaxUint32 } else { @@ -34,9 +32,10 @@ func parseRdmaKV(raw string, entry *cgroups.RdmaEntry) error { } value = uint32(val64) } - if k == "hca_handle" { + switch k { + case "hca_handle": entry.HcaHandles = value - } else if k == "hca_object" { + case "hca_object": entry.HcaObjects = value } @@ -99,7 +98,7 @@ func RdmaGetStats(path string, stats *cgroups.Stats) error { return nil } -func createCmdString(device string, limits configs.LinuxRdma) string { +func createCmdString(device string, limits cgroups.LinuxRdma) string { cmdString := device if limits.HcaHandles != nil { cmdString += " hca_handle=" + strconv.FormatUint(uint64(*limits.HcaHandles), 10) @@ -111,7 +110,7 @@ func createCmdString(device string, limits configs.LinuxRdma) string { } // RdmaSet sets RDMA resources. 
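// parsePSIData and parseRdmaKV above both swap SplitN for strings.Cut when
// splitting "key=value" fields. A sketch of that step, assuming "fmt",
// "strconv" and "strings" are imported; parseAvgField is a hypothetical
// helper, not part of this patch:
func parseAvgField(field string) (key string, v float64, err error) {
	key, val, ok := strings.Cut(field, "=")
	if !ok {
		return "", 0, fmt.Errorf("invalid field: %q", field)
	}
	v, err = strconv.ParseFloat(val, 64)
	return key, v, err
}
// parseAvgField("avg10=0.42") yields ("avg10", 0.42, nil).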
-func RdmaSet(path string, r *configs.Resources) error { +func RdmaSet(path string, r *cgroups.Resources) error { for device, limits := range r.Rdma { if err := cgroups.WriteFile(path, "rdma.max", createCmdString(device, limits)); err != nil { return err diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go b/vendor/github.com/opencontainers/cgroups/fscommon/utils.go similarity index 79% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go rename to vendor/github.com/opencontainers/cgroups/fscommon/utils.go index f4a51c9e56f..d8f8dfc0237 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go +++ b/vendor/github.com/opencontainers/cgroups/fscommon/utils.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/cgroups" ) var ( @@ -54,38 +54,39 @@ func ParseUint(s string, base, bitSize int) (uint64, error) { return value, nil } -// ParseKeyValue parses a space-separated "name value" kind of cgroup +// ParseKeyValue parses a space-separated "key value" kind of cgroup // parameter and returns its key as a string, and its value as uint64 -// (ParseUint is used to convert the value). For example, +// (using [ParseUint] to convert the value). For example, // "io_service_bytes 1234" will be returned as "io_service_bytes", 1234. func ParseKeyValue(t string) (string, uint64, error) { - parts := strings.SplitN(t, " ", 3) - if len(parts) != 2 { - return "", 0, fmt.Errorf("line %q is not in key value format", t) + key, val, ok := strings.Cut(t, " ") + if !ok || key == "" || val == "" { + return "", 0, fmt.Errorf(`line %q is not in "key value" format`, t) } - value, err := ParseUint(parts[1], 10, 64) + value, err := ParseUint(val, 10, 64) if err != nil { return "", 0, err } - return parts[0], value, nil + return key, value, nil } -// GetValueByKey reads a key-value pairs from the specified cgroup file, -// and returns a value of the specified key. ParseUint is used for value -// conversion. +// GetValueByKey reads space-separated "key value" pairs from the specified +// cgroup file, looking for a specified key, and returns its value as uint64, +// using [ParseUint] for conversion. If the value is not found, 0 is returned. func GetValueByKey(path, file, key string) (uint64, error) { content, err := cgroups.ReadFile(path, file) if err != nil { return 0, err } + key += " " lines := strings.Split(content, "\n") for _, line := range lines { - arr := strings.Split(line, " ") - if len(arr) == 2 && arr[0] == key { - val, err := ParseUint(arr[1], 10, 64) + v, ok := strings.CutPrefix(line, key) + if ok { + val, err := ParseUint(v, 10, 64) if err != nil { err = &ParseError{Path: path, File: file, Err: err} } @@ -103,7 +104,6 @@ func GetCgroupParamUint(path, file string) (uint64, error) { if err != nil { return 0, err } - contents = strings.TrimSpace(contents) if contents == "max" { return math.MaxUint64, nil } @@ -118,11 +118,10 @@ func GetCgroupParamUint(path, file string) (uint64, error) { // GetCgroupParamInt reads a single int64 value from specified cgroup file. // If the value read is "max", the math.MaxInt64 is returned. 
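// GetValueByKey above appends a trailing space to the key and scans lines
// with strings.CutPrefix. A sketch of the lookup, assuming "strings" is
// imported; lookupValue is a hypothetical helper, not part of this patch:
func lookupValue(content, key string) (string, bool) {
	prefix := key + " "
	for _, line := range strings.Split(content, "\n") {
		if v, ok := strings.CutPrefix(line, prefix); ok {
			return v, true
		}
	}
	return "", false
}
// lookupValue("rss 1234\ncache 99", "rss") returns ("1234", true).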
func GetCgroupParamInt(path, file string) (int64, error) { - contents, err := cgroups.ReadFile(path, file) + contents, err := GetCgroupParamString(path, file) if err != nil { return 0, err } - contents = strings.TrimSpace(contents) if contents == "max" { return math.MaxInt64, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go b/vendor/github.com/opencontainers/cgroups/getallpids.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go rename to vendor/github.com/opencontainers/cgroups/getallpids.go diff --git a/vendor/github.com/opencontainers/cgroups/internal/path/path.go b/vendor/github.com/opencontainers/cgroups/internal/path/path.go new file mode 100644 index 00000000000..a105a7cf48d --- /dev/null +++ b/vendor/github.com/opencontainers/cgroups/internal/path/path.go @@ -0,0 +1,52 @@ +package path + +import ( + "errors" + "os" + "path/filepath" + + "github.com/opencontainers/cgroups" +) + +// Inner returns a path to cgroup relative to a cgroup mount point, based +// on cgroup configuration, or an error, if cgroup configuration is invalid. +// To be used only by fs cgroup managers (systemd has different path rules). +func Inner(c *cgroups.Cgroup) (string, error) { + if (c.Name != "" || c.Parent != "") && c.Path != "" { + return "", errors.New("cgroup: either Path or Name and Parent should be used") + } + + // XXX: Do not remove cleanPath. Path safety is important! -- cyphar + innerPath := cleanPath(c.Path) + if innerPath == "" { + cgParent := cleanPath(c.Parent) + cgName := cleanPath(c.Name) + innerPath = filepath.Join(cgParent, cgName) + } + + return innerPath, nil +} + +// cleanPath is a copy of github.com/opencontainers/runc/libcontainer/utils.CleanPath. +func cleanPath(path string) string { + // Deal with empty strings nicely. + if path == "" { + return "" + } + + // Ensure that all paths are cleaned (especially problematic ones like + // "/../../../../../" which can cause lots of issues). + + if filepath.IsAbs(path) { + return filepath.Clean(path) + } + + // If the path isn't absolute, we need to do more processing to fix paths + // such as "../../../..//some/path". We also shouldn't convert absolute + // paths to relative ones. + path = filepath.Clean(string(os.PathSeparator) + path) + // This can't fail, as (by definition) all paths are relative to root. 
+ path, _ = filepath.Rel(string(os.PathSeparator), path) + + return path +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/cgroups/stats.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go rename to vendor/github.com/opencontainers/cgroups/stats.go diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/cgroups/utils.go similarity index 99% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go rename to vendor/github.com/opencontainers/cgroups/utils.go index d404647c8c0..9ef24b1ac6e 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ b/vendor/github.com/opencontainers/cgroups/utils.go @@ -332,8 +332,8 @@ func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) { for _, file := range fileNames { // example: hugepages-1048576kB - val := strings.TrimPrefix(file, "hugepages-") - if len(val) == len(file) { + val, ok := strings.CutPrefix(file, "hugepages-") + if !ok { // Unexpected file name: no prefix found, ignore it. continue } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go b/vendor/github.com/opencontainers/cgroups/v1_utils.go similarity index 100% rename from vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go rename to vendor/github.com/opencontainers/cgroups/v1_utils.go diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go deleted file mode 100644 index 22fe0f9b4c1..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ /dev/null @@ -1,508 +0,0 @@ -package configs - -import ( - "bytes" - "encoding/json" - "fmt" - "os/exec" - "time" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" - - "github.com/opencontainers/runc/libcontainer/devices" - "github.com/opencontainers/runtime-spec/specs-go" -) - -type Rlimit struct { - Type int `json:"type"` - Hard uint64 `json:"hard"` - Soft uint64 `json:"soft"` -} - -// IDMap represents UID/GID Mappings for User Namespaces. -type IDMap struct { - ContainerID int64 `json:"container_id"` - HostID int64 `json:"host_id"` - Size int64 `json:"size"` -} - -// Seccomp represents syscall restrictions -// By default, only the native architecture of the kernel is allowed to be used -// for syscalls. Additional architectures can be added by specifying them in -// Architectures. 
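// getHugePageSizeFromFilenames above (in cgroups/utils.go) now peels the
// "hugepages-" prefix with strings.CutPrefix. A sketch, assuming "strings"
// is imported; hugePageSize is a hypothetical helper, not part of this
// patch:
func hugePageSize(fileName string) (string, bool) {
	// "hugepages-1048576kB" -> ("1048576kB", true);
	// names without the prefix come back unchanged with ok == false.
	return strings.CutPrefix(fileName, "hugepages-")
}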
-type Seccomp struct { - DefaultAction Action `json:"default_action"` - Architectures []string `json:"architectures"` - Flags []specs.LinuxSeccompFlag `json:"flags"` - Syscalls []*Syscall `json:"syscalls"` - DefaultErrnoRet *uint `json:"default_errno_ret"` - ListenerPath string `json:"listener_path,omitempty"` - ListenerMetadata string `json:"listener_metadata,omitempty"` -} - -// Action is taken upon rule match in Seccomp -type Action int - -const ( - Kill Action = iota + 1 - Errno - Trap - Allow - Trace - Log - Notify - KillThread - KillProcess -) - -// Operator is a comparison operator to be used when matching syscall arguments in Seccomp -type Operator int - -const ( - EqualTo Operator = iota + 1 - NotEqualTo - GreaterThan - GreaterThanOrEqualTo - LessThan - LessThanOrEqualTo - MaskEqualTo -) - -// Arg is a rule to match a specific syscall argument in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"value_two"` - Op Operator `json:"op"` -} - -// Syscall is a rule to match a syscall in Seccomp -type Syscall struct { - Name string `json:"name"` - Action Action `json:"action"` - ErrnoRet *uint `json:"errnoRet"` - Args []*Arg `json:"args"` -} - -// Config defines configuration options for executing a process inside a contained environment. -type Config struct { - // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs - // This is a common option when the container is running in ramdisk - NoPivotRoot bool `json:"no_pivot_root"` - - // ParentDeathSignal specifies the signal that is sent to the container's process in the case - // that the parent process dies. - ParentDeathSignal int `json:"parent_death_signal"` - - // Path to a directory containing the container's root filesystem. - Rootfs string `json:"rootfs"` - - // Umask is the umask to use inside of the container. - Umask *uint32 `json:"umask"` - - // Readonlyfs will remount the container's rootfs as readonly where only externally mounted - // bind mounts are writtable. - Readonlyfs bool `json:"readonlyfs"` - - // Specifies the mount propagation flags to be applied to /. - RootPropagation int `json:"rootPropagation"` - - // Mounts specify additional source and destination paths that will be mounted inside the container's - // rootfs and mount namespace if specified - Mounts []*Mount `json:"mounts"` - - // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well! 
- Devices []*devices.Device `json:"devices"` - - MountLabel string `json:"mount_label"` - - // Hostname optionally sets the container's hostname if provided - Hostname string `json:"hostname"` - - // Domainname optionally sets the container's domainname if provided - Domainname string `json:"domainname"` - - // Namespaces specifies the container's namespaces that it should setup when cloning the init process - // If a namespace is not provided that namespace is shared from the container's parent process - Namespaces Namespaces `json:"namespaces"` - - // Capabilities specify the capabilities to keep when executing the process inside the container - // All capabilities not specified will be dropped from the processes capability mask - Capabilities *Capabilities `json:"capabilities"` - - // Networks specifies the container's network setup to be created - Networks []*Network `json:"networks"` - - // Routes can be specified to create entries in the route table as the container is started - Routes []*Route `json:"routes"` - - // Cgroups specifies specific cgroup settings for the various subsystems that the container is - // placed into to limit the resources the container has available - Cgroups *Cgroup `json:"cgroups"` - - // AppArmorProfile specifies the profile to apply to the process running in the container and is - // change at the time the process is execed - AppArmorProfile string `json:"apparmor_profile,omitempty"` - - // ProcessLabel specifies the label to apply to the process running in the container. It is - // commonly used by selinux - ProcessLabel string `json:"process_label,omitempty"` - - // Rlimits specifies the resource limits, such as max open files, to set in the container - // If Rlimits are not set, the container will inherit rlimits from the parent process - Rlimits []Rlimit `json:"rlimits,omitempty"` - - // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores - // for a process. Valid values are between the range [-1000, '1000'], where processes with - // higher scores are preferred for being killed. If it is unset then we don't touch the current - // value. - // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/ - OomScoreAdj *int `json:"oom_score_adj,omitempty"` - - // UIDMappings is an array of User ID mappings for User Namespaces - UIDMappings []IDMap `json:"uid_mappings"` - - // GIDMappings is an array of Group ID mappings for User Namespaces - GIDMappings []IDMap `json:"gid_mappings"` - - // MaskPaths specifies paths within the container's rootfs to mask over with a bind - // mount pointing to /dev/null as to prevent reads of the file. - MaskPaths []string `json:"mask_paths"` - - // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only - // so that these files prevent any writes. - ReadonlyPaths []string `json:"readonly_paths"` - - // Sysctl is a map of properties and their values. It is the equivalent of using - // sysctl -w my.property.name value in Linux. - Sysctl map[string]string `json:"sysctl"` - - // Seccomp allows actions to be taken whenever a syscall is made within the container. - // A number of rules are given, each having an action to be taken if a syscall matches it. - // A default action to be taken if no rules match is also given. - Seccomp *Seccomp `json:"seccomp"` - - // NoNewPrivileges controls whether processes in the container can gain additional privileges. 
- NoNewPrivileges bool `json:"no_new_privileges,omitempty"` - - // Hooks are a collection of actions to perform at various container lifecycle events. - // CommandHooks are serialized to JSON, but other hooks are not. - Hooks Hooks - - // Version is the version of opencontainer specification that is supported. - Version string `json:"version"` - - // Labels are user defined metadata that is stored in the config and populated on the state - Labels []string `json:"labels"` - - // NoNewKeyring will not allocated a new session keyring for the container. It will use the - // callers keyring in this case. - NoNewKeyring bool `json:"no_new_keyring"` - - // IntelRdt specifies settings for Intel RDT group that the container is placed into - // to limit the resources (e.g., L3 cache, memory bandwidth) the container has available - IntelRdt *IntelRdt `json:"intel_rdt,omitempty"` - - // RootlessEUID is set when the runc was launched with non-zero EUID. - // Note that RootlessEUID is set to false when launched with EUID=0 in userns. - // When RootlessEUID is set, runc creates a new userns for the container. - // (config.json needs to contain userns settings) - RootlessEUID bool `json:"rootless_euid,omitempty"` - - // RootlessCgroups is set when unlikely to have the full access to cgroups. - // When RootlessCgroups is set, cgroups errors are ignored. - RootlessCgroups bool `json:"rootless_cgroups,omitempty"` - - // TimeOffsets specifies the offset for supporting time namespaces. - TimeOffsets map[string]specs.LinuxTimeOffset `json:"time_offsets,omitempty"` - - // Scheduler represents the scheduling attributes for a process. - Scheduler *Scheduler `json:"scheduler,omitempty"` - - // Personality contains configuration for the Linux personality syscall. - Personality *LinuxPersonality `json:"personality,omitempty"` - - // IOPriority is the container's I/O priority. - IOPriority *IOPriority `json:"io_priority,omitempty"` -} - -// Scheduler is based on the Linux sched_setattr(2) syscall. -type Scheduler = specs.Scheduler - -// ToSchedAttr is to convert *configs.Scheduler to *unix.SchedAttr. 
-func ToSchedAttr(scheduler *Scheduler) (*unix.SchedAttr, error) { - var policy uint32 - switch scheduler.Policy { - case specs.SchedOther: - policy = 0 - case specs.SchedFIFO: - policy = 1 - case specs.SchedRR: - policy = 2 - case specs.SchedBatch: - policy = 3 - case specs.SchedISO: - policy = 4 - case specs.SchedIdle: - policy = 5 - case specs.SchedDeadline: - policy = 6 - default: - return nil, fmt.Errorf("invalid scheduler policy: %s", scheduler.Policy) - } - - var flags uint64 - for _, flag := range scheduler.Flags { - switch flag { - case specs.SchedFlagResetOnFork: - flags |= 0x01 - case specs.SchedFlagReclaim: - flags |= 0x02 - case specs.SchedFlagDLOverrun: - flags |= 0x04 - case specs.SchedFlagKeepPolicy: - flags |= 0x08 - case specs.SchedFlagKeepParams: - flags |= 0x10 - case specs.SchedFlagUtilClampMin: - flags |= 0x20 - case specs.SchedFlagUtilClampMax: - flags |= 0x40 - default: - return nil, fmt.Errorf("invalid scheduler flag: %s", flag) - } - } - - return &unix.SchedAttr{ - Size: unix.SizeofSchedAttr, - Policy: policy, - Flags: flags, - Nice: scheduler.Nice, - Priority: uint32(scheduler.Priority), - Runtime: scheduler.Runtime, - Deadline: scheduler.Deadline, - Period: scheduler.Period, - }, nil -} - -var IOPrioClassMapping = map[specs.IOPriorityClass]int{ - specs.IOPRIO_CLASS_RT: 1, - specs.IOPRIO_CLASS_BE: 2, - specs.IOPRIO_CLASS_IDLE: 3, -} - -type IOPriority = specs.LinuxIOPriority - -type ( - HookName string - HookList []Hook - Hooks map[HookName]HookList -) - -const ( - // Prestart commands are executed after the container namespaces are created, - // but before the user supplied command is executed from init. - // Note: This hook is now deprecated - // Prestart commands are called in the Runtime namespace. - Prestart HookName = "prestart" - - // CreateRuntime commands MUST be called as part of the create operation after - // the runtime environment has been created but before the pivot_root has been executed. - // CreateRuntime is called immediately after the deprecated Prestart hook. - // CreateRuntime commands are called in the Runtime Namespace. - CreateRuntime HookName = "createRuntime" - - // CreateContainer commands MUST be called as part of the create operation after - // the runtime environment has been created but before the pivot_root has been executed. - // CreateContainer commands are called in the Container namespace. - CreateContainer HookName = "createContainer" - - // StartContainer commands MUST be called as part of the start operation and before - // the container process is started. - // StartContainer commands are called in the Container namespace. - StartContainer HookName = "startContainer" - - // Poststart commands are executed after the container init process starts. - // Poststart commands are called in the Runtime Namespace. - Poststart HookName = "poststart" - - // Poststop commands are executed after the container init process exits. - // Poststop commands are called in the Runtime Namespace. - Poststop HookName = "poststop" -) - -// KnownHookNames returns the known hook names. -// Used by `runc features`. -func KnownHookNames() []string { - return []string{ - string(Prestart), // deprecated - string(CreateRuntime), - string(CreateContainer), - string(StartContainer), - string(Poststart), - string(Poststop), - } -} - -type Capabilities struct { - // Bounding is the set of capabilities checked by the kernel. - Bounding []string - // Effective is the set of capabilities checked by the kernel. 
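// ToSchedAttr above folds scheduler flags into one bitmask before handing
// them to sched_setattr(2). A sketch of the accumulation pattern, assuming
// "fmt" is imported and using only the first two flag values from the
// deleted code; foldFlags is a hypothetical helper, not part of this patch:
func foldFlags(flags []string) (uint64, error) {
	var out uint64
	for _, f := range flags {
		switch f {
		case "reset-on-fork":
			out |= 0x01
		case "reclaim":
			out |= 0x02
		default:
			return 0, fmt.Errorf("invalid scheduler flag: %s", f)
		}
	}
	return out, nil
}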
- Effective []string - // Inheritable is the capabilities preserved across execve. - Inheritable []string - // Permitted is the limiting superset for effective capabilities. - Permitted []string - // Ambient is the ambient set of capabilities that are kept. - Ambient []string -} - -// Deprecated: use (Hooks).Run instead. -func (hooks HookList) RunHooks(state *specs.State) error { - for i, h := range hooks { - if err := h.Run(state); err != nil { - return fmt.Errorf("error running hook #%d: %w", i, err) - } - } - - return nil -} - -func (hooks *Hooks) UnmarshalJSON(b []byte) error { - var state map[HookName][]CommandHook - - if err := json.Unmarshal(b, &state); err != nil { - return err - } - - *hooks = Hooks{} - for n, commandHooks := range state { - if len(commandHooks) == 0 { - continue - } - - (*hooks)[n] = HookList{} - for _, h := range commandHooks { - (*hooks)[n] = append((*hooks)[n], h) - } - } - - return nil -} - -func (hooks *Hooks) MarshalJSON() ([]byte, error) { - serialize := func(hooks []Hook) (serializableHooks []CommandHook) { - for _, hook := range hooks { - switch chook := hook.(type) { - case CommandHook: - serializableHooks = append(serializableHooks, chook) - default: - logrus.Warnf("cannot serialize hook of type %T, skipping", hook) - } - } - - return serializableHooks - } - - return json.Marshal(map[string]interface{}{ - "prestart": serialize((*hooks)[Prestart]), - "createRuntime": serialize((*hooks)[CreateRuntime]), - "createContainer": serialize((*hooks)[CreateContainer]), - "startContainer": serialize((*hooks)[StartContainer]), - "poststart": serialize((*hooks)[Poststart]), - "poststop": serialize((*hooks)[Poststop]), - }) -} - -// Run executes all hooks for the given hook name. -func (hooks Hooks) Run(name HookName, state *specs.State) error { - list := hooks[name] - for i, h := range list { - if err := h.Run(state); err != nil { - return fmt.Errorf("error running %s hook #%d: %w", name, i, err) - } - } - - return nil -} - -type Hook interface { - // Run executes the hook with the provided state. - Run(*specs.State) error -} - -// NewFunctionHook will call the provided function when the hook is run. -func NewFunctionHook(f func(*specs.State) error) FuncHook { - return FuncHook{ - run: f, - } -} - -type FuncHook struct { - run func(*specs.State) error -} - -func (f FuncHook) Run(s *specs.State) error { - return f.run(s) -} - -type Command struct { - Path string `json:"path"` - Args []string `json:"args"` - Env []string `json:"env"` - Dir string `json:"dir"` - Timeout *time.Duration `json:"timeout"` -} - -// NewCommandHook will execute the provided command when the hook is run. 
-func NewCommandHook(cmd Command) CommandHook { - return CommandHook{ - Command: cmd, - } -} - -type CommandHook struct { - Command -} - -func (c Command) Run(s *specs.State) error { - b, err := json.Marshal(s) - if err != nil { - return err - } - var stdout, stderr bytes.Buffer - cmd := exec.Cmd{ - Path: c.Path, - Args: c.Args, - Env: c.Env, - Stdin: bytes.NewReader(b), - Stdout: &stdout, - Stderr: &stderr, - } - if err := cmd.Start(); err != nil { - return err - } - errC := make(chan error, 1) - go func() { - err := cmd.Wait() - if err != nil { - err = fmt.Errorf("%w, stdout: %s, stderr: %s", err, stdout.String(), stderr.String()) - } - errC <- err - }() - var timerCh <-chan time.Time - if c.Timeout != nil { - timer := time.NewTimer(*c.Timeout) - defer timer.Stop() - timerCh = timer.C - } - select { - case err := <-errC: - return err - case <-timerCh: - _ = cmd.Process.Kill() - <-errC - return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds()) - } -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go deleted file mode 100644 index e401f5331b4..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go +++ /dev/null @@ -1,97 +0,0 @@ -package configs - -import ( - "errors" - "fmt" - "math" -) - -var ( - errNoUIDMap = errors.New("user namespaces enabled, but no uid mappings found") - errNoGIDMap = errors.New("user namespaces enabled, but no gid mappings found") -) - -// Please check https://man7.org/linux/man-pages/man2/personality.2.html for const details. -// https://raw.githubusercontent.com/torvalds/linux/master/include/uapi/linux/personality.h -const ( - PerLinux = 0x0000 - PerLinux32 = 0x0008 -) - -type LinuxPersonality struct { - // Domain for the personality - // can only contain values "LINUX" and "LINUX32" - Domain int `json:"domain"` -} - -// HostUID gets the translated uid for the process on host which could be -// different when user namespaces are enabled. -func (c Config) HostUID(containerId int) (int, error) { - if c.Namespaces.Contains(NEWUSER) { - if len(c.UIDMappings) == 0 { - return -1, errNoUIDMap - } - id, found := c.hostIDFromMapping(int64(containerId), c.UIDMappings) - if !found { - return -1, fmt.Errorf("user namespaces enabled, but no mapping found for uid %d", containerId) - } - // If we are a 32-bit binary running on a 64-bit system, it's possible - // the mapped user is too large to store in an int, which means we - // cannot do the mapping. We can't just return an int64, because - // os.Setuid() takes an int. - if id > math.MaxInt { - return -1, fmt.Errorf("mapping for uid %d (host id %d) is larger than native integer size (%d)", containerId, id, math.MaxInt) - } - return int(id), nil - } - // Return unchanged id. - return containerId, nil -} - -// HostRootUID gets the root uid for the process on host which could be non-zero -// when user namespaces are enabled. -func (c Config) HostRootUID() (int, error) { - return c.HostUID(0) -} - -// HostGID gets the translated gid for the process on host which could be -// different when user namespaces are enabled. 
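// Command.Run above races cmd.Wait against an optional timer and kills the
// hook process on timeout. A standalone sketch of that pattern, assuming
// "fmt", "os/exec" and "time" are imported; runWithTimeout is a
// hypothetical helper, not part of this patch:
func runWithTimeout(cmd *exec.Cmd, d time.Duration) error {
	if err := cmd.Start(); err != nil {
		return err
	}
	errC := make(chan error, 1)
	go func() { errC <- cmd.Wait() }()
	select {
	case err := <-errC:
		return err
	case <-time.After(d):
		_ = cmd.Process.Kill()
		<-errC // reap the killed process before returning
		return fmt.Errorf("hook ran past timeout of %s", d)
	}
}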
-func (c Config) HostGID(containerId int) (int, error) { - if c.Namespaces.Contains(NEWUSER) { - if len(c.GIDMappings) == 0 { - return -1, errNoGIDMap - } - id, found := c.hostIDFromMapping(int64(containerId), c.GIDMappings) - if !found { - return -1, fmt.Errorf("user namespaces enabled, but no mapping found for gid %d", containerId) - } - // If we are a 32-bit binary running on a 64-bit system, it's possible - // the mapped user is too large to store in an int, which means we - // cannot do the mapping. We can't just return an int64, because - // os.Setgid() takes an int. - if id > math.MaxInt { - return -1, fmt.Errorf("mapping for gid %d (host id %d) is larger than native integer size (%d)", containerId, id, math.MaxInt) - } - return int(id), nil - } - // Return unchanged id. - return containerId, nil -} - -// HostRootGID gets the root gid for the process on host which could be non-zero -// when user namespaces are enabled. -func (c Config) HostRootGID() (int, error) { - return c.HostGID(0) -} - -// Utility function that gets a host ID for a container ID from user namespace map -// if that ID is present in the map. -func (c Config) hostIDFromMapping(containerID int64, uMap []IDMap) (int64, bool) { - for _, m := range uMap { - if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (containerID - m.ContainerID) - return hostID, true - } - } - return -1, false -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go deleted file mode 100644 index 1fd87ce6a4b..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build gofuzz - -package configs - -func FuzzUnmarshalJSON(data []byte) int { - hooks := Hooks{} - _ = hooks.UnmarshalJSON(data) - return 1 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go deleted file mode 100644 index f8d951ab8b9..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go +++ /dev/null @@ -1,16 +0,0 @@ -package configs - -type IntelRdt struct { - // The identity for RDT Class of Service - ClosID string `json:"closID,omitempty"` - - // The schema for L3 cache id and capacity bitmask (CBM) - // Format: "L3:=;=;..." - L3CacheSchema string `json:"l3_cache_schema,omitempty"` - - // The schema of memory bandwidth per L3 cache id - // Format: "MB:=bandwidth0;=bandwidth1;..." - // The unit of memory bandwidth is specified in "percentages" by - // default, and in "MBps" if MBA Software Controller is enabled. - MemBwSchema string `json:"memBwSchema,omitempty"` -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go deleted file mode 100644 index bfd356e497f..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go +++ /dev/null @@ -1,7 +0,0 @@ -package configs - -const ( - // EXT_COPYUP is a directive to copy up the contents of a directory when - // a tmpfs is mounted over it. 
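// hostIDFromMapping above translates a container ID through half-open
// [ContainerID, ContainerID+Size) ranges. A sketch with a local idMap type
// standing in for the deleted configs.IDMap; not part of this patch:
type idMap struct{ containerID, hostID, size int64 }

func hostID(cid int64, maps []idMap) (int64, bool) {
	for _, m := range maps {
		if cid >= m.containerID && cid < m.containerID+m.size {
			return m.hostID + (cid - m.containerID), true
		}
	}
	return -1, false
}
// With idMap{0, 100000, 65536}, container uid 0 maps to host uid 100000.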
- EXT_COPYUP = 1 << iota //nolint:golint,revive // ignore "don't use ALL_CAPS" warning -) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_linux.go deleted file mode 100644 index b69e9ab238e..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_linux.go +++ /dev/null @@ -1,66 +0,0 @@ -package configs - -import "golang.org/x/sys/unix" - -type MountIDMapping struct { - // Recursive indicates if the mapping needs to be recursive. - Recursive bool `json:"recursive"` - - // UserNSPath is a path to a user namespace that indicates the necessary - // id-mappings for MOUNT_ATTR_IDMAP. If set to non-"", UIDMappings and - // GIDMappings must be set to nil. - UserNSPath string `json:"userns_path,omitempty"` - - // UIDMappings is the uid mapping set for this mount, to be used with - // MOUNT_ATTR_IDMAP. - UIDMappings []IDMap `json:"uid_mappings,omitempty"` - - // GIDMappings is the gid mapping set for this mount, to be used with - // MOUNT_ATTR_IDMAP. - GIDMappings []IDMap `json:"gid_mappings,omitempty"` -} - -type Mount struct { - // Source path for the mount. - Source string `json:"source"` - - // Destination path for the mount inside the container. - Destination string `json:"destination"` - - // Device the mount is for. - Device string `json:"device"` - - // Mount flags. - Flags int `json:"flags"` - - // Mount flags that were explicitly cleared in the configuration (meaning - // the user explicitly requested that these flags *not* be set). - ClearedFlags int `json:"cleared_flags"` - - // Propagation Flags - PropagationFlags []int `json:"propagation_flags"` - - // Mount data applied to the mount. - Data string `json:"data"` - - // Relabel source if set, "z" indicates shared, "Z" indicates unshared. - Relabel string `json:"relabel"` - - // RecAttr represents mount properties to be applied recursively (AT_RECURSIVE), see mount_setattr(2). - RecAttr *unix.MountAttr `json:"rec_attr"` - - // Extensions are additional flags that are specific to runc. - Extensions int `json:"extensions"` - - // Mapping is the MOUNT_ATTR_IDMAP configuration for the mount. If non-nil, - // the mount is configured to use MOUNT_ATTR_IDMAP-style id mappings. 
- IDMapping *MountIDMapping `json:"id_mapping,omitempty"` -} - -func (m *Mount) IsBind() bool { - return m.Flags&unix.MS_BIND != 0 -} - -func (m *Mount) IsIDMapped() bool { - return m.IDMapping != nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_unsupported.go deleted file mode 100644 index 1d4d9fe52a5..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_unsupported.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !linux - -package configs - -type Mount struct{} - -func (m *Mount) IsBind() bool { - return false -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go deleted file mode 100644 index a3329a31a9c..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go +++ /dev/null @@ -1,5 +0,0 @@ -package configs - -type NamespaceType string - -type Namespaces []Namespace diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go deleted file mode 100644 index 898f96fd0f5..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go +++ /dev/null @@ -1,133 +0,0 @@ -package configs - -import ( - "fmt" - "os" - "sync" -) - -const ( - NEWNET NamespaceType = "NEWNET" - NEWPID NamespaceType = "NEWPID" - NEWNS NamespaceType = "NEWNS" - NEWUTS NamespaceType = "NEWUTS" - NEWIPC NamespaceType = "NEWIPC" - NEWUSER NamespaceType = "NEWUSER" - NEWCGROUP NamespaceType = "NEWCGROUP" - NEWTIME NamespaceType = "NEWTIME" -) - -var ( - nsLock sync.Mutex - supportedNamespaces = make(map[NamespaceType]bool) -) - -// NsName converts the namespace type to its filename -func NsName(ns NamespaceType) string { - switch ns { - case NEWNET: - return "net" - case NEWNS: - return "mnt" - case NEWPID: - return "pid" - case NEWIPC: - return "ipc" - case NEWUSER: - return "user" - case NEWUTS: - return "uts" - case NEWCGROUP: - return "cgroup" - case NEWTIME: - return "time" - } - return "" -} - -// IsNamespaceSupported returns whether a namespace is available or -// not -func IsNamespaceSupported(ns NamespaceType) bool { - nsLock.Lock() - defer nsLock.Unlock() - supported, ok := supportedNamespaces[ns] - if ok { - return supported - } - nsFile := NsName(ns) - // if the namespace type is unknown, just return false - if nsFile == "" { - return false - } - // We don't need to use /proc/thread-self here because the list of - // namespace types is unrelated to the thread. This lets us avoid having to - // do runtime.LockOSThread. - _, err := os.Stat("/proc/self/ns/" + nsFile) - // a namespace is supported if it exists and we have permissions to read it - supported = err == nil - supportedNamespaces[ns] = supported - return supported -} - -func NamespaceTypes() []NamespaceType { - return []NamespaceType{ - NEWUSER, // Keep user NS always first, don't move it. - NEWIPC, - NEWUTS, - NEWNET, - NEWPID, - NEWNS, - NEWCGROUP, - NEWTIME, - } -} - -// Namespace defines configuration for each namespace. It specifies an -// alternate path that is able to be joined via setns. 
-type Namespace struct { - Type NamespaceType `json:"type"` - Path string `json:"path"` -} - -func (n *Namespace) GetPath(pid int) string { - return fmt.Sprintf("/proc/%d/ns/%s", pid, NsName(n.Type)) -} - -func (n *Namespaces) Remove(t NamespaceType) bool { - i := n.index(t) - if i == -1 { - return false - } - *n = append((*n)[:i], (*n)[i+1:]...) - return true -} - -func (n *Namespaces) Add(t NamespaceType, path string) { - i := n.index(t) - if i == -1 { - *n = append(*n, Namespace{Type: t, Path: path}) - return - } - (*n)[i].Path = path -} - -func (n *Namespaces) index(t NamespaceType) int { - for i, ns := range *n { - if ns.Type == t { - return i - } - } - return -1 -} - -func (n *Namespaces) Contains(t NamespaceType) bool { - return n.index(t) != -1 -} - -func (n *Namespaces) PathOf(t NamespaceType) string { - i := n.index(t) - if i == -1 { - return "" - } - return (*n)[i].Path -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go deleted file mode 100644 index 26b70b26fa1..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build linux - -package configs - -import "golang.org/x/sys/unix" - -func (n *Namespace) Syscall() int { - return namespaceInfo[n.Type] -} - -var namespaceInfo = map[NamespaceType]int{ - NEWNET: unix.CLONE_NEWNET, - NEWNS: unix.CLONE_NEWNS, - NEWUSER: unix.CLONE_NEWUSER, - NEWIPC: unix.CLONE_NEWIPC, - NEWUTS: unix.CLONE_NEWUTS, - NEWPID: unix.CLONE_NEWPID, - NEWCGROUP: unix.CLONE_NEWCGROUP, - NEWTIME: unix.CLONE_NEWTIME, -} - -// CloneFlags parses the container's Namespaces options to set the correct -// flags on clone, unshare. This function returns flags only for new namespaces. -func (n *Namespaces) CloneFlags() uintptr { - var flag int - for _, v := range *n { - if v.Path != "" { - continue - } - flag |= namespaceInfo[v.Type] - } - return uintptr(flag) -} - -// IsPrivate tells whether the namespace of type t is configured as private -// (i.e. it exists and is not shared). -func (n Namespaces) IsPrivate(t NamespaceType) bool { - for _, v := range n { - if v.Type == t { - return v.Path == "" - } - } - // Not found, so implicitly sharing a parent namespace. - return false -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go deleted file mode 100644 index 10bf2436502..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !linux && !windows - -package configs - -func (n *Namespace) Syscall() int { - panic("No namespace syscall support") -} - -// CloneFlags parses the container's Namespaces options to set the correct -// flags on clone, unshare. This function returns flags only for new namespaces. 
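// CloneFlags above ORs together the clone(2) bits for every namespace that
// is to be created, skipping ones joined via an existing path. A sketch
// with a hypothetical nsSpec type; not part of this patch:
type nsSpec struct {
	path string  // non-empty means "join via setns", so no clone bit
	flag uintptr // e.g. unix.CLONE_NEWNET
}

func cloneFlags(specs []nsSpec) uintptr {
	var flags uintptr
	for _, s := range specs {
		if s.path != "" {
			continue
		}
		flags |= s.flag
	}
	return flags
}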
-func (n *Namespaces) CloneFlags() uintptr { - panic("No namespace syscall support") -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go deleted file mode 100644 index 914684993c9..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !linux - -package configs - -// Namespace defines configuration for each namespace. It specifies an -// alternate path that is able to be joined via setns. -type Namespace struct{} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go deleted file mode 100644 index c44c3ea71b8..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go +++ /dev/null @@ -1,75 +0,0 @@ -package configs - -// Network defines configuration for a container's networking stack -// -// The network configuration can be omitted from a container causing the -// container to be setup with the host's networking stack -type Network struct { - // Type sets the networks type, commonly veth and loopback - Type string `json:"type"` - - // Name of the network interface - Name string `json:"name"` - - // The bridge to use. - Bridge string `json:"bridge"` - - // MacAddress contains the MAC address to set on the network interface - MacAddress string `json:"mac_address"` - - // Address contains the IPv4 and mask to set on the network interface - Address string `json:"address"` - - // Gateway sets the gateway address that is used as the default for the interface - Gateway string `json:"gateway"` - - // IPv6Address contains the IPv6 and mask to set on the network interface - IPv6Address string `json:"ipv6_address"` - - // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface - IPv6Gateway string `json:"ipv6_gateway"` - - // Mtu sets the mtu value for the interface and will be mirrored on both the host and - // container's interfaces if a pair is created, specifically in the case of type veth - // Note: This does not apply to loopback interfaces. - Mtu int `json:"mtu"` - - // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and - // container's interfaces if a pair is created, specifically in the case of type veth - // Note: This does not apply to loopback interfaces. - TxQueueLen int `json:"txqueuelen"` - - // HostInterfaceName is a unique name of a veth pair that resides on in the host interface of the - // container. - HostInterfaceName string `json:"host_interface_name"` - - // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - // bridge port in the case of type veth - // Note: This is unsupported on some systems. - // Note: This does not apply to loopback interfaces. - HairpinMode bool `json:"hairpin_mode"` -} - -// Route defines a routing table entry. -// -// Routes can be specified to create entries in the routing table as the container -// is started. -// -// All of destination, source, and gateway should be either IPv4 or IPv6. -// One of the three options must be present, and omitted entries will use their -// IP family default for the route table. For IPv4 for example, setting the -// gateway to 1.2.3.4 and the interface to eth0 will set up a standard -// destination of 0.0.0.0(or *) when viewed in the route table. 
-type Route struct { - // Destination specifies the destination IP address and mask in the CIDR form. - Destination string `json:"destination"` - - // Source specifies the source IP address and mask in the CIDR form. - Source string `json:"source"` - - // Gateway specifies the gateway IP address. - Gateway string `json:"gateway"` - - // InterfaceName specifies the device to set this route up for, for example eth0. - InterfaceName string `json:"interface_name"` -} diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go index 83047d0a3bd..8c27fb186a9 100644 --- a/vendor/github.com/pkg/sftp/client.go +++ b/vendor/github.com/pkg/sftp/client.go @@ -909,7 +909,7 @@ func (c *Client) Mkdir(path string) error { // MkdirAll creates a directory named path, along with any necessary parents, // and returns nil, or else returns an error. // If path is already a directory, MkdirAll does nothing and returns nil. -// If path contains a regular file, an error is returned +// If, while making any directory, that path is found to already be a regular file, an error is returned. func (c *Client) MkdirAll(path string) error { // Most of this code mimics https://golang.org/src/os/path.go?s=514:561#L13 // Fast path: if we can tell whether path is a directory or file, stop with success or error. @@ -1174,7 +1174,7 @@ func (f *File) readAt(b []byte, off int64) (int, error) { ID: id, Handle: f.handle, Offset: uint64(offset), - Len: uint32(chunkSize), + Len: uint32(len(rb)), }) select { diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go index fb474c4f316..cd656d8fbf0 100644 --- a/vendor/github.com/pkg/sftp/server.go +++ b/vendor/github.com/pkg/sftp/server.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io" + "io/fs" "io/ioutil" "os" "path/filepath" @@ -21,6 +22,18 @@ const ( SftpServerWorkerCount = 8 ) +type file interface { + Stat() (os.FileInfo, error) + ReadAt(b []byte, off int64) (int, error) + WriteAt(b []byte, off int64) (int, error) + Readdir(int) ([]os.FileInfo, error) + Name() string + Truncate(int64) error + Chmod(mode fs.FileMode) error + Chown(uid, gid int) error + Close() error +} + // Server is an SSH File Transfer Protocol (sftp) server. // This is intended to provide the sftp subsystem to an ssh server daemon. 
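// The client.go fix above sends Len: uint32(len(rb)) so the final, shorter
// chunk of a read requests exactly as many bytes as its buffer holds,
// rather than a full chunkSize. A sketch of the chunking arithmetic;
// chunkLens is a hypothetical helper, not part of this patch:
func chunkLens(total, chunkSize int) []int {
	var lens []int
	for total > 0 {
		n := chunkSize
		if total < n {
			n = total // last chunk is the remainder
		}
		lens = append(lens, n)
		total -= n
	}
	return lens
}
// chunkLens(10, 4) == []int{4, 4, 2}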
// This implementation currently supports most of sftp server protocol version 3, @@ -30,14 +43,15 @@ type Server struct { debugStream io.Writer readOnly bool pktMgr *packetManager - openFiles map[string]*os.File + openFiles map[string]file openFilesLock sync.RWMutex handleCount int workDir string + winRoot bool maxTxPacket uint32 } -func (svr *Server) nextHandle(f *os.File) string { +func (svr *Server) nextHandle(f file) string { svr.openFilesLock.Lock() defer svr.openFilesLock.Unlock() svr.handleCount++ @@ -57,7 +71,7 @@ func (svr *Server) closeHandle(handle string) error { return EBADF } -func (svr *Server) getHandle(handle string) (*os.File, bool) { +func (svr *Server) getHandle(handle string) (file, bool) { svr.openFilesLock.RLock() defer svr.openFilesLock.RUnlock() f, ok := svr.openFiles[handle] @@ -86,7 +100,7 @@ func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) serverConn: svrConn, debugStream: ioutil.Discard, pktMgr: newPktMgr(svrConn), - openFiles: make(map[string]*os.File), + openFiles: make(map[string]file), maxTxPacket: defaultMaxTxPacket, } @@ -118,6 +132,14 @@ func ReadOnly() ServerOption { } } +// WindowsRootEnumeratesDrives configures a Server to serve a virtual '/' for windows that lists all drives +func WindowsRootEnumeratesDrives() ServerOption { + return func(s *Server) error { + s.winRoot = true + return nil + } +} + // WithAllocator enable the allocator. // After processing a packet we keep in memory the allocated slices // and we reuse them for new packets. @@ -215,7 +237,7 @@ func handlePacket(s *Server, p orderedRequest) error { } case *sshFxpLstatPacket: // stat the requested file - info, err := os.Lstat(s.toLocalPath(p.Path)) + info, err := s.lstat(s.toLocalPath(p.Path)) rpkt = &sshFxpStatResponse{ ID: p.ID, info: info, @@ -289,7 +311,7 @@ func handlePacket(s *Server, p orderedRequest) error { case *sshFxpOpendirPacket: lp := s.toLocalPath(p.Path) - if stat, err := os.Stat(lp); err != nil { + if stat, err := s.stat(lp); err != nil { rpkt = statusFromError(p.ID, err) } else if !stat.IsDir() { rpkt = statusFromError(p.ID, &os.PathError{ @@ -493,7 +515,7 @@ func (p *sshFxpOpenPacket) respond(svr *Server) responsePacket { mode = fs.FileMode() & os.ModePerm } - f, err := os.OpenFile(svr.toLocalPath(p.Path), osFlags, mode) + f, err := svr.openfile(svr.toLocalPath(p.Path), osFlags, mode) if err != nil { return statusFromError(p.ID, err) } diff --git a/vendor/github.com/pkg/sftp/server_posix.go b/vendor/github.com/pkg/sftp/server_posix.go new file mode 100644 index 00000000000..c07d70a043e --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_posix.go @@ -0,0 +1,21 @@ +//go:build !windows +// +build !windows + +package sftp + +import ( + "io/fs" + "os" +) + +func (s *Server) openfile(path string, flag int, mode fs.FileMode) (file, error) { + return os.OpenFile(path, flag, mode) +} + +func (s *Server) lstat(name string) (os.FileInfo, error) { + return os.Lstat(name) +} + +func (s *Server) stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} diff --git a/vendor/github.com/pkg/sftp/server_windows.go b/vendor/github.com/pkg/sftp/server_windows.go index b35be7305b8..e940dba101c 100644 --- a/vendor/github.com/pkg/sftp/server_windows.go +++ b/vendor/github.com/pkg/sftp/server_windows.go @@ -1,8 +1,15 @@ package sftp import ( + "fmt" + "io" + "io/fs" + "os" "path" "path/filepath" + "time" + + "golang.org/x/sys/windows" ) func (s *Server) toLocalPath(p string) string { @@ -12,7 +19,11 @@ func (s *Server) toLocalPath(p string) string { 
lp := filepath.FromSlash(p) - if path.IsAbs(p) { + if path.IsAbs(p) { // starts with '/' + if len(p) == 1 && s.winRoot { + return `\\.\` // for openfile + } + tmp := lp for len(tmp) > 0 && tmp[0] == '\\' { tmp = tmp[1:] @@ -33,7 +44,150 @@ func (s *Server) toLocalPath(p string) string { // e.g. "/C:" to "C:\\" return tmp } + + if s.winRoot { + // Make it so that "/Windows" is not found, and "/c:/Windows" has to be used + return `\\.\` + tmp + } } return lp } + +func bitsToDrives(bitmap uint32) []string { + var drive rune = 'a' + var drives []string + + for bitmap != 0 && drive <= 'z' { + if bitmap&1 == 1 { + drives = append(drives, string(drive)+":") + } + drive++ + bitmap >>= 1 + } + + return drives +} + +func getDrives() ([]string, error) { + mask, err := windows.GetLogicalDrives() + if err != nil { + return nil, fmt.Errorf("GetLogicalDrives: %w", err) + } + return bitsToDrives(mask), nil +} + +type driveInfo struct { + fs.FileInfo + name string +} + +func (i *driveInfo) Name() string { + return i.name // since the Name() returned from a os.Stat("C:\\") is "\\" +} + +type winRoot struct { + drives []string +} + +func newWinRoot() (*winRoot, error) { + drives, err := getDrives() + if err != nil { + return nil, err + } + return &winRoot{ + drives: drives, + }, nil +} + +func (f *winRoot) Readdir(n int) ([]os.FileInfo, error) { + drives := f.drives + if n > 0 && len(drives) > n { + drives = drives[:n] + } + f.drives = f.drives[len(drives):] + if len(drives) == 0 { + return nil, io.EOF + } + + var infos []os.FileInfo + for _, drive := range drives { + fi, err := os.Stat(drive + `\`) + if err != nil { + return nil, err + } + + di := &driveInfo{ + FileInfo: fi, + name: drive, + } + infos = append(infos, di) + } + + return infos, nil +} + +func (f *winRoot) Stat() (os.FileInfo, error) { + return rootFileInfo, nil +} +func (f *winRoot) ReadAt(b []byte, off int64) (int, error) { + return 0, os.ErrPermission +} +func (f *winRoot) WriteAt(b []byte, off int64) (int, error) { + return 0, os.ErrPermission +} +func (f *winRoot) Name() string { + return "/" +} +func (f *winRoot) Truncate(int64) error { + return os.ErrPermission +} +func (f *winRoot) Chmod(mode fs.FileMode) error { + return os.ErrPermission +} +func (f *winRoot) Chown(uid, gid int) error { + return os.ErrPermission +} +func (f *winRoot) Close() error { + f.drives = nil + return nil +} + +func (s *Server) openfile(path string, flag int, mode fs.FileMode) (file, error) { + if path == `\\.\` && s.winRoot { + return newWinRoot() + } + return os.OpenFile(path, flag, mode) +} + +type winRootFileInfo struct { + name string + modTime time.Time +} + +func (w *winRootFileInfo) Name() string { return w.name } +func (w *winRootFileInfo) Size() int64 { return 0 } +func (w *winRootFileInfo) Mode() fs.FileMode { return fs.ModeDir | 0555 } // read+execute for all +func (w *winRootFileInfo) ModTime() time.Time { return w.modTime } +func (w *winRootFileInfo) IsDir() bool { return true } +func (w *winRootFileInfo) Sys() interface{} { return nil } + +// Create a new root FileInfo +var rootFileInfo = &winRootFileInfo{ + name: "/", + modTime: time.Now(), +} + +func (s *Server) lstat(name string) (os.FileInfo, error) { + if name == `\\.\` && s.winRoot { + return rootFileInfo, nil + } + return os.Lstat(name) +} + +func (s *Server) stat(name string) (os.FileInfo, error) { + if name == `\\.\` && s.winRoot { + return rootFileInfo, nil + } + return os.Stat(name) +} diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore index 
9fa948ebf9d..ed4d259db23 100644 --- a/vendor/go.etcd.io/bbolt/.gitignore +++ b/vendor/go.etcd.io/bbolt/.gitignore @@ -6,5 +6,7 @@ cover.out cover-*.out /.idea *.iml +/bbolt /cmd/bbolt/bbolt +.DS_Store diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version index 013173af5e9..d8c40e539ce 100644 --- a/vendor/go.etcd.io/bbolt/.go-version +++ b/vendor/go.etcd.io/bbolt/.go-version @@ -1 +1 @@ -1.22.6 +1.23.6 diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile index 21407797416..f5a6703a0bb 100644 --- a/vendor/go.etcd.io/bbolt/Makefile +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -1,6 +1,7 @@ BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" +GOFILES = $(shell find . -name \*.go) TESTFLAGS_RACE=-race=false ifdef ENABLE_RACE @@ -13,9 +14,26 @@ ifdef CPU endif TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS) +TESTFLAGS_TIMEOUT=30m +ifdef TIMEOUT + TESTFLAGS_TIMEOUT=$(TIMEOUT) +endif + +TESTFLAGS_ENABLE_STRICT_MODE=false +ifdef ENABLE_STRICT_MODE + TESTFLAGS_ENABLE_STRICT_MODE=$(ENABLE_STRICT_MODE) +endif + +.EXPORT_ALL_VARIABLES: +TEST_ENABLE_STRICT_MODE=${TESTFLAGS_ENABLE_STRICT_MODE} + .PHONY: fmt fmt: - !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + @echo "Verifying gofmt, failures can be fixed with ./scripts/fix.sh" + @!(gofmt -l -s -d ${GOFILES} | grep '[a-z]') + + @echo "Verifying goimports, failures can be fixed with ./scripts/fix.sh" + @!(go run golang.org/x/tools/cmd/goimports@latest -l -d ${GOFILES} | grep '[a-z]') .PHONY: lint lint: @@ -24,21 +42,23 @@ lint: .PHONY: test test: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/... + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/... + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt .PHONY: coverage coverage: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \ + TEST_FREELIST_TYPE=hashmap go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-hashmap.out -covermode atomic @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v -timeout 30m \ + TEST_FREELIST_TYPE=array go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-array.out -covermode atomic BOLT_CMD=bbolt @@ -55,7 +75,7 @@ gofail-enable: install-gofail gofail enable . .PHONY: gofail-disable -gofail-disable: +gofail-disable: install-gofail gofail disable . 
.PHONY: install-gofail @@ -65,12 +85,24 @@ install-gofail: .PHONY: test-failpoint test-failpoint: @echo "[failpoint] hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint @echo "[failpoint] array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint - -.PHONY: test-robustness # Running robustness tests requires root permission -test-robustness: - go test -v ${TESTFLAGS} ./tests/dmflakey -test.root - go test -v ${TESTFLAGS} ./tests/robustness -test.root + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + +.PHONY: test-robustness # Running robustness tests requires root permission for now +# TODO: Remove sudo once we fully migrate to the prow infrastructure +test-robustness: gofail-enable build + sudo env PATH=$$PATH go test -v ${TESTFLAGS} ./tests/dmflakey -test.root + sudo env PATH=$(PWD)/bin:$$PATH go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root + +.PHONY: test-benchmark-compare +# Runs benchmark tests on the current git ref and the given REF, and compares +# the two. +test-benchmark-compare: install-benchstat + @git fetch + ./scripts/compare_benchmarks.sh $(REF) + +.PHONY: install-benchstat +install-benchstat: + go install golang.org/x/perf/cmd/benchstat@latest diff --git a/vendor/go.etcd.io/bbolt/OWNERS b/vendor/go.etcd.io/bbolt/OWNERS new file mode 100644 index 00000000000..91f168a7980 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - ahrtr # Benjamin Wang + - serathius # Marek Siarkowicz + - ptabor # Piotr Tabor + - spzala # Sahdev Zala +reviewers: + - fuweid # Wei Fu + - tjungblu # Thomas Jungblut diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md index 495a93ef8f3..f365e51e3eb 100644 --- a/vendor/go.etcd.io/bbolt/README.md +++ b/vendor/go.etcd.io/bbolt/README.md @@ -1,10 +1,8 @@ bbolt ===== -[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) -[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) -[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) +[![Go Report Card](https://goreportcard.com/badge/go.etcd.io/bbolt?style=flat-square)](https://goreportcard.com/report/go.etcd.io/bbolt) +[![Go Reference](https://pkg.go.dev/badge/go.etcd.io/bbolt.svg)](https://pkg.go.dev/go.etcd.io/bbolt) [![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) [![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) @@ -71,13 +69,14 @@ New minor versions may add additional features to the API. 
- [LMDB](#lmdb) - [Caveats & Limitations](#caveats--limitations) - [Reading the Source](#reading-the-source) + - [Known Issues](#known-issues) - [Other Projects Using Bolt](#other-projects-using-bolt) ## Getting Started ### Installing -To start using Bolt, install Go and run `go get`: +To start using `bbolt`, install Go and run `go get`: ```sh $ go get go.etcd.io/bbolt@latest ``` @@ -103,7 +102,7 @@ To use bbolt as an embedded key-value store, import as: ```go import bolt "go.etcd.io/bbolt" -db, err := bolt.Open(path, 0666, nil) +db, err := bolt.Open(path, 0600, nil) if err != nil { return err } @@ -298,6 +297,17 @@ db.Update(func(tx *bolt.Tx) error { }) ``` +You can retrieve an existing bucket using the `Tx.Bucket()` function: +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + if b == nil { + return errors.New("bucket does not exist") + } + return nil +}) +``` + You can also create a bucket only if it doesn't exist by using the `Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this function for all your top-level buckets after you open your database so you can @@ -305,6 +315,17 @@ guarantee that they exist for future transactions. To delete a bucket, simply call the `Tx.DeleteBucket()` function. +You can also iterate over all existing top-level buckets with `Tx.ForEach()`: + +```go +db.View(func(tx *bolt.Tx) error { + tx.ForEach(func(name []byte, b *bolt.Bucket) error { + fmt.Println(string(name)) + return nil + }) + return nil +}) +``` ### Using key/value pairs @@ -336,7 +357,17 @@ exists then it will return its byte slice value. If it doesn't exist then it will return `nil`. It's important to note that you can have a zero-length value set to a key which is different than the key not existing. -Use the `Bucket.Delete()` function to delete a key from the bucket. +Use the `Bucket.Delete()` function to delete a key from the bucket: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Delete([]byte("answer")) + return err +}) +``` + +This will delete the key `answer` from the bucket `MyBucket`. Please note that values returned from `Get()` are only valid while the transaction is open. If you need to use a value outside of the transaction @@ -654,7 +685,7 @@ uses a shared lock to allow multiple processes to read from the database but it will block any processes from opening the database in read-write mode. ```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +db, err := bolt.Open("my.db", 0600, &bolt.Options{ReadOnly: true}) if err != nil { log.Fatal(err) } @@ -890,7 +921,7 @@ The best places to start are the main entry points into Bolt: - `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the arguments, a cursor is used to traverse the B+tree to the page and position - where they key & value will be written. Once the position is found, the bucket + where the key & value will be written. Once the position is found, the bucket materializes the underlying page and the page's parent pages into memory as "nodes". These nodes are where mutations occur during read-write transactions. These changes get flushed to disk during commit. @@ -919,6 +950,21 @@ The best places to start are the main entry points into Bolt: If you have additional notes that could be helpful for others, please submit them via pull request.
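Relatedly, the zero-length-value caveat above (and the read-back note in the Known Issues section that follows) can be made concrete; a minimal sketch, assuming `MyBucket` already exists:

```go
db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	if err := b.Put([]byte("empty"), []byte{}); err != nil {
		return err
	}
	if v := b.Get([]byte("empty")); v != nil {
		// Key present: v is the empty []byte{} value, len(v) == 0.
	}
	if v := b.Get([]byte("missing")); v == nil {
		// Key absent (or a nested bucket): Get returns nil.
	}
	return nil
})
```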
+## Known Issues + +- bbolt might run into a data corruption issue on Linux when the + [ext4: fast commit](https://lwn.net/Articles/842385/) feature, which was introduced in + Linux kernel v5.10, is enabled. The fixes to the issue were included in + Linux kernel v5.17; please refer to the links below: + + * [ext4: fast commit may miss tracking unwritten range during ftruncate](https://lore.kernel.org/linux-ext4/20211223032337.5198-3-yinxin.x@bytedance.com/) + * [ext4: fast commit may not fallback for ineligible commit](https://lore.kernel.org/lkml/202201091544.W5HHEXAp-lkp@intel.com/T/#ma0768815e4b5f671e9e451d578256ef9a76fe30e) + * [ext4 updates for 5.17](https://lore.kernel.org/lkml/YdyxjTFaLWif6BCM@mit.edu/) + + Please also refer to the discussion in https://github.com/etcd-io/bbolt/issues/562. + +- Writing a value with a length of 0 will always result in reading back an empty `[]byte{}` value. + Please refer to [issues/726#issuecomment-2061694802](https://github.com/etcd-io/bbolt/issues/726#issuecomment-2061694802). ## Other Projects Using Bolt @@ -934,13 +980,16 @@ Below is a list of public, open source projects that use Bolt: * [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. * [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB viewer that can run on Windows, Linux and Android systems. * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [bstore](https://github.com/mjl-/bstore) - Database library storing Go values, with referential/unique/nonzero constraints, indices, automatic schema management with struct tags, and a query API. * [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans. +* [Buildkit](https://github.com/moby/buildkit) - A concurrent, cache-efficient, and Dockerfile-agnostic builder toolkit. * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. * [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Containerd](https://github.com/containerd/containerd) - An open and reliable container runtime. * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. * [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. @@ -964,6 +1013,7 @@ Below is a list of public, open source projects that use Bolt: * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
+* [Portainer](https://github.com/portainer/portainer) - A lightweight service delivery platform for containerized applications that can be used to manage Docker, Swarm, Kubernetes and ACI environments. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. * [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library. * [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_aix.go similarity index 99% rename from vendor/go.etcd.io/bbolt/bolt_unix_aix.go rename to vendor/go.etcd.io/bbolt/bolt_aix.go index 6dea4294dc7..4b424ed4c4e 100644 --- a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go +++ b/vendor/go.etcd.io/bbolt/bolt_aix.go @@ -1,5 +1,4 @@ //go:build aix -// +build aix package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_android.go b/vendor/go.etcd.io/bbolt/bolt_android.go new file mode 100644 index 00000000000..11890f0d705 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_android.go @@ -0,0 +1,90 @@ +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + err = unix.Madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. 
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go index 447bc197332..2c67ab10cd0 100644 --- a/vendor/go.etcd.io/bbolt/bolt_arm64.go +++ b/vendor/go.etcd.io/bbolt/bolt_arm64.go @@ -1,5 +1,4 @@ //go:build arm64 -// +build arm64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_loong64.go b/vendor/go.etcd.io/bbolt/bolt_loong64.go index 31c17c1d070..1ef2145c67c 100644 --- a/vendor/go.etcd.io/bbolt/bolt_loong64.go +++ b/vendor/go.etcd.io/bbolt/bolt_loong64.go @@ -1,5 +1,4 @@ //go:build loong64 -// +build loong64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go index a9385beb682..f28a0512a1d 100644 --- a/vendor/go.etcd.io/bbolt/bolt_mips64x.go +++ b/vendor/go.etcd.io/bbolt/bolt_mips64x.go @@ -1,5 +1,4 @@ //go:build mips64 || mips64le -// +build mips64 mips64le package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go index ed734ff7f30..708fccdc010 100644 --- a/vendor/go.etcd.io/bbolt/bolt_mipsx.go +++ b/vendor/go.etcd.io/bbolt/bolt_mipsx.go @@ -1,5 +1,4 @@ //go:build mips || mipsle -// +build mips mipsle package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go index e403f57d8a8..6a21cf33c79 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc.go @@ -1,5 +1,4 @@ //go:build ppc -// +build ppc package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go index fcd86529f93..a32f2462281 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64.go @@ -1,5 +1,4 @@ //go:build ppc64 -// +build ppc64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go index 20234aca466..8fb60dddcb2 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go @@ -1,5 +1,4 @@ //go:build ppc64le -// +build ppc64le package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go index 060f30c73cb..a63d26ab21d 100644 --- a/vendor/go.etcd.io/bbolt/bolt_riscv64.go +++ b/vendor/go.etcd.io/bbolt/bolt_riscv64.go @@ -1,5 +1,4 @@ //go:build riscv64 -// +build riscv64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go index 92d2755adb4..749ea97e3a0 100644 --- a/vendor/go.etcd.io/bbolt/bolt_s390x.go +++ b/vendor/go.etcd.io/bbolt/bolt_s390x.go @@ -1,5 +1,4 @@ //go:build s390x -// +build s390x package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_solaris.go similarity index 100% rename from vendor/go.etcd.io/bbolt/bolt_unix_solaris.go rename to vendor/go.etcd.io/bbolt/bolt_solaris.go diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go index 757ae4d1a48..d1922c2d99d 100644 --- a/vendor/go.etcd.io/bbolt/bolt_unix.go +++ b/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -1,5 +1,4 @@ -//go:build !windows && !plan9 && !solaris && !aix -// +build !windows,!plan9,!solaris,!aix +//go:build !windows && !plan9 && !solaris && !aix && !android package bbolt @@ -10,6 +9,8 @@ import ( "unsafe" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/errors" ) // flock acquires an advisory lock on a file descriptor. 
@@ -36,7 +37,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed out then return an error. if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go index e5dde27454f..ec21ecb85c7 100644 --- a/vendor/go.etcd.io/bbolt/bolt_windows.go +++ b/vendor/go.etcd.io/bbolt/bolt_windows.go @@ -8,6 +8,8 @@ import ( "unsafe" "golang.org/x/sys/windows" + + "go.etcd.io/bbolt/errors" ) // fdatasync flushes written data to a file descriptor. @@ -42,7 +44,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed out then return an error. if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. @@ -70,7 +72,7 @@ func mmap(db *DB, sz int) error { return fmt.Errorf("truncate: %s", err) } sizehi = uint32(sz >> 32) - sizelo = uint32(sz) & 0xffffffff + sizelo = uint32(sz) } // Open a file mapping handle. @@ -93,7 +95,7 @@ func mmap(db *DB, sz int) error { } // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.data = (*[maxMapSize]byte)(unsafe.Pointer(addr)) db.datasz = sz return nil diff --git a/vendor/go.etcd.io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go index 81e09a5310f..27face752ed 100644 --- a/vendor/go.etcd.io/bbolt/boltsync_unix.go +++ b/vendor/go.etcd.io/bbolt/boltsync_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !plan9 && !linux && !openbsd -// +build !windows,!plan9,!linux,!openbsd package bbolt diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go index f3533d3446b..6371ace972c 100644 --- a/vendor/go.etcd.io/bbolt/bucket.go +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -4,6 +4,9 @@ import ( "bytes" "fmt" "unsafe" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) const ( @@ -14,8 +17,6 @@ const ( MaxValueSize = (1 << 31) - 2 ) -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - const ( minFillPercent = 0.1 maxFillPercent = 1.0 @@ -27,12 +28,12 @@ const DefaultFillPercent = 0.5 // Bucket represents a collection of key/value pairs inside the database. type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache + *common.InBucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *common.Page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[common.Pgid]*node // node cache // Sets the threshold for filling nodes when they split. By default, // the bucket will fill to 50% but it can be useful to increase this @@ -42,21 +43,12 @@ type Bucket struct { FillPercent float64 } -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - // newBucket returns a new bucket associated with a transaction.
func newBucket(tx *Tx) Bucket { var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} if tx.writable { b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) + b.nodes = make(map[common.Pgid]*node) } return b } @@ -67,8 +59,8 @@ func (b *Bucket) Tx() *Tx { } // Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root +func (b *Bucket) Root() common.Pgid { + return b.RootPage() } // Writable returns whether the bucket is writable. @@ -105,7 +97,7 @@ func (b *Bucket) Bucket(name []byte) *Bucket { k, v, flags := c.seek(name) // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + if !bytes.Equal(name, k) || (flags&common.BucketLeafFlag) == 0 { return nil } @@ -125,8 +117,8 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // Unaligned access requires a copy to be made. const unalignedMask = unsafe.Alignof(struct { - bucket - page + common.InBucket + common.Page }{}) - 1 unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 if unaligned { @@ -136,15 +128,15 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = &common.InBucket{} + *child.InBucket = *(*common.InBucket)(unsafe.Pointer(&value[0])) } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = (*common.InBucket)(unsafe.Pointer(&value[0])) } // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + if child.RootPage() == 0 { + child.page = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) } return &child @@ -153,13 +145,23 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // CreateBucket creates a new bucket at the given key and returns the new bucket. // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { +func (b *Bucket) CreateBucket(key []byte) (rb *Bucket, err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket %q successfully", key) + } + }() + } if b.tx.db == nil { - return nil, ErrTxClosed + return nil, errors.ErrTxClosed } else if !b.tx.writable { - return nil, ErrTxNotWritable + return nil, errors.ErrTxNotWritable } else if len(key) == 0 { - return nil, ErrBucketNameRequired + return nil, errors.ErrBucketNameRequired } // Insert into node. @@ -173,21 +175,21 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // Return an error if there is an existing key. if bytes.Equal(newKey, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists + if (flags & common.BucketLeafFlag) != 0 { + return nil, errors.ErrBucketExists } - return nil, ErrIncompatibleValue + return nil, errors.ErrIncompatibleValue } // Create empty, inline bucket. 
var bucket = Bucket{ - bucket: &bucket{}, + InBucket: &common.InBucket{}, rootNode: &node{isLeaf: true}, FillPercent: DefaultFillPercent, } var value = bucket.write() - c.node().put(newKey, newKey, value, 0, bucketLeafFlag) + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. This will cause the bucket @@ -200,39 +202,108 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. // Returns an error if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err +func (b *Bucket) CreateBucketIfNotExists(key []byte) (rb *Bucket, err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket if not exist %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket if not exist %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket if not exist %q successfully", key) + } + }() + } + + if b.tx.db == nil { + return nil, errors.ErrTxClosed + } else if !b.tx.writable { + return nil, errors.ErrTxNotWritable + } else if len(key) == 0 { + return nil, errors.ErrBucketNameRequired + } + + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, which would prevent it from being allocated on the stack. + newKey := cloneBytes(key) + + if b.buckets != nil { + if child := b.buckets[string(newKey)]; child != nil { + return child, nil + } + } + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(newKey) + + // Return an error if there is an existing non-bucket key. + if bytes.Equal(newKey, k) { + if (flags & common.BucketLeafFlag) != 0 { + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(newKey)] = child + } + + return child, nil + } + return nil, errors.ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + InBucket: &common.InBucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, } - return child, nil + var value = bucket.write() + + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(newKey), nil } // DeleteBucket deletes a bucket at the given key. // Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
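The common startup pattern mentioned in the README (create all top-level buckets right after opening the database) maps onto this function as follows; the bucket names here are hypothetical:

```go
err := db.Update(func(tx *bolt.Tx) error {
	for _, name := range []string{"users", "events", "config"} {
		if _, err := tx.CreateBucketIfNotExists([]byte(name)); err != nil {
			return fmt.Errorf("create bucket %q: %w", name, err)
		}
	}
	return nil
})
```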
-func (b *Bucket) DeleteBucket(key []byte) error { +func (b *Bucket) DeleteBucket(key []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting bucket %q failed: %v", key, err) + } else { + lg.Debugf("Deleting bucket %q successfully", key) + } + }() + } + if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue + if !bytes.Equal(newKey, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + return errors.ErrIncompatibleValue } // Recursively delete all child buckets. - child := b.Bucket(key) - err := child.ForEachBucket(func(k []byte) error { + child := b.Bucket(newKey) + err = child.ForEachBucket(func(k []byte) error { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) } @@ -243,7 +314,7 @@ func (b *Bucket) DeleteBucket(key []byte) error { } // Remove cached copy. - delete(b.buckets, string(key)) + delete(b.buckets, string(newKey)) // Release all bucket pages to freelist. child.nodes = nil @@ -251,19 +322,119 @@ func (b *Bucket) DeleteBucket(key []byte) error { child.free() // Delete the node if we have a matching key. - c.node().del(key) + c.node().del(newKey) return nil } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. the key already exists in the destination bucket; +// 3. the key represents a non-bucket value; +// 4. or the source and destination buckets are the same. +func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { + lg := b.tx.db.Logger() + if lg != discardLogger { + lg.Debugf("Moving bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Moving bucket %q failed: %v", key, err) + } else { + lg.Debugf("Moving bucket %q successfully", key) + } + }() + } + + if b.tx.db == nil || dstBucket.tx.db == nil { + return errors.ErrTxClosed + } else if !b.Writable() || !dstBucket.Writable() { + return errors.ErrTxNotWritable + } + + if b.tx.db.Path() != dstBucket.tx.db.Path() || b.tx != dstBucket.tx { + lg.Errorf("The source and target buckets are not in the same db file, source bucket in %s and target bucket in %s", b.tx.db.Path(), dstBucket.tx.db.Path()) + return errors.ErrDifferentDB + } + + newKey := cloneBytes(key) + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(newKey) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(newKey, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + lg.Errorf("An incompatible key %s exists in the source bucket", newKey) + return errors.ErrIncompatibleValue + } + + // Return an error directly if the source bucket and the + // destination bucket are actually the same bucket.
+ if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { + lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b, dstBucket) + return errors.ErrSameBuckets + } + + // check whether the key already exists in the destination bucket + curDst := dstBucket.Cursor() + k, _, flags = curDst.seek(newKey) + + // Return an error if there is an existing key in the destination bucket. + if bytes.Equal(newKey, k) { + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrBucketExists + } + lg.Errorf("An incompatible key %s exists in the target bucket", newKey) + return errors.ErrIncompatibleValue + } + + // remove the sub-bucket from the source bucket + delete(b.buckets, string(newKey)) + c.node().del(newKey) + + // add the sub-bucket to the destination bucket + newValue := cloneBytes(v) + curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag) + + return nil +} + +// Inspect returns the structure of the bucket. +func (b *Bucket) Inspect() BucketStructure { + return b.recursivelyInspect([]byte("root")) +} + +func (b *Bucket) recursivelyInspect(name []byte) BucketStructure { + bs := BucketStructure{Name: string(name)} + + keyN := 0 + c := b.Cursor() + for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { + if flags&common.BucketLeafFlag != 0 { + childBucket := b.Bucket(k) + childBS := childBucket.recursivelyInspect(k) + bs.Children = append(bs.Children, childBS) + } else { + keyN++ + } + } + bs.KeyN = keyN + + return bs +} + // Get retrieves the value for a key in the bucket. // Returns a nil value if the key does not exist or if the key is a nested bucket. // The returned value is only valid for the life of the transaction. +// The returned memory is owned by bbolt and must never be modified; writing to this memory might corrupt the database. func (b *Bucket) Get(key []byte) []byte { k, v, flags := b.Cursor().seek(key) // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { + if (flags & common.BucketLeafFlag) != 0 { return nil } @@ -278,17 +449,27 @@ func (b *Bucket) Get(key []byte) []byte { // If the key exists then its previous value will be overwritten. // Supplied value must remain valid for the life of the transaction. // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. -func (b *Bucket) Put(key []byte, value []byte) error { +func (b *Bucket) Put(key []byte, value []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Putting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Putting key %q failed: %v", key, err) + } else { + lg.Debugf("Putting key %q successfully", key) + } + }() + } if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } else if len(key) == 0 { - return ErrKeyRequired + return errors.ErrKeyRequired } else if len(key) > MaxKeySize { - return ErrKeyTooLarge + return errors.ErrKeyTooLarge } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge + return errors.ErrValueTooLarge } // Insert into node. @@ -301,8 +482,8 @@ func (b *Bucket) Put(key []byte, value []byte) error { k, _, flags := c.seek(newKey) // Return an error if there is an existing key with a bucket value.
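A usage sketch for the new `MoveBucket` and `Inspect` APIs; the bucket names (`staging`, `live`, `batch-1`) are hypothetical, and both buckets must come from the same writable transaction:

```go
db.Update(func(tx *bolt.Tx) error {
	src := tx.Bucket([]byte("staging"))
	dst := tx.Bucket([]byte("live"))
	if src == nil || dst == nil {
		return errors.New("missing top-level bucket")
	}
	// Move the sub-bucket "batch-1" out of src and under dst.
	if err := src.MoveBucket([]byte("batch-1"), dst); err != nil {
		return err
	}
	// Inspect reports the bucket tree (names and key counts) under dst.
	fmt.Printf("%+v\n", dst.Inspect())
	return nil
})
```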
- if bytes.Equal(newKey, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if bytes.Equal(newKey, k) && (flags&common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } // gofail: var beforeBucketPut struct{} @@ -315,11 +496,22 @@ func (b *Bucket) Put(key []byte, value []byte) error { // Delete removes a key from the bucket. // If the key does not exist then nothing is done and a nil error is returned. // Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { +func (b *Bucket) Delete(key []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting key %q failed: %v", key, err) + } else { + lg.Debugf("Deleting key %q successfully", key) + } + }() + } + if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } // Move cursor to correct position. @@ -332,8 +524,8 @@ func (b *Bucket) Delete(key []byte) error { } // Return an error if there is already existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } // Delete the node if we have a matching key. @@ -343,44 +535,46 @@ func (b *Bucket) Delete(key []byte) error { } // Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } +func (b *Bucket) Sequence() uint64 { + return b.InSequence() +} // SetSequence updates the sequence number for the bucket. func (b *Bucket) SetSequence(v uint64) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Set the sequence. - b.bucket.sequence = v + b.SetInSequence(v) return nil } // NextSequence returns an autoincrementing integer for the bucket. func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { - return 0, ErrTxClosed + return 0, errors.ErrTxClosed } else if !b.Writable() { - return 0, ErrTxNotWritable + return 0, errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil + b.IncSequence() + return b.Sequence(), nil } // ForEach executes a function for each key/value pair in a bucket. @@ -390,7 +584,7 @@ func (b *Bucket) NextSequence() (uint64, error) { // the bucket; this will result in undefined behavior. 
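Because the sequence counter lives in the bucket header and persists across transactions, `NextSequence` is commonly used to mint ordered keys; a sketch with a hypothetical `events` bucket:

```go
db.Update(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucketIfNotExists([]byte("events"))
	if err != nil {
		return err
	}
	id, err := b.NextSequence() // starts at 1, increments on each call
	if err != nil {
		return err
	}
	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, id) // big-endian keeps keys byte-sortable
	return b.Put(key, []byte("payload"))
})
```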
func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { @@ -403,11 +597,11 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error { func (b *Bucket) ForEachBucket(fn func(k []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { - if flags&bucketLeafFlag != 0 { + if flags&common.BucketLeafFlag != 0 { if err := fn(k); err != nil { return err } @@ -421,64 +615,64 @@ func (b *Bucket) Stats() BucketStats { var s, subStats BucketStats pageSize := b.tx.db.pageSize s.BucketN += 1 - if b.root == 0 { + if b.RootPage() == 0 { s.InlineBucketN += 1 } - b.forEachPage(func(p *page, depth int, pgstack []pgid) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) + b.forEachPage(func(p *common.Page, depth int, pgstack []common.Pgid) { + if p.IsLeafPage() { + s.KeyN += int(p.Count()) // used totals the used bytes for the page - used := pageHeaderSize + used := common.PageHeaderSize - if p.count != 0 { + if p.Count() != 0 { // If page has any elements, add all element headers. - used += leafPageElementSize * uintptr(p.count-1) + used += common.LeafPageElementSize * uintptr(p.Count()-1) // Add all element key, value sizes. // The computation takes advantage of the fact that the position // of the last element's key/value equals to the total of the sizes // of all previous elements' keys and values. // It also includes the last element's header. - lastElement := p.leafPageElement(p.count - 1) - used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) + lastElement := p.LeafPageElement(p.Count() - 1) + used += uintptr(lastElement.Pos() + lastElement.Ksize() + lastElement.Vsize()) } - if b.root == 0 { + if b.RootPage() == 0 { // For inlined bucket just update the inline stats s.InlineBucketInuse += int(used) } else { // For non-inlined bucket update all the leaf stats s.LeafPageN++ s.LeafInuse += int(used) - s.LeafOverflowN += int(p.overflow) + s.LeafOverflowN += int(p.Overflow()) // Collect stats from sub-buckets. // Do that by iterating over all element headers // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { + for i := uint16(0); i < p.Count(); i++ { + e := p.LeafPageElement(i) + if (e.Flags() & common.BucketLeafFlag) != 0 { // For any bucket element, open the element value // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) + subStats.Add(b.openBucket(e.Value()).Stats()) } } } - } else if (p.flags & branchPageFlag) != 0 { + } else if p.IsBranchPage() { s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) + lastElement := p.BranchPageElement(p.Count() - 1) // used totals the used bytes for the page // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) + used := common.PageHeaderSize + (common.BranchPageElementSize * uintptr(p.Count()-1)) // Add size of all keys and values. // Again, use the fact that last element's position equals to // the total of key, value sizes of all previous elements. 
- used += uintptr(lastElement.pos + lastElement.ksize) + used += uintptr(lastElement.Pos() + lastElement.Ksize()) s.BranchInuse += int(used) - s.BranchOverflowN += int(p.overflow) + s.BranchOverflowN += int(p.Overflow()) } // Keep track of maximum page depth. @@ -499,29 +693,29 @@ func (b *Bucket) Stats() BucketStats { } // forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) { +func (b *Bucket) forEachPage(fn func(*common.Page, int, []common.Pgid)) { // If we have an inline page then just use that. if b.page != nil { - fn(b.page, 0, []pgid{b.root}) + fn(b.page, 0, []common.Pgid{b.RootPage()}) return } // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, fn) + b.tx.forEachPage(b.RootPage(), fn) } // forEachPageNode iterates over every page (or node) in a bucket. // This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { +func (b *Bucket) forEachPageNode(fn func(*common.Page, *node, int)) { // If we have an inline page or root node then just use that. if b.page != nil { fn(b.page, nil, 0) return } - b._forEachPageNode(b.root, 0, fn) + b._forEachPageNode(b.RootPage(), 0, fn) } -func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) { +func (b *Bucket) _forEachPageNode(pgId common.Pgid, depth int, fn func(*common.Page, *node, int)) { var p, n = b.pageNode(pgId) // Execute function. @@ -529,16 +723,16 @@ func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, in // Recursively loop over children. if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) + if p.IsBranchPage() { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + b._forEachPageNode(elem.Pgid(), depth+1, fn) } } } else { if !n.isLeaf { for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) + b._forEachPageNode(inode.Pgid(), depth+1, fn) } } } @@ -561,9 +755,9 @@ func (b *Bucket) spill() error { } // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket + value = make([]byte, unsafe.Sizeof(common.InBucket{})) + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *child.InBucket } // Skip writing the bucket if there are no materialized nodes. @@ -577,10 +771,10 @@ func (b *Bucket) spill() error { if !bytes.Equal([]byte(name), k) { panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) } - if flags&bucketLeafFlag == 0 { + if flags&common.BucketLeafFlag == 0 { panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + c.node().put([]byte(name), []byte(name), value, 0, common.BucketLeafFlag) } // Ignore if there's not a materialized root node. @@ -595,16 +789,16 @@ func (b *Bucket) spill() error { b.rootNode = b.rootNode.root() // Update the root node for this bucket. 
- if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + if b.rootNode.pgid >= b.tx.meta.Pgid() { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.Pgid())) } - b.root = b.rootNode.pgid + b.SetRootPage(b.rootNode.pgid) return nil } // inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. +// and if it contains no subbuckets. Otherwise, returns false. func (b *Bucket) inlineable() bool { var n = b.rootNode @@ -615,11 +809,11 @@ func (b *Bucket) inlineable() bool { // Bucket is not inlineable if it contains subbuckets or if it goes beyond // our threshold for inline bucket size. - var size = pageHeaderSize + var size = common.PageHeaderSize for _, inode := range n.inodes { - size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) + size += common.LeafPageElementSize + uintptr(len(inode.Key())) + uintptr(len(inode.Value())) - if inode.flags&bucketLeafFlag != 0 { + if inode.Flags()&common.BucketLeafFlag != 0 { return false } else if size > b.maxInlineBucketSize() { return false @@ -638,14 +832,14 @@ func (b *Bucket) maxInlineBucketSize() uintptr { func (b *Bucket) write() []byte { // Allocate the appropriate size. var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) + var value = make([]byte, common.BucketHeaderSize+n.size()) // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *b.InBucket // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + var p = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) n.write(p) return value @@ -662,8 +856,8 @@ func (b *Bucket) rebalance() { } // node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgId pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") +func (b *Bucket) node(pgId common.Pgid, parent *node) *node { + common.Assert(b.nodes != nil, "nodes map expected") // Retrieve node if it's already been created. if n := b.nodes[pgId]; n != nil { @@ -682,6 +876,12 @@ func (b *Bucket) node(pgId pgid, parent *node) *node { var p = b.page if p == nil { p = b.tx.page(pgId) + } else { + // if p isn't nil, then it's an inline bucket. + // The pgId must be 0 in this case. + common.Verify(func() { + common.Assert(pgId == 0, "The page ID (%d) isn't 0 for an inline bucket", pgId) + }) } // Read the page into the node and cache it. @@ -696,19 +896,19 @@ func (b *Bucket) node(pgId pgid, parent *node) *node { // free recursively frees all pages in the bucket. func (b *Bucket) free() { - if b.root == 0 { + if b.RootPage() == 0 { return } var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { + b.forEachPageNode(func(p *common.Page, n *node, _ int) { if p != nil { - tx.db.freelist.free(tx.meta.txid, p) + tx.db.freelist.Free(tx.meta.Txid(), p) } else { n.free() } }) - b.root = 0 + b.SetRootPage(0) } // dereference removes all references to the old mmap. @@ -723,11 +923,11 @@ func (b *Bucket) dereference() { } // pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { +// Otherwise, returns the underlying page. 
+func (b *Bucket) pageNode(id common.Pgid) (*common.Page, *node) { // Inline buckets have a fake page embedded in their value so treat them // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { + if b.RootPage() == 0 { if id != 0 { panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) } @@ -797,3 +997,9 @@ func cloneBytes(v []byte) []byte { copy(clone, v) return clone } + +type BucketStructure struct { + Name string `json:"name"` // name of the bucket + KeyN int `json:"keyN"` // number of key/value pairs + Children []BucketStructure `json:"buckets,omitempty"` // child buckets +} diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go index bbfd92a9bc1..0c1e28c106f 100644 --- a/vendor/go.etcd.io/bbolt/cursor.go +++ b/vendor/go.etcd.io/bbolt/cursor.go @@ -4,6 +4,9 @@ import ( "bytes" "fmt" "sort" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) // Cursor represents an iterator that can traverse over all key/value pairs in a bucket @@ -30,9 +33,9 @@ func (c *Cursor) Bucket() *Bucket { // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.first() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -40,7 +43,7 @@ func (c *Cursor) First() (key []byte, value []byte) { func (c *Cursor) first() (key []byte, value []byte, flags uint32) { c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.goToFirstElementOnTheStack() @@ -51,7 +54,7 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil, flags } return k, v, flags @@ -61,9 +64,9 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) ref := elemRef{page: p, node: n} ref.index = ref.count() - 1 c.stack = append(c.stack, ref) @@ -80,7 +83,7 @@ func (c *Cursor) Last() (key []byte, value []byte) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -90,9 +93,9 @@ func (c *Cursor) Last() (key []byte, value []byte) { // If the cursor is at the end of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. 
func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -102,9 +105,9 @@ func (c *Cursor) Next() (key []byte, value []byte) { // If the cursor is at the beginning of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.prev() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -115,7 +118,7 @@ func (c *Cursor) Prev() (key []byte, value []byte) { // follow, a nil key is returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.seek(seek) @@ -126,7 +129,7 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { if k == nil { return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { + } else if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -136,15 +139,15 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { // Delete fails if current key/value is a bucket or if the transaction is not writable. func (c *Cursor) Delete() error { if c.bucket.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !c.bucket.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } key, _, flags := c.keyValue() // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } c.node().del(key) @@ -156,7 +159,7 @@ func (c *Cursor) Delete() error { func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { // Start from root page/node and traverse to correct page. c.stack = c.stack[:0] - c.search(seek, c.bucket.root) + c.search(seek, c.bucket.RootPage()) // If this is a bucket then return a nil value. return c.keyValue() @@ -172,11 +175,11 @@ func (c *Cursor) goToFirstElementOnTheStack() { } // Keep adding pages pointing to the first element to the stack. - var pgId pgid + var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) @@ -193,11 +196,11 @@ func (c *Cursor) last() { } // Keep adding pages pointing to the last element in the stack. 
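Given the `Seek` semantics above (position on the key if present, otherwise on the next key in order), a prefix scan needs no extra API; a sketch assuming a `MyBucket` bucket whose keys use a `user:` prefix:

```go
db.View(func(tx *bolt.Tx) error {
	prefix := []byte("user:")
	c := tx.Bucket([]byte("MyBucket")).Cursor()
	// Walk forward from the first key >= prefix until keys stop sharing it.
	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
		fmt.Printf("%s = %s\n", k, v)
	}
	return nil
})
```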
- var pgId pgid + var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) @@ -277,10 +280,10 @@ func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { } // search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgId pgid) { +func (c *Cursor) search(key []byte, pgId common.Pgid) { p, n := c.bucket.pageNode(pgId) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + if p != nil && !p.IsBranchPage() && !p.IsLeafPage() { + panic(fmt.Sprintf("invalid page type: %d: %x", p.Id(), p.Flags())) } e := elemRef{page: p, node: n} c.stack = append(c.stack, e) @@ -303,7 +306,7 @@ func (c *Cursor) searchNode(key []byte, n *node) { index := sort.Search(len(n.inodes), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) + ret := bytes.Compare(n.inodes[i].Key(), key) if ret == 0 { exact = true } @@ -315,18 +318,18 @@ func (c *Cursor) searchNode(key []byte, n *node) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) + c.search(key, n.inodes[index].Pgid()) } -func (c *Cursor) searchPage(key []byte, p *page) { +func (c *Cursor) searchPage(key []byte, p *common.Page) { // Binary search for the correct range. - inodes := p.branchPageElements() + inodes := p.BranchPageElements() var exact bool - index := sort.Search(int(p.count), func(i int) bool { + index := sort.Search(int(p.Count()), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) + ret := bytes.Compare(inodes[i].Key(), key) if ret == 0 { exact = true } @@ -338,7 +341,7 @@ func (c *Cursor) searchPage(key []byte, p *page) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, inodes[index].pgid) + c.search(key, inodes[index].Pgid()) } // nsearch searches the leaf node on the top of the stack for a key. @@ -349,16 +352,16 @@ func (c *Cursor) nsearch(key []byte) { // If we have a node then search its inodes. if n != nil { index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 + return bytes.Compare(n.inodes[i].Key(), key) != -1 }) e.index = index return } // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 + inodes := p.LeafPageElements() + index := sort.Search(int(p.Count()), func(i int) bool { + return bytes.Compare(inodes[i].Key(), key) != -1 }) e.index = index } @@ -375,17 +378,17 @@ func (c *Cursor) keyValue() ([]byte, []byte, uint32) { // Retrieve value from node. if ref.node != nil { inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags + return inode.Key(), inode.Value(), inode.Flags() } // Or retrieve value from page. 
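The search hunks above swap direct field access (`inodes[i].key`) for accessor methods (`inodes[i].Key()`) without touching the underlying binary-search strategy. For reference, a self-contained sketch of the same `sort.Search` idiom the cursor uses; this is standalone code, not bbolt internals:

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

func main() {
	keys := [][]byte{[]byte("apple"), []byte("kiwi"), []byte("pear")}

	seek := []byte("melon")
	// Same idiom as Cursor.nsearch: find the first index whose key is >= seek.
	i := sort.Search(len(keys), func(i int) bool {
		return bytes.Compare(keys[i], seek) != -1
	})
	fmt.Println(i, string(keys[i])) // 2 pear
}
```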
- elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags + elem := ref.page.LeafPageElement(uint16(ref.index)) + return elem.Key(), elem.Value(), elem.Flags() } // node returns the node that the cursor is currently positioned on. func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + common.Assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") // If the top of the stack is a leaf node then just return it. if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { @@ -395,19 +398,19 @@ func (c *Cursor) node() *node { // Start from root and traverse down the hierarchy. var n = c.stack[0].node if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) + n = c.bucket.node(c.stack[0].page.Id(), nil) } for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") + common.Assert(!n.isLeaf, "expected branch node") n = n.childAt(ref.index) } - _assert(n.isLeaf, "expected leaf node") + common.Assert(n.isLeaf, "expected leaf node") return n } // elemRef represents a reference to an element on a given page/node. type elemRef struct { - page *page + page *common.Page node *node index int } @@ -417,7 +420,7 @@ func (r *elemRef) isLeaf() bool { if r.node != nil { return r.node.isLeaf } - return (r.page.flags & leafPageFlag) != 0 + return r.page.IsLeafPage() } // count returns the number of inodes or page elements. @@ -425,5 +428,5 @@ func (r *elemRef) count() int { if r.node != nil { return len(r.node.inodes) } - return int(r.page.count) + return int(r.page.Count()) } diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index 822798e41a5..5c1947e9984 100644 --- a/vendor/go.etcd.io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -3,49 +3,28 @@ package bbolt import ( "errors" "fmt" - "hash/fnv" "io" "os" "runtime" - "sort" "sync" "time" "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -const pgidNoFreelist pgid = 0xffffffffffffffff - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 + berrors "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" + fl "go.etcd.io/bbolt/internal/freelist" ) -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - // The time elapsed between consecutive file locking attempts. const flockRetryTimeout = 50 * time.Millisecond // FreelistType is the type of the freelist backend type FreelistType string +// TODO(ahrtr): eventually we should (step by step) +// 1. default to `FreelistMapType`; +// 2. 
remove the `FreelistArrayType`, do not export `FreelistMapType` +// and remove field `FreelistType' from both `DB` and `Options`; const ( // FreelistArrayType indicates backend freelist type is array FreelistArrayType = FreelistType("array") @@ -137,6 +116,8 @@ type DB struct { // Supported only on Unix via mlock/munlock syscalls. Mlock bool + logger Logger + path string openFile func(string, int, os.FileMode) (*os.File, error) file *os.File @@ -146,15 +127,14 @@ type DB struct { dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta + meta0 *common.Meta + meta1 *common.Meta pageSize int opened bool rwtx *Tx txs []*Tx - freelist *freelist + freelist fl.Interface freelistLoad sync.Once pagePool sync.Pool @@ -191,13 +171,15 @@ func (db *DB) String() string { return fmt.Sprintf("DB<%q>", db.path) } -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. +// Open creates and opens a database at the given path with a given file mode. +// If the file does not exist then it will be created automatically with a given file mode. // Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - db := &DB{ +// Note: For read/write transactions, ensure the owner has write permission on the created/opened database file, e.g. 0600 +func Open(path string, mode os.FileMode, options *Options) (db *DB, err error) { + db = &DB{ opened: true, } + // Set default options if no options are provided. if options == nil { options = DefaultOptions @@ -211,9 +193,27 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.Mlock = options.Mlock // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize + db.MaxBatchSize = common.DefaultMaxBatchSize + db.MaxBatchDelay = common.DefaultMaxBatchDelay + db.AllocSize = common.DefaultAllocSize + + if options.Logger == nil { + db.logger = getDiscardLogger() + } else { + db.logger = options.Logger + } + + lg := db.Logger() + if lg != discardLogger { + lg.Infof("Opening db file (%s) with mode %s and with options: %s", path, mode, options) + defer func() { + if err != nil { + lg.Errorf("Opening bbolt db (%s) failed: %v", path, err) + } else { + lg.Infof("Opening bbolt db (%s) successfully", path) + } + }() + } flag := os.O_RDWR if options.ReadOnly { @@ -222,6 +222,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } else { // always load free pages in write mode db.PreLoadFreelist = true + flag |= os.O_CREATE } db.openFile = options.OpenFile @@ -230,9 +231,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Open data file and separate sync handler for metadata writes. - var err error - if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { + if db.file, err = db.openFile(path, flag, mode); err != nil { _ = db.close() + lg.Errorf("failed to open db file (%s): %v", path, err) return nil, err } db.path = db.file.Name() @@ -244,8 +245,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // if !options.ReadOnly. // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). 
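The reworked Open above now logs through an optional Logger and sets O_CREATE only in write mode. Per its updated doc comment, the owner needs write permission on the database file for read/write transactions. A typical call using the public API; the path and timeout below are arbitrary examples:

```go
package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// 0600 gives the owner read/write access, as the updated doc comment
	// advises; Timeout bounds the flock() wait that Open performs.
	db, err := bolt.Open("example.db", 0600, &bolt.Options{Timeout: time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```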
- if err := flock(db, !db.readOnly, options.Timeout); err != nil { + if err = flock(db, !db.readOnly, options.Timeout); err != nil { _ = db.close() + lg.Errorf("failed to lock db file (%s), readonly: %t, error: %v", path, db.readOnly, err) return nil, err } @@ -254,27 +256,28 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { if db.pageSize = options.PageSize; db.pageSize == 0 { // Set the default page size to the OS page size. - db.pageSize = defaultPageSize + db.pageSize = common.DefaultPageSize } // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { + if info, statErr := db.file.Stat(); statErr != nil { _ = db.close() - return nil, err + lg.Errorf("failed to get db file's stats (%s): %v", path, err) + return nil, statErr } else if info.Size() == 0 { // Initialize new files with meta pages. - if err := db.init(); err != nil { + if err = db.init(); err != nil { // clean up file descriptor on initialization fail _ = db.close() + lg.Errorf("failed to initialize db file (%s): %v", path, err) return nil, err } } else { // try to get the page size from the metadata pages - if pgSize, err := db.getPageSize(); err == nil { - db.pageSize = pgSize - } else { + if db.pageSize, err = db.getPageSize(); err != nil { _ = db.close() - return nil, ErrInvalid + lg.Errorf("failed to get page size from db file (%s): %v", path, err) + return nil, err } } @@ -286,8 +289,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { + if err = db.mmap(options.InitialMmapSize); err != nil { _ = db.close() + lg.Errorf("failed to map db file (%s): %v", path, err) return nil, err } @@ -302,13 +306,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Flush freelist when transitioning from no sync to sync so // NoFreelistSync unaware boltdb can open the db later. 
if !db.NoFreelistSync && !db.hasSyncedFreelist() { - tx, err := db.Begin(true) + tx, txErr := db.Begin(true) if tx != nil { - err = tx.Commit() + txErr = tx.Commit() } - if err != nil { + if txErr != nil { + lg.Errorf("starting readwrite transaction failed: %v", txErr) _ = db.close() - return nil, err + return nil, txErr } } @@ -352,7 +357,7 @@ func (db *DB) getPageSize() (int, error) { return db.pageSize, nil } - return 0, ErrInvalid + return 0, berrors.ErrInvalid } // getPageSizeFromFirstMeta reads the pageSize from the first meta page @@ -361,11 +366,11 @@ func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) { var metaCanRead bool if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // getPageSizeFromSecondMeta reads the pageSize from the second meta page @@ -397,13 +402,13 @@ func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) { bw, err := db.file.ReadAt(buf[:], pos) if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // loadFreelist reads the freelist if it is synced, or reconstructs it @@ -414,17 +419,29 @@ func (db *DB) loadFreelist() { db.freelist = newFreelist(db.FreelistType) if !db.hasSyncedFreelist() { // Reconstruct free list by scanning the DB. - db.freelist.readIDs(db.freepages()) + db.freelist.Init(db.freepages()) } else { // Read free list from freelist page. - db.freelist.read(db.page(db.meta().freelist)) + db.freelist.Read(db.page(db.meta().Freelist())) } - db.stats.FreePageN = db.freelist.free_count() + db.stats.FreePageN = db.freelist.FreeCount() }) } func (db *DB) hasSyncedFreelist() bool { - return db.meta().freelist != pgidNoFreelist + return db.meta().Freelist() != common.PgidNoFreelist +} + +func (db *DB) fileSize() (int, error) { + info, err := db.file.Stat() + if err != nil { + return 0, fmt.Errorf("file stat error: %w", err) + } + sz := int(info.Size()) + if sz < db.pageSize*2 { + return 0, fmt.Errorf("file size too small %d", sz) + } + return sz, nil } // mmap opens the underlying memory-mapped file and initializes the meta references. @@ -433,21 +450,22 @@ func (db *DB) mmap(minsz int) (err error) { db.mmaplock.Lock() defer db.mmaplock.Unlock() - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } + lg := db.Logger() // Ensure the size is at least the minimum size. 
- fileSize := int(info.Size()) + var fileSize int + fileSize, err = db.fileSize() + if err != nil { + lg.Errorf("getting file size failed: %w", err) + return err + } var size = fileSize if size < minsz { size = minsz } size, err = db.mmapSize(size) if err != nil { + lg.Errorf("getting map size failed: %w", err) return err } @@ -472,6 +490,7 @@ func (db *DB) mmap(minsz int) (err error) { // gofail: var mapError string // return errors.New(mapError) if err = mmap(db, size); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] mmap failed, size: %d, error: %v", runtime.GOOS, runtime.GOARCH, size, err) return err } @@ -493,15 +512,16 @@ func (db *DB) mmap(minsz int) (err error) { } // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() + db.meta0 = db.page(0).Meta() + db.meta1 = db.page(1).Meta() // Validate the meta pages. We only return an error if both meta pages fail // validation, since meta0 failing validation means that it wasn't saved // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() + err0 := db.meta0.Validate() + err1 := db.meta1.Validate() if err0 != nil && err1 != nil { + lg.Errorf("both meta pages are invalid, meta0: %v, meta1: %v", err0, err1) return err0 } @@ -524,6 +544,7 @@ func (db *DB) munmap() error { // gofail: var unmapError string // return errors.New(unmapError) if err := munmap(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munmap failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("unmap error: %v", err.Error()) } @@ -543,13 +564,13 @@ func (db *DB) mmapSize(size int) (int, error) { // Verify the requested size is not above the maximum allowed. if size > maxMapSize { - return 0, fmt.Errorf("mmap too large") + return 0, errors.New("mmap too large") } // If larger than 1GB then grow by 1GB at a time. sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder + if remainder := sz % int64(common.MaxMmapStep); remainder > 0 { + sz += int64(common.MaxMmapStep) - remainder } // Ensure that the mmap size is a multiple of the page size. @@ -571,6 +592,7 @@ func (db *DB) munlock(fileSize int) error { // gofail: var munlockError string // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("munlock error: %v", err.Error()) } return nil @@ -580,6 +602,7 @@ func (db *DB) mlock(fileSize int) error { // gofail: var mlockError string // return errors.New(mlockError) if err := mlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] mlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("mlock error: %v", err.Error()) } return nil @@ -600,42 +623,43 @@ func (db *DB) init() error { // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf, pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag + p := db.pageInBuffer(buf, common.Pgid(i)) + p.SetId(common.Pgid(i)) + p.SetFlags(common.MetaPageFlag) // Initialize the meta page. 
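The mmapSize hunk above only moves maxMmapStep into common.MaxMmapStep; the sizing policy itself is unchanged. A simplified standalone sketch of that policy, assuming bbolt's usual doubling tier below 1GB (that tier is not visible in this hunk) and omitting the maxMapSize cap; mmapSizeSketch is a hypothetical name:

```go
package main

import "fmt"

const (
	maxMmapStep = 1 << 30 // grow 1GB at a time past 1GB, per the hunk above
	pageSize    = 4096    // stand-in for db.pageSize
)

// mmapSizeSketch doubles the map size up to 1GB, then rounds up to the next
// 1GB boundary, and finally ensures the result is a page-size multiple.
func mmapSizeSketch(size int) int {
	for i := uint(15); i <= 30; i++ { // 32KB ... 1GB
		if size <= 1<<i {
			return 1 << i
		}
	}
	sz := int64(size)
	if remainder := sz % maxMmapStep; remainder > 0 {
		sz += maxMmapStep - remainder
	}
	if sz%pageSize != 0 {
		sz = ((sz / pageSize) + 1) * pageSize
	}
	return int(sz)
}

func main() {
	fmt.Println(mmapSizeSketch(40 * 1024)) // 65536
	fmt.Println(mmapSizeSketch(1<<30 + 1)) // 2147483648 (next 1GB step)
}
```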
- m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() + m := p.Meta() + m.SetMagic(common.Magic) + m.SetVersion(common.Version) + m.SetPageSize(uint32(db.pageSize)) + m.SetFreelist(2) + m.SetRootBucket(common.NewInBucket(3, 0)) + m.SetPgid(4) + m.SetTxid(common.Txid(i)) + m.SetChecksum(m.Sum64()) } // Write an empty freelist at page 3. - p := db.pageInBuffer(buf, pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 + p := db.pageInBuffer(buf, common.Pgid(2)) + p.SetId(2) + p.SetFlags(common.FreelistPageFlag) + p.SetCount(0) // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf, pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 + p = db.pageInBuffer(buf, common.Pgid(3)) + p.SetId(3) + p.SetFlags(common.LeafPageFlag) + p.SetCount(0) // Write the buffer to our data file. if _, err := db.ops.writeAt(buf, 0); err != nil { + db.Logger().Errorf("writeAt failed: %w", err) return err } if err := fdatasync(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } - db.filesz = len(buf) return nil } @@ -716,13 +740,31 @@ func (db *DB) close() error { // // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { +func (db *DB) Begin(writable bool) (t *Tx, err error) { + if lg := db.Logger(); lg != discardLogger { + lg.Debugf("Starting a new transaction [writable: %t]", writable) + defer func() { + if err != nil { + lg.Errorf("Starting a new transaction [writable: %t] failed: %v", writable, err) + } else { + lg.Debugf("Starting a new transaction [writable: %t] successfully", writable) + } + }() + } + if writable { return db.beginRWTx() } return db.beginTx() } +func (db *DB) Logger() Logger { + if db == nil || db.logger == nil { + return getDiscardLogger() + } + return db.logger +} + func (db *DB) beginTx() (*Tx, error) { // Lock the meta pages while we initialize the transaction. We obtain // the meta lock before the mmap lock because that's the order that the @@ -738,14 +780,14 @@ func (db *DB) beginTx() (*Tx, error) { if !db.opened { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. @@ -755,6 +797,9 @@ func (db *DB) beginTx() (*Tx, error) { // Keep track of transaction until it closes. db.txs = append(db.txs, t) n := len(db.txs) + if db.freelist != nil { + db.freelist.AddReadonlyTXID(t.meta.Txid()) + } // Unlock the meta pages. db.metalock.Unlock() @@ -771,7 +816,7 @@ func (db *DB) beginTx() (*Tx, error) { func (db *DB) beginRWTx() (*Tx, error) { // If the database was opened with Options.ReadOnly, return an error. if db.readOnly { - return nil, ErrDatabaseReadOnly + return nil, berrors.ErrDatabaseReadOnly } // Obtain writer lock. This is released by the transaction when it closes. @@ -786,49 +831,23 @@ func (db *DB) beginRWTx() (*Tx, error) { // Exit if the database is not open yet. 
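Begin above gains debug logging when a non-discard logger is configured, but its caller-facing contract is unchanged. Standard usage of the public API:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("tx-example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Begin(true) takes the single writer lock; Commit releases it.
	tx, err := db.Begin(true)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := tx.CreateBucketIfNotExists([]byte("demo")); err != nil {
		_ = tx.Rollback()
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}
```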
if !db.opened { db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.rwlock.Unlock() - return nil, ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. t := &Tx{writable: true} t.init(db) db.rwtx = t - db.freePages() + db.freelist.ReleasePendingPages() return t, nil } -// freePages releases any pages associated with closed read-only transactions. -func (db *DB) freePages() { - // Free all pending pages prior to earliest open transaction. - sort.Sort(txsById(db.txs)) - minid := txid(0xFFFFFFFFFFFFFFFF) - if len(db.txs) > 0 { - minid = db.txs[0].meta.txid - } - if minid > 0 { - db.freelist.release(minid - 1) - } - // Release unused txid extents. - for _, t := range db.txs { - db.freelist.releaseRange(minid, t.meta.txid-1) - minid = t.meta.txid + 1 - } - db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) - // Any page both allocated and freed in an extent is safe to release. -} - -type txsById []*Tx - -func (t txsById) Len() int { return len(t) } -func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } - // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -848,6 +867,9 @@ func (db *DB) removeTx(tx *Tx) { } } n := len(db.txs) + if db.freelist != nil { + db.freelist.RemoveReadonlyTXID(tx.meta.Txid()) + } // Unlock the meta pages. db.metalock.Unlock() @@ -1056,7 +1078,20 @@ func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { // // This is not necessary under normal operation, however, if you use NoSync // then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } +func (db *DB) Sync() (err error) { + if lg := db.Logger(); lg != discardLogger { + lg.Debug("Syncing bbolt db (%s)", db.path) + defer func() { + if err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing bbolt db (%s) failed: %v", runtime.GOOS, runtime.GOARCH, db.path, err) + } else { + lg.Debugf("Syncing bbolt db (%s) successfully", db.path) + } + }() + } + + return fdatasync(db) +} // Stats retrieves ongoing performance stats for the database. // This is only updated when a transaction closes. @@ -1069,37 +1104,37 @@ func (db *DB) Stats() Stats { // This is for internal access to the raw data bytes from the C cursor, use // carefully, or not at all. func (db *DB) Info() *Info { - _assert(db.data != nil, "database file isn't correctly mapped") + common.Assert(db.data != nil, "database file isn't correctly mapped") return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} } // page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) +func (db *DB) page(id common.Pgid) *common.Page { + pos := id * common.Pgid(db.pageSize) + return (*common.Page)(unsafe.Pointer(&db.data[pos])) } // pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +func (db *DB) pageInBuffer(b []byte, id common.Pgid) *common.Page { + return (*common.Page)(unsafe.Pointer(&b[id*common.Pgid(db.pageSize)])) } // meta retrieves the current meta page reference. 
-func (db *DB) meta() *meta { +func (db *DB) meta() *common.Meta { // We have to return the meta with the highest txid which doesn't fail // validation. Otherwise, we can cause errors when in fact the database is // in a consistent state. metaA is the one with the higher txid. metaA := db.meta0 metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { + if db.meta1.Txid() > db.meta0.Txid() { metaA = db.meta1 metaB = db.meta0 } // Use higher meta page if valid. Otherwise, fallback to previous, if valid. - if err := metaA.validate(); err == nil { + if err := metaA.Validate(); err == nil { return metaA - } else if err := metaB.validate(); err == nil { + } else if err := metaB.Validate(); err == nil { return metaB } @@ -1109,7 +1144,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(txid txid, count int) (*page, error) { +func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { @@ -1117,17 +1152,18 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } else { buf = make([]byte, count*db.pageSize) } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) + p := (*common.Page)(unsafe.Pointer(&buf[0])) + p.SetOverflow(uint32(count - 1)) // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(txid, count); p.id != 0 { + p.SetId(db.freelist.Allocate(txid, count)) + if p.Id() != 0 { return p, nil } // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize + p.SetId(db.rwtx.meta.Pgid()) + var minsz = int((p.Id()+common.Pgid(count))+1) * db.pageSize if minsz >= db.datasz { if err := db.mmap(minsz); err != nil { return nil, fmt.Errorf("mmap allocate error: %s", err) @@ -1135,7 +1171,8 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) + curPgid := db.rwtx.meta.Pgid() + db.rwtx.meta.SetPgid(curPgid + common.Pgid(count)) return p, nil } @@ -1143,7 +1180,13 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { // grow grows the size of the database to the given sz. func (db *DB) grow(sz int) error { // Ignore if the new size is less than available file size. 
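The meta hunk above keeps bbolt's double-meta recovery rule while moving the struct into internal/common: prefer the copy with the higher txid, and fall back to the other copy if the preferred one fails validation. A simplified illustration of the rule; the meta type and validate stub below are stand-ins, not the real types:

```go
package main

import "fmt"

type meta struct {
	txid  uint64
	valid bool // stands in for the real magic/version/checksum checks
}

func (m *meta) validate() error {
	if !m.valid {
		return fmt.Errorf("invalid meta")
	}
	return nil
}

// pick mirrors db.meta(): take the higher-txid copy if it validates,
// otherwise recover from the older copy.
func pick(m0, m1 *meta) (*meta, error) {
	a, b := m0, m1
	if m1.txid > m0.txid {
		a, b = m1, m0
	}
	if err := a.validate(); err == nil {
		return a, nil
	}
	if err := b.validate(); err == nil {
		return b, nil
	}
	return nil, fmt.Errorf("both meta pages invalid")
}

func main() {
	newer := &meta{txid: 7, valid: false} // torn write on the newer copy
	older := &meta{txid: 6, valid: true}
	m, err := pick(newer, older)
	fmt.Println(m.txid, err) // 6 <nil>: recovered from the older copy
}
```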
- if sz <= db.filesz { + lg := db.Logger() + fileSize, err := db.fileSize() + if err != nil { + lg.Errorf("getting file size failed: %w", err) + return err + } + if sz <= fileSize { return nil } @@ -1162,21 +1205,22 @@ func (db *DB) grow(sz int) error { // gofail: var resizeFileError string // return errors.New(resizeFileError) if err := db.file.Truncate(int64(sz)); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] truncating file failed, size: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, sz, db.datasz, err) return fmt.Errorf("file resize error: %s", err) } } if err := db.file.Sync(); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing file failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("file sync error: %s", err) } if db.Mlock { // unlock old file and lock new one - if err := db.mrelock(db.filesz, sz); err != nil { + if err := db.mrelock(fileSize, sz); err != nil { return fmt.Errorf("mlock/munlock error: %s", err) } } } - db.filesz = sz return nil } @@ -1184,7 +1228,7 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } -func (db *DB) freepages() []pgid { +func (db *DB) freepages() []common.Pgid { tx, err := db.beginTx() defer func() { err = tx.Rollback() @@ -1196,21 +1240,21 @@ func (db *DB) freepages() []pgid { panic("freepages: failed to open read only tx") } - reachable := make(map[pgid]*page) - nofreed := make(map[pgid]bool) + reachable := make(map[common.Pgid]*common.Page) + nofreed := make(map[common.Pgid]bool) ech := make(chan error) go func() { for e := range ech { panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) } }() - tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) + tx.recursivelyCheckBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) close(ech) // TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages. - var fids []pgid - for i := pgid(2); i < db.meta().pgid; i++ { + var fids []common.Pgid + for i := common.Pgid(2); i < db.meta().Pgid(); i++ { if _, ok := reachable[i]; !ok { fids = append(fids, i) } @@ -1218,11 +1262,17 @@ func (db *DB) freepages() []pgid { return fids } +func newFreelist(freelistType FreelistType) fl.Interface { + if freelistType == FreelistMapType { + return fl.NewHashMapFreelist() + } + return fl.NewArrayFreelist() +} + // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. + // When set to zero it will wait indefinitely. Timeout time.Duration // Sets the DB.NoGrowSync flag before memory mapping the file. @@ -1277,6 +1327,19 @@ type Options struct { // It prevents potential page faults, however // used memory can't be reclaimed. (UNIX only) Mlock bool + + // Logger is the logger used for bbolt. + Logger Logger +} + +func (o *Options) String() string { + if o == nil { + return "{}" + } + + return fmt.Sprintf("{Timeout: %s, NoGrowSync: %t, NoFreelistSync: %t, PreLoadFreelist: %t, FreelistType: %s, ReadOnly: %t, MmapFlags: %x, InitialMmapSize: %d, PageSize: %d, NoSync: %t, OpenFile: %p, Mlock: %t, Logger: %p}", + o.Timeout, o.NoGrowSync, o.NoFreelistSync, o.PreLoadFreelist, o.FreelistType, o.ReadOnly, o.MmapFlags, o.InitialMmapSize, o.PageSize, o.NoSync, o.OpenFile, o.Mlock, o.Logger) + } // DefaultOptions represent the options used if nil options are passed into Open(). 
@@ -1327,65 +1390,3 @@ type Info struct { Data uintptr PageSize int } - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { - // TODO: reject pgidNoFreeList if !NoFreelistSync - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go index f2c3b20ed8b..02958c86f5d 100644 --- a/vendor/go.etcd.io/bbolt/errors.go +++ b/vendor/go.etcd.io/bbolt/errors.go @@ -1,78 +1,108 @@ package bbolt -import "errors" +import "go.etcd.io/bbolt/errors" // These errors can be returned when opening or calling methods on a DB. var ( // ErrDatabaseNotOpen is returned when a DB instance is accessed before it // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseNotOpen = errors.ErrDatabaseNotOpen // ErrInvalid is returned when both meta pages on a database are invalid. // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrInvalid = errors.ErrInvalid // ErrInvalidMapping is returned when the database file fails to get mapped. - ErrInvalidMapping = errors.New("database isn't correctly mapped") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrInvalidMapping = errors.ErrInvalidMapping // ErrVersionMismatch is returned when the data file was created with a // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrVersionMismatch = errors.ErrVersionMismatch - // ErrChecksum is returned when either meta page checksum does not match. 
- ErrChecksum = errors.New("checksum error") + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrChecksum = errors.ErrChecksum // ErrTimeout is returned when a database cannot obtain an exclusive lock // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTimeout = errors.ErrTimeout ) // These errors can occur when beginning or committing a Tx. var ( // ErrTxNotWritable is returned when performing a write operation on a // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxNotWritable = errors.ErrTxNotWritable // ErrTxClosed is returned when committing or rolling back a transaction // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxClosed = errors.ErrTxClosed // ErrDatabaseReadOnly is returned when a mutating transaction is started on a // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseReadOnly = errors.ErrDatabaseReadOnly // ErrFreePagesNotLoaded is returned when a readonly transaction without // preloading the free pages is trying to access the free pages. - ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrFreePagesNotLoaded = errors.ErrFreePagesNotLoaded ) // These errors can occur when putting or deleting a value or a bucket. var ( // ErrBucketNotFound is returned when trying to access a bucket that has // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketNotFound = errors.ErrBucketNotFound // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketExists = errors.ErrBucketExists // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketNameRequired = errors.ErrBucketNameRequired // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyRequired = errors.ErrKeyRequired // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyTooLarge = errors.ErrKeyTooLarge // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. 
+ ErrValueTooLarge = errors.ErrValueTooLarge // ErrIncompatibleValue is returned when trying create or delete a bucket // on an existing non-bucket key or when trying to create or delete a // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrIncompatibleValue = errors.ErrIncompatibleValue ) diff --git a/vendor/go.etcd.io/bbolt/errors/errors.go b/vendor/go.etcd.io/bbolt/errors/errors.go new file mode 100644 index 00000000000..c115289e56c --- /dev/null +++ b/vendor/go.etcd.io/bbolt/errors/errors.go @@ -0,0 +1,84 @@ +// Package errors defines the error variables that may be returned +// during bbolt operations. +package errors + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrInvalidMapping is returned when the database file fails to get mapped. + ErrInvalidMapping = errors.New("database isn't correctly mapped") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") + + // ErrFreePagesNotLoaded is returned when a readonly transaction without + // preloading the free pages is trying to access the free pages. + ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. 
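With the deprecation aliases above, the old top-level variables and the new bbolt/errors package refer to the same error values, so errors.Is matches either spelling. A sketch of the migration path; the tx-closed failure is just a convenient example:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
	berrors "go.etcd.io/bbolt/errors"
)

func main() {
	db, err := bolt.Open("err-example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	tx, err := db.Begin(false)
	if err != nil {
		log.Fatal(err)
	}
	_ = tx.Rollback()

	// Rolling back an already-closed transaction fails with ErrTxClosed.
	rbErr := tx.Rollback()

	// Old and new spellings alias the same variable, so both match.
	fmt.Println(errors.Is(rbErr, bolt.ErrTxClosed))    // true
	fmt.Println(errors.Is(rbErr, berrors.ErrTxClosed)) // true
}
```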
+ ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying to create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") + + // ErrSameBuckets is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are the same. + ErrSameBuckets = errors.New("the source and target are the same bucket") + + // ErrDifferentDB is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are in different database files. + ErrDifferentDB = errors.New("the source and target buckets are in different database files") +) diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go deleted file mode 100644 index dffc7bc749b..00000000000 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ /dev/null @@ -1,410 +0,0 @@ -package bbolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// txPending holds a list of pgids and corresponding allocation txns -// that are pending to be freed. -type txPending struct { - ids []pgid - alloctx []txid // txids allocating the ids - lastReleaseBegin txid // beginning txid of last matching releaseRange -} - -// pidSet holds the set of starting pgids which have the same span size -type pidSet map[pgid]struct{} - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - freelistType FreelistType // freelist type - ids []pgid // all free and available free page ids. - allocs map[pgid]txid // mapping of txid that allocated a pgid. - pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[pgid]struct{} // fast lookup of all free and pending page ids. - freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size - forwardMap map[pgid]uint64 // key is start pgid, value is its span size - backwardMap map[pgid]uint64 // key is end pgid, value is its span size - allocate func(txid txid, n int) pgid // the freelist allocate func - free_count func() int // the function which gives you free page number - mergeSpans func(ids pgids) // the mergeSpan func - getFreePageIDs func() []pgid // get free pgids func - readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist(freelistType FreelistType) *freelist { - f := &freelist{ - freelistType: freelistType, - allocs: make(map[pgid]txid), - pending: make(map[txid]*txPending), - cache: make(map[pgid]struct{}), - freemaps: make(map[uint64]pidSet), - forwardMap: make(map[pgid]uint64), - backwardMap: make(map[pgid]uint64), - } - - if freelistType == FreelistMapType { - f.allocate = f.hashmapAllocate - f.free_count = f.hashmapFreeCount - f.mergeSpans = f.hashmapMergeSpans - f.getFreePageIDs = f.hashmapGetFreePageIDs - f.readIDs = f.hashmapReadIDs - } else { - f.allocate = f.arrayAllocate - f.free_count = f.arrayFreeCount - f.mergeSpans = f.arrayMergeSpans - f.getFreePageIDs = f.arrayGetFreePageIDs - f.readIDs = f.arrayReadIDs - } - - return f -} - -// size returns the size of the page after serialization. 
-func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. - n++ - } - return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// arrayFreeCount returns count of free pages(array version) -func (f *freelist) arrayFreeCount() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, txp := range f.pending { - count += len(txp.ids) - } - return count -} - -// copyall copies a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, txp := range f.pending { - m = append(m, txp.ids...) - } - sort.Sort(m) - mergepgids(dst, f.getFreePageIDs(), m) -} - -// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) arrayAllocate(txid txid, n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - f.allocs[initial] = txid - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - txp := f.pending[txid] - if txp == nil { - txp = &txPending{} - f.pending[txid] = txp - } - allocTxid, ok := f.allocs[p.id] - if ok { - delete(f.allocs, p.id) - } else if (p.flags & freelistPageFlag) != 0 { - // Freelist is always allocated by prior tx. - allocTxid = txid - 1 - } - - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if _, ok := f.cache[id]; ok { - panic(fmt.Sprintf("page %d already freed", id)) - } - // Add to the freelist and cache. - txp.ids = append(txp.ids, id) - txp.alloctx = append(txp.alloctx, allocTxid) - f.cache[id] = struct{}{} - } -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, txp := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, txp.ids...) 
- delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. -func (f *freelist) releaseRange(begin, end txid) { - if begin > end { - return - } - var m pgids - for tid, txp := range f.pending { - if tid < begin || tid > end { - continue - } - // Don't recompute freed pages if ranges haven't updated. - if txp.lastReleaseBegin == begin { - continue - } - for i := 0; i < len(txp.ids); i++ { - if atx := txp.alloctx[i]; atx < begin || atx > end { - continue - } - m = append(m, txp.ids[i]) - txp.ids[i] = txp.ids[len(txp.ids)-1] - txp.ids = txp.ids[:len(txp.ids)-1] - txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] - txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] - i-- - } - txp.lastReleaseBegin = begin - if len(txp.ids) == 0 { - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - txp := f.pending[txid] - if txp == nil { - return - } - var m pgids - for i, pgid := range txp.ids { - delete(f.cache, pgid) - tx := txp.alloctx[i] - if tx == 0 { - continue - } - if tx != txid { - // Pending free aborted; restore page back to alloc list. - f.allocs[pgid] = tx - } else { - // Freed page was allocated by this txn; OK to throw away. - m = append(m, pgid) - } - } - // Remove pages from pending list and mark as free if allocated by txid. - delete(f.pending, txid) - - // Remove pgids which are allocated by this txid - for pgid, tid := range f.allocs { - if tid == txid { - delete(f.allocs, pgid) - } - } - - f.mergeSpans(m) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgId pgid) bool { - _, ok := f.cache[pgId] - return ok -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - if (p.flags & freelistPageFlag) == 0 { - panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) - } - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - var idx, count = 0, int(p.count) - if count == 0xFFFF { - idx = 1 - c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) - count = int(c) - if count < 0 { - panic(fmt.Sprintf("leading element count %d overflows int", c)) - } - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(pgid(0)), idx) - ids := unsafe.Slice((*pgid)(data), count) - - // copy the ids, so we don't modify on the freelist page directly - idsCopy := make([]pgid, count) - copy(idsCopy, ids) - // Make sure they're sorted. - sort.Sort(pgids(idsCopy)) - - f.readIDs(idsCopy) - } -} - -// arrayReadIDs initializes the freelist from a given list of ids. -func (f *freelist) arrayReadIDs(ids []pgid) { - f.ids = ids - f.reindex() -} - -func (f *freelist) arrayGetFreePageIDs() []pgid { - return f.ids -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. 
- p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - l := f.count() - if l == 0 { - p.count = uint16(l) - } else if l < 0xFFFF { - p.count = uint16(l) - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*pgid)(data), l) - f.copyall(ids) - } else { - p.count = 0xFFFF - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*pgid)(data), l+1) - ids[0] = pgid(l) - f.copyall(ids[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.getFreePageIDs() { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// noSyncReload reads the freelist from pgids and filters out pending items. -func (f *freelist) noSyncReload(pgids []pgid) { - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range pgids { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// reindex rebuilds the free cache based on available and pending free lists. -func (f *freelist) reindex() { - ids := f.getFreePageIDs() - f.cache = make(map[pgid]struct{}, len(ids)) - for _, id := range ids { - f.cache[id] = struct{}{} - } - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - f.cache[pendingID] = struct{}{} - } - } -} - -// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array -func (f *freelist) arrayMergeSpans(ids pgids) { - sort.Sort(ids) - f.ids = pgids(f.ids).merge(ids) -} diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go deleted file mode 100644 index dbd67a1e736..00000000000 --- a/vendor/go.etcd.io/bbolt/freelist_hmap.go +++ /dev/null @@ -1,178 +0,0 @@ -package bbolt - -import "sort" - -// hashmapFreeCount returns count of free pages(hashmap version) -func (f *freelist) hashmapFreeCount() int { - // use the forwardMap to get the total count - count := 0 - for _, size := range f.forwardMap { - count += int(size) - } - return count -} - -// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend -func (f *freelist) hashmapAllocate(txid txid, n int) pgid { - if n == 0 { - return 0 - } - - // if we have a exact size match just return short path - if bm, ok := f.freemaps[uint64(n)]; ok { - for pid := range bm { - // remove the span - f.delSpan(pid, uint64(n)) - - f.allocs[pid] = txid - - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - // lookup the map to find larger span - for size, bm := range f.freemaps { - if size < uint64(n) { - continue - } - - for pid := range bm { - // remove the initial - f.delSpan(pid, size) - - f.allocs[pid] = txid - - remain := size - uint64(n) - - // add remain span - f.addSpan(pid+pgid(n), remain) - - for 
i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - return 0 -} - -// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version) -func (f *freelist) hashmapReadIDs(pgids []pgid) { - f.init(pgids) - - // Rebuild the page cache. - f.reindex() -} - -// hashmapGetFreePageIDs returns the sorted free page ids -func (f *freelist) hashmapGetFreePageIDs() []pgid { - count := f.free_count() - if count == 0 { - return nil - } - - m := make([]pgid, 0, count) - for start, size := range f.forwardMap { - for i := 0; i < int(size); i++ { - m = append(m, start+pgid(i)) - } - } - sort.Sort(pgids(m)) - - return m -} - -// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans -func (f *freelist) hashmapMergeSpans(ids pgids) { - for _, id := range ids { - // try to see if we can merge and update - f.mergeWithExistingSpan(id) - } -} - -// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward -func (f *freelist) mergeWithExistingSpan(pid pgid) { - prev := pid - 1 - next := pid + 1 - - preSize, mergeWithPrev := f.backwardMap[prev] - nextSize, mergeWithNext := f.forwardMap[next] - newStart := pid - newSize := uint64(1) - - if mergeWithPrev { - //merge with previous span - start := prev + 1 - pgid(preSize) - f.delSpan(start, preSize) - - newStart -= pgid(preSize) - newSize += preSize - } - - if mergeWithNext { - // merge with next span - f.delSpan(next, nextSize) - newSize += nextSize - } - - f.addSpan(newStart, newSize) -} - -func (f *freelist) addSpan(start pgid, size uint64) { - f.backwardMap[start-1+pgid(size)] = size - f.forwardMap[start] = size - if _, ok := f.freemaps[size]; !ok { - f.freemaps[size] = make(map[pgid]struct{}) - } - - f.freemaps[size][start] = struct{}{} -} - -func (f *freelist) delSpan(start pgid, size uint64) { - delete(f.forwardMap, start) - delete(f.backwardMap, start+pgid(size-1)) - delete(f.freemaps[size], start) - if len(f.freemaps[size]) == 0 { - delete(f.freemaps, size) - } -} - -// initial from pgids using when use hashmap version -// pgids must be sorted -func (f *freelist) init(pgids []pgid) { - if len(pgids) == 0 { - return - } - - size := uint64(1) - start := pgids[0] - - if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { - panic("pgids not sorted") - } - - f.freemaps = make(map[uint64]pidSet) - f.forwardMap = make(map[pgid]uint64) - f.backwardMap = make(map[pgid]uint64) - - for i := 1; i < len(pgids); i++ { - // continuous page - if pgids[i] == pgids[i-1]+1 { - size++ - } else { - f.addSpan(start, size) - - size = 1 - start = pgids[i] - } - } - - // init the tail - if size != 0 && start != 0 { - f.addSpan(start, size) - } -} diff --git a/vendor/go.etcd.io/bbolt/internal/common/bucket.go b/vendor/go.etcd.io/bbolt/internal/common/bucket.go new file mode 100644 index 00000000000..2b4ab1453a1 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bucket.go @@ -0,0 +1,54 @@ +package common + +import ( + "fmt" + "unsafe" +) + +const BucketHeaderSize = int(unsafe.Sizeof(InBucket{})) + +// InBucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. 
+type InBucket struct { + root Pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +func NewInBucket(root Pgid, seq uint64) InBucket { + return InBucket{ + root: root, + sequence: seq, + } +} + +func (b *InBucket) RootPage() Pgid { + return b.root +} + +func (b *InBucket) SetRootPage(id Pgid) { + b.root = id +} + +// InSequence returns the sequence. The reason why not naming it `Sequence` +// is to avoid duplicated name as `(*Bucket) Sequence()` +func (b *InBucket) InSequence() uint64 { + return b.sequence +} + +func (b *InBucket) SetInSequence(v uint64) { + b.sequence = v +} + +func (b *InBucket) IncSequence() { + b.sequence++ +} + +func (b *InBucket) InlinePage(v []byte) *Page { + return (*Page)(unsafe.Pointer(&v[BucketHeaderSize])) +} + +func (b *InBucket) String() string { + return fmt.Sprintf("", b.root, b.sequence) +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/inode.go b/vendor/go.etcd.io/bbolt/internal/common/inode.go new file mode 100644 index 00000000000..080b9af789d --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/inode.go @@ -0,0 +1,115 @@ +package common + +import "unsafe" + +// Inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type Inode struct { + flags uint32 + pgid Pgid + key []byte + value []byte +} + +type Inodes []Inode + +func (in *Inode) Flags() uint32 { + return in.flags +} + +func (in *Inode) SetFlags(flags uint32) { + in.flags = flags +} + +func (in *Inode) Pgid() Pgid { + return in.pgid +} + +func (in *Inode) SetPgid(id Pgid) { + in.pgid = id +} + +func (in *Inode) Key() []byte { + return in.key +} + +func (in *Inode) SetKey(key []byte) { + in.key = key +} + +func (in *Inode) Value() []byte { + return in.value +} + +func (in *Inode) SetValue(value []byte) { + in.value = value +} + +func ReadInodeFromPage(p *Page) Inodes { + inodes := make(Inodes, int(p.Count())) + isLeaf := p.IsLeafPage() + for i := 0; i < int(p.Count()); i++ { + inode := &inodes[i] + if isLeaf { + elem := p.LeafPageElement(uint16(i)) + inode.SetFlags(elem.Flags()) + inode.SetKey(elem.Key()) + inode.SetValue(elem.Value()) + } else { + elem := p.BranchPageElement(uint16(i)) + inode.SetPgid(elem.Pgid()) + inode.SetKey(elem.Key()) + } + Assert(len(inode.Key()) > 0, "read: zero-length inode key") + } + + return inodes +} + +func WriteInodeToPage(inodes Inodes, p *Page) uint32 { + // Loop over each item and write it to the page. + // off tracks the offset into p of the start of the next data. + off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes)) + isLeaf := p.IsLeafPage() + for i, item := range inodes { + Assert(len(item.Key()) > 0, "write: zero-length inode key") + + // Create a slice to write into of needed size and advance + // byte pointer for next iteration. + sz := len(item.Key()) + len(item.Value()) + b := UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz) + off += uintptr(sz) + + // Write the page element. 
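+		// Note: each element's pos is stored relative to the element's own
+		// address, so Key()/Value() can later be resolved without knowing
+		// where the page starts.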
+ if isLeaf { + elem := p.LeafPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetFlags(item.Flags()) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetVsize(uint32(len(item.Value()))) + } else { + elem := p.BranchPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetPgid(item.Pgid()) + Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred") + } + + // Write data for the element to the end of the page. + l := copy(b, item.Key()) + copy(b[l:], item.Value()) + } + + return uint32(off) +} + +func UsedSpaceInPage(inodes Inodes, p *Page) uint32 { + off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes)) + for _, item := range inodes { + sz := len(item.Key()) + len(item.Value()) + off += uintptr(sz) + } + + return uint32(off) +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/meta.go b/vendor/go.etcd.io/bbolt/internal/common/meta.go new file mode 100644 index 00000000000..055388604af --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/meta.go @@ -0,0 +1,161 @@ +package common + +import ( + "fmt" + "hash/fnv" + "io" + "unsafe" + + "go.etcd.io/bbolt/errors" +) + +type Meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root InBucket + freelist Pgid + pgid Pgid + txid Txid + checksum uint64 +} + +// Validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *Meta) Validate() error { + if m.magic != Magic { + return errors.ErrInvalid + } else if m.version != Version { + return errors.ErrVersionMismatch + } else if m.checksum != m.Sum64() { + return errors.ErrChecksum + } + return nil +} + +// Copy copies one meta object to another. +func (m *Meta) Copy(dest *Meta) { + *dest = *m +} + +// Write writes the meta onto a page. +func (m *Meta) Write(p *Page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid && m.freelist != PgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = Pgid(m.txid % 2) + p.SetFlags(MetaPageFlag) + + // Calculate the checksum. + m.checksum = m.Sum64() + + m.Copy(p.Meta()) +} + +// Sum64 generates the checksum for the meta. 
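+// It hashes every field preceding the checksum field itself, i.e. the first
+// unsafe.Offsetof(Meta{}.checksum) bytes of the struct, using FNV-1a.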
+func (m *Meta) Sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(Meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +func (m *Meta) Magic() uint32 { + return m.magic +} + +func (m *Meta) SetMagic(v uint32) { + m.magic = v +} + +func (m *Meta) Version() uint32 { + return m.version +} + +func (m *Meta) SetVersion(v uint32) { + m.version = v +} + +func (m *Meta) PageSize() uint32 { + return m.pageSize +} + +func (m *Meta) SetPageSize(v uint32) { + m.pageSize = v +} + +func (m *Meta) Flags() uint32 { + return m.flags +} + +func (m *Meta) SetFlags(v uint32) { + m.flags = v +} + +func (m *Meta) SetRootBucket(b InBucket) { + m.root = b +} + +func (m *Meta) RootBucket() *InBucket { + return &m.root +} + +func (m *Meta) Freelist() Pgid { + return m.freelist +} + +func (m *Meta) SetFreelist(v Pgid) { + m.freelist = v +} + +func (m *Meta) IsFreelistPersisted() bool { + return m.freelist != PgidNoFreelist +} + +func (m *Meta) Pgid() Pgid { + return m.pgid +} + +func (m *Meta) SetPgid(id Pgid) { + m.pgid = id +} + +func (m *Meta) Txid() Txid { + return m.txid +} + +func (m *Meta) SetTxid(id Txid) { + m.txid = id +} + +func (m *Meta) IncTxid() { + m.txid += 1 +} + +func (m *Meta) DecTxid() { + m.txid -= 1 +} + +func (m *Meta) Checksum() uint64 { + return m.checksum +} + +func (m *Meta) SetChecksum(v uint64) { + m.checksum = v +} + +func (m *Meta) Print(w io.Writer) { + fmt.Fprintf(w, "Version: %d\n", m.version) + fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize) + fmt.Fprintf(w, "Flags: %08x\n", m.flags) + fmt.Fprintf(w, "Root: \n", m.root.root) + fmt.Fprintf(w, "Freelist: \n", m.freelist) + fmt.Fprintf(w, "HWM: \n", m.pgid) + fmt.Fprintf(w, "Txn ID: %d\n", m.txid) + fmt.Fprintf(w, "Checksum: %016x\n", m.checksum) + fmt.Fprintf(w, "\n") +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/page.go b/vendor/go.etcd.io/bbolt/internal/common/page.go new file mode 100644 index 00000000000..ee808967c57 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/page.go @@ -0,0 +1,391 @@ +package common + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const PageHeaderSize = unsafe.Sizeof(Page{}) + +const MinKeysPerPage = 2 + +const BranchPageElementSize = unsafe.Sizeof(branchPageElement{}) +const LeafPageElementSize = unsafe.Sizeof(leafPageElement{}) +const pgidSize = unsafe.Sizeof(Pgid(0)) + +const ( + BranchPageFlag = 0x01 + LeafPageFlag = 0x02 + MetaPageFlag = 0x04 + FreelistPageFlag = 0x10 +) + +const ( + BucketLeafFlag = 0x01 +) + +type Pgid uint64 + +type Page struct { + id Pgid + flags uint16 + count uint16 + overflow uint32 +} + +func NewPage(id Pgid, flags, count uint16, overflow uint32) *Page { + return &Page{ + id: id, + flags: flags, + count: count, + overflow: overflow, + } +} + +// Typ returns a human-readable page type string used for debugging. +func (p *Page) Typ() string { + if p.IsBranchPage() { + return "branch" + } else if p.IsLeafPage() { + return "leaf" + } else if p.IsMetaPage() { + return "meta" + } else if p.IsFreelistPage() { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +func (p *Page) IsBranchPage() bool { + return p.flags == BranchPageFlag +} + +func (p *Page) IsLeafPage() bool { + return p.flags == LeafPageFlag +} + +func (p *Page) IsMetaPage() bool { + return p.flags == MetaPageFlag +} + +func (p *Page) IsFreelistPage() bool { + return p.flags == FreelistPageFlag +} + +// Meta returns a pointer to the metadata section of the page. 
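+// The Meta struct is stored immediately after the page header.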
+func (p *Page) Meta() *Meta { + return (*Meta)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) +} + +func (p *Page) FastCheck(id Pgid) { + Assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) + // Only one flag of page-type can be set. + Assert(p.IsBranchPage() || + p.IsLeafPage() || + p.IsMetaPage() || + p.IsFreelistPage(), + "page %v: has unexpected type/flags: %x", p.id, p.flags) +} + +// LeafPageElement retrieves the leaf node by index +func (p *Page) LeafPageElement(index uint16) *leafPageElement { + return (*leafPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + LeafPageElementSize, int(index))) +} + +// LeafPageElements retrieves a list of leaf nodes. +func (p *Page) LeafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + elems := unsafe.Slice((*leafPageElement)(data), int(p.count)) + return elems +} + +// BranchPageElement retrieves the branch node by index +func (p *Page) BranchPageElement(index uint16) *branchPageElement { + return (*branchPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + unsafe.Sizeof(branchPageElement{}), int(index))) +} + +// BranchPageElements retrieves a list of branch nodes. +func (p *Page) BranchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + elems := unsafe.Slice((*branchPageElement)(data), int(p.count)) + return elems +} + +func (p *Page) FreelistPageCount() (int, int) { + Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags)) + + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + var idx, count = 0, int(p.count) + if count == 0xFFFF { + idx = 1 + c := *(*Pgid)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) + count = int(c) + if count < 0 { + panic(fmt.Sprintf("leading element count %d overflows int", c)) + } + } + + return idx, count +} + +func (p *Page) FreelistPageIds() []Pgid { + Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags)) + + idx, count := p.FreelistPageCount() + + if count == 0 { + return nil + } + + data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), pgidSize, idx) + ids := unsafe.Slice((*Pgid)(data), count) + + return ids +} + +// dump writes n bytes of the page to STDERR as hex output. 
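+// Intended for debugging only; the hex output is written to os.Stderr.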
+func (p *Page) hexdump(n int) { + buf := UnsafeByteSlice(unsafe.Pointer(p), 0, 0, n) + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +func (p *Page) PageElementSize() uintptr { + if p.IsLeafPage() { + return LeafPageElementSize + } + return BranchPageElementSize +} + +func (p *Page) Id() Pgid { + return p.id +} + +func (p *Page) SetId(target Pgid) { + p.id = target +} + +func (p *Page) Flags() uint16 { + return p.flags +} + +func (p *Page) SetFlags(v uint16) { + p.flags = v +} + +func (p *Page) Count() uint16 { + return p.count +} + +func (p *Page) SetCount(target uint16) { + p.count = target +} + +func (p *Page) Overflow() uint32 { + return p.overflow +} + +func (p *Page) SetOverflow(target uint32) { + p.overflow = target +} + +func (p *Page) String() string { + return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Typ(), p.count, p.overflow) +} + +type Pages []*Page + +func (s Pages) Len() int { return len(s) } +func (s Pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid Pgid +} + +func (n *branchPageElement) Pos() uint32 { + return n.pos +} + +func (n *branchPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *branchPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *branchPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *branchPageElement) Pgid() Pgid { + return n.pgid +} + +func (n *branchPageElement) SetPgid(v Pgid) { + n.pgid = v +} + +// Key returns a byte slice of the node key. +func (n *branchPageElement) Key() []byte { + return UnsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +func NewLeafPageElement(flags, pos, ksize, vsize uint32) *leafPageElement { + return &leafPageElement{ + flags: flags, + pos: pos, + ksize: ksize, + vsize: vsize, + } +} + +func (n *leafPageElement) Flags() uint32 { + return n.flags +} + +func (n *leafPageElement) SetFlags(v uint32) { + n.flags = v +} + +func (n *leafPageElement) Pos() uint32 { + return n.pos +} + +func (n *leafPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *leafPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *leafPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *leafPageElement) Vsize() uint32 { + return n.vsize +} + +func (n *leafPageElement) SetVsize(v uint32) { + n.vsize = v +} + +// Key returns a byte slice of the node key. +func (n *leafPageElement) Key() []byte { + i := int(n.pos) + j := i + int(n.ksize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// Value returns a byte slice of the node value. +func (n *leafPageElement) Value() []byte { + i := int(n.pos) + int(n.ksize) + j := i + int(n.vsize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +func (n *leafPageElement) IsBucketEntry() bool { + return n.flags&uint32(BucketLeafFlag) != 0 +} + +func (n *leafPageElement) Bucket() *InBucket { + if n.IsBucketEntry() { + return LoadBucket(n.Value()) + } else { + return nil + } +} + +// PageInfo represents human readable information about a page. 
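+// It is consumed by introspection APIs such as Tx.Page rather than by the
+// core read/write path.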
+type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type Pgids []Pgid + +func (s Pgids) Len() int { return len(s) } +func (s Pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pgids) Less(i, j int) bool { return s[i] < s[j] } + +// Merge returns the sorted union of a and b. +func (a Pgids) Merge(b Pgids) Pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(Pgids, len(a)+len(b)) + Mergepgids(merged, a, b) + return merged +} + +// Mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func Mergepgids(dst, a, b Pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/types.go b/vendor/go.etcd.io/bbolt/internal/common/types.go new file mode 100644 index 00000000000..8ad8279a090 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/types.go @@ -0,0 +1,40 @@ +package common + +import ( + "os" + "runtime" + "time" +) + +// MaxMmapStep is the largest step that can be taken when remapping the mmap. +const MaxMmapStep = 1 << 30 // 1GB + +// Version represents the data file format version. +const Version uint32 = 2 + +// Magic represents a marker value to indicate that a file is a Bolt DB. +const Magic uint32 = 0xED0CDAED + +const PgidNoFreelist Pgid = 0xffffffffffffffff + +// DO NOT EDIT. Copied from the "bolt" package. +const pageMaxAllocSize = 0xFFFFFFF + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// DefaultPageSize is the default page size for db which is set to the OS page size. +var DefaultPageSize = os.Getpagesize() + +// Txid represents the internal transaction identifier. 
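+// Txids increase monotonically with each write transaction; of the two meta
+// pages, the one carrying the higher valid Txid is treated as current.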
+type Txid uint64 diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go similarity index 74% rename from vendor/go.etcd.io/bbolt/unsafe.go rename to vendor/go.etcd.io/bbolt/internal/common/unsafe.go index 7745d32ce19..9b77dd7b2b8 100644 --- a/vendor/go.etcd.io/bbolt/unsafe.go +++ b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go @@ -1,18 +1,18 @@ -package bbolt +package common import ( "unsafe" ) -func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { +func UnsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { return unsafe.Pointer(uintptr(base) + offset) } -func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { +func UnsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) } -func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { +func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices // // This memory is not allocated from C, but it is unmanaged by Go's @@ -23,5 +23,5 @@ func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // index 0. However, the wiki never says that the address must be to // the beginning of a C allocation (or even that malloc was used at // all), so this is believed to be correct. - return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] + return (*[pageMaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j] } diff --git a/vendor/go.etcd.io/bbolt/internal/common/utils.go b/vendor/go.etcd.io/bbolt/internal/common/utils.go new file mode 100644 index 00000000000..bdf82a7b00b --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/utils.go @@ -0,0 +1,64 @@ +package common + +import ( + "fmt" + "io" + "os" + "unsafe" +) + +func LoadBucket(buf []byte) *InBucket { + return (*InBucket)(unsafe.Pointer(&buf[0])) +} + +func LoadPage(buf []byte) *Page { + return (*Page)(unsafe.Pointer(&buf[0])) +} + +func LoadPageMeta(buf []byte) *Meta { + return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize])) +} + +func CopyFile(srcPath, dstPath string) error { + // Ensure source file exists. + _, err := os.Stat(srcPath) + if os.IsNotExist(err) { + return fmt.Errorf("source file %q not found", srcPath) + } else if err != nil { + return err + } + + // Ensure output file not exist. 
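+	// (A nil error from Stat means dstPath already exists; refuse to
+	// overwrite it.)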
+ _, err = os.Stat(dstPath) + if err == nil { + return fmt.Errorf("output file %q already exists", dstPath) + } else if !os.IsNotExist(err) { + return err + } + + srcDB, err := os.Open(srcPath) + if err != nil { + return fmt.Errorf("failed to open source file %q: %w", srcPath, err) + } + defer srcDB.Close() + dstDB, err := os.Create(dstPath) + if err != nil { + return fmt.Errorf("failed to create output file %q: %w", dstPath, err) + } + defer dstDB.Close() + written, err := io.Copy(dstDB, srcDB) + if err != nil { + return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err) + } + + srcFi, err := srcDB.Stat() + if err != nil { + return fmt.Errorf("failed to get source file info %q: %w", srcPath, err) + } + initialSize := srcFi.Size() + if initialSize != written { + return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize) + } + + return nil +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/verify.go b/vendor/go.etcd.io/bbolt/internal/common/verify.go new file mode 100644 index 00000000000..eac95e26301 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/verify.go @@ -0,0 +1,67 @@ +// Copied from https://github.com/etcd-io/etcd/blob/main/client/pkg/verify/verify.go +package common + +import ( + "fmt" + "os" + "strings" +) + +const ENV_VERIFY = "BBOLT_VERIFY" + +type VerificationType string + +const ( + ENV_VERIFY_VALUE_ALL VerificationType = "all" + ENV_VERIFY_VALUE_ASSERT VerificationType = "assert" +) + +func getEnvVerify() string { + return strings.ToLower(os.Getenv(ENV_VERIFY)) +} + +func IsVerificationEnabled(verification VerificationType) bool { + env := getEnvVerify() + return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification)) +} + +// EnableVerifications sets `ENV_VERIFY` and returns a function that +// can be used to bring the original settings. +func EnableVerifications(verification VerificationType) func() { + previousEnv := getEnvVerify() + os.Setenv(ENV_VERIFY, string(verification)) + return func() { + os.Setenv(ENV_VERIFY, previousEnv) + } +} + +// EnableAllVerifications enables verification and returns a function +// that can be used to bring the original settings. +func EnableAllVerifications() func() { + return EnableVerifications(ENV_VERIFY_VALUE_ALL) +} + +// DisableVerifications unsets `ENV_VERIFY` and returns a function that +// can be used to bring the original settings. +func DisableVerifications() func() { + previousEnv := getEnvVerify() + os.Unsetenv(ENV_VERIFY) + return func() { + os.Setenv(ENV_VERIFY, previousEnv) + } +} + +// Verify performs verification if the assertions are enabled. +// In the default setup running in tests and skipped in the production code. +func Verify(f func()) { + if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) { + f() + } +} + +// Assert will panic with a given formatted message if the given condition is false. +func Assert(condition bool, msg string, v ...any) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/array.go b/vendor/go.etcd.io/bbolt/internal/freelist/array.go new file mode 100644 index 00000000000..0cc1ba71503 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/array.go @@ -0,0 +1,108 @@ +package freelist + +import ( + "fmt" + "sort" + + "go.etcd.io/bbolt/internal/common" +) + +type array struct { + *shared + + ids []common.Pgid // all free and available free page ids. 
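+	// ids is kept sorted in ascending order; Allocate scans it linearly for
+	// a contiguous run of n pages.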
+} + +func (f *array) Init(ids common.Pgids) { + f.ids = ids + f.reindex() +} + +func (f *array) Allocate(txid common.Txid, n int) common.Pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd common.Pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == common.Pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +func (f *array) FreeCount() int { + return len(f.ids) +} + +func (f *array) freePageIds() common.Pgids { + return f.ids +} + +func (f *array) mergeSpans(ids common.Pgids) { + sort.Sort(ids) + common.Verify(func() { + idsIdx := make(map[common.Pgid]struct{}) + for _, id := range f.ids { + // The existing f.ids shouldn't have duplicated free ID. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids)) + } + idsIdx[id] = struct{}{} + } + + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. Note page 0 and 1 + // are reserved for meta pages, so they can never be free page IDs. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.ids. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids)) + } + } + }) + f.ids = common.Pgids(f.ids).Merge(ids) +} + +func NewArrayFreelist() Interface { + a := &array{ + shared: newShared(), + ids: []common.Pgid{}, + } + a.Interface = a + return a +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go new file mode 100644 index 00000000000..2b819506bda --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go @@ -0,0 +1,82 @@ +package freelist + +import ( + "go.etcd.io/bbolt/internal/common" +) + +type ReadWriter interface { + // Read calls Init with the page ids stored in the given page. + Read(page *common.Page) + + // Write writes the freelist into the given page. + Write(page *common.Page) + + // EstimatedWritePageSize returns the size in bytes of the freelist after serialization in Write. + // This should never underestimate the size. + EstimatedWritePageSize() int +} + +type Interface interface { + ReadWriter + + // Init initializes this freelist with the given list of pages. + Init(ids common.Pgids) + + // Allocate tries to allocate the given number of contiguous pages + // from the free list pages. It returns the starting page ID if + // available; otherwise, it returns 0. + Allocate(txid common.Txid, numPages int) common.Pgid + + // Count returns the number of free and pending pages. + Count() int + + // FreeCount returns the number of free pages. 
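+	// Both implementations answer in O(1): the array returns len(ids), the
+	// hashmap maintains a running freePagesCount.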
+ FreeCount() int + + // PendingCount returns the number of pending pages. + PendingCount() int + + // AddReadonlyTXID adds a given read-only transaction id for pending page tracking. + AddReadonlyTXID(txid common.Txid) + + // RemoveReadonlyTXID removes a given read-only transaction id for pending page tracking. + RemoveReadonlyTXID(txid common.Txid) + + // ReleasePendingPages releases any pages associated with closed read-only transactions. + ReleasePendingPages() + + // Free releases a page and its overflow for a given transaction id. + // If the page is already free or is one of the meta pages, then a panic will occur. + Free(txId common.Txid, p *common.Page) + + // Freed returns whether a given page is in the free list. + Freed(pgId common.Pgid) bool + + // Rollback removes the pages from a given pending tx. + Rollback(txId common.Txid) + + // Copyall copies a list of all free ids and all pending ids in one sorted list. + // f.count returns the minimum length required for dst. + Copyall(dst []common.Pgid) + + // Reload reads the freelist from a page and filters out pending items. + Reload(p *common.Page) + + // NoSyncReload reads the freelist from Pgids and filters out pending items. + NoSyncReload(pgIds common.Pgids) + + // freePageIds returns the IDs of all free pages. Returns an empty slice if no free pages are available. + freePageIds() common.Pgids + + // pendingPageIds returns all pending pages by transaction id. + pendingPageIds() map[common.Txid]*txPending + + // release moves all page ids for a transaction id (or older) to the freelist. + release(txId common.Txid) + + // releaseRange moves pending pages allocated within an extent [begin,end] to the free list. + releaseRange(begin, end common.Txid) + + // mergeSpans is merging the given pages into the freelist + mergeSpans(ids common.Pgids) +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go new file mode 100644 index 00000000000..8d471f4b5bf --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go @@ -0,0 +1,292 @@ +package freelist + +import ( + "fmt" + "reflect" + "sort" + + "go.etcd.io/bbolt/internal/common" +) + +// pidSet holds the set of starting pgids which have the same span size +type pidSet map[common.Pgid]struct{} + +type hashMap struct { + *shared + + freePagesCount uint64 // count of free pages(hashmap version) + freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size + forwardMap map[common.Pgid]uint64 // key is start pgid, value is its span size + backwardMap map[common.Pgid]uint64 // key is end pgid, value is its span size +} + +func (f *hashMap) Init(pgids common.Pgids) { + // reset the counter when freelist init + f.freePagesCount = 0 + f.freemaps = make(map[uint64]pidSet) + f.forwardMap = make(map[common.Pgid]uint64) + f.backwardMap = make(map[common.Pgid]uint64) + + if len(pgids) == 0 { + return + } + + if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { + panic("pgids not sorted") + } + + size := uint64(1) + start := pgids[0] + + for i := 1; i < len(pgids); i++ { + // continuous page + if pgids[i] == pgids[i-1]+1 { + size++ + } else { + f.addSpan(start, size) + + size = 1 + start = pgids[i] + } + } + + // init the tail + if size != 0 && start != 0 { + f.addSpan(start, size) + } + + f.reindex() +} + +func (f *hashMap) Allocate(txid common.Txid, n int) common.Pgid { + if n == 0 { + return 0 + } + + 
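+	// Fast path first: look for a span of exactly n pages; otherwise fall
+	// back to carving n pages out of a larger span below.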
// if we have a exact size match just return short path + if bm, ok := f.freemaps[uint64(n)]; ok { + for pid := range bm { + // remove the span + f.delSpan(pid, uint64(n)) + + f.allocs[pid] = txid + + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + // lookup the map to find larger span + for size, bm := range f.freemaps { + if size < uint64(n) { + continue + } + + for pid := range bm { + // remove the initial + f.delSpan(pid, size) + + f.allocs[pid] = txid + + remain := size - uint64(n) + + // add remain span + f.addSpan(pid+common.Pgid(n), remain) + + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + return 0 +} + +func (f *hashMap) FreeCount() int { + common.Verify(func() { + expectedFreePageCount := f.hashmapFreeCountSlow() + common.Assert(int(f.freePagesCount) == expectedFreePageCount, + "freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount) + }) + return int(f.freePagesCount) +} + +func (f *hashMap) freePageIds() common.Pgids { + count := f.FreeCount() + if count == 0 { + return common.Pgids{} + } + + m := make([]common.Pgid, 0, count) + + startPageIds := make([]common.Pgid, 0, len(f.forwardMap)) + for k := range f.forwardMap { + startPageIds = append(startPageIds, k) + } + sort.Sort(common.Pgids(startPageIds)) + + for _, start := range startPageIds { + if size, ok := f.forwardMap[start]; ok { + for i := 0; i < int(size); i++ { + m = append(m, start+common.Pgid(i)) + } + } + } + + return m +} + +func (f *hashMap) hashmapFreeCountSlow() int { + count := 0 + for _, size := range f.forwardMap { + count += int(size) + } + return count +} + +func (f *hashMap) addSpan(start common.Pgid, size uint64) { + f.backwardMap[start-1+common.Pgid(size)] = size + f.forwardMap[start] = size + if _, ok := f.freemaps[size]; !ok { + f.freemaps[size] = make(map[common.Pgid]struct{}) + } + + f.freemaps[size][start] = struct{}{} + f.freePagesCount += size +} + +func (f *hashMap) delSpan(start common.Pgid, size uint64) { + delete(f.forwardMap, start) + delete(f.backwardMap, start+common.Pgid(size-1)) + delete(f.freemaps[size], start) + if len(f.freemaps[size]) == 0 { + delete(f.freemaps, size) + } + f.freePagesCount -= size +} + +func (f *hashMap) mergeSpans(ids common.Pgids) { + common.Verify(func() { + ids1Freemap := f.idsFromFreemaps() + ids2Forward := f.idsFromForwardMap() + ids3Backward := f.idsFromBackwardMap() + + if !reflect.DeepEqual(ids1Freemap, ids2Forward) { + panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.forwardMap: %v", f.freemaps, f.forwardMap)) + } + if !reflect.DeepEqual(ids1Freemap, ids3Backward) { + panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.backwardMap: %v", f.freemaps, f.backwardMap)) + } + + sort.Sort(ids) + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.freemaps. 
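+			// (An overlap would mean the same page is tracked as free twice,
+			// corrupting the span bookkeeping.)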
+ if _, ok := ids1Freemap[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.freemaps: %v", id, ids, f.freemaps)) + } + } + }) + for _, id := range ids { + // try to see if we can merge and update + f.mergeWithExistingSpan(id) + } +} + +// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward +func (f *hashMap) mergeWithExistingSpan(pid common.Pgid) { + prev := pid - 1 + next := pid + 1 + + preSize, mergeWithPrev := f.backwardMap[prev] + nextSize, mergeWithNext := f.forwardMap[next] + newStart := pid + newSize := uint64(1) + + if mergeWithPrev { + //merge with previous span + start := prev + 1 - common.Pgid(preSize) + f.delSpan(start, preSize) + + newStart -= common.Pgid(preSize) + newSize += preSize + } + + if mergeWithNext { + // merge with next span + f.delSpan(next, nextSize) + newSize += nextSize + } + + f.addSpan(newStart, newSize) +} + +// idsFromFreemaps get all free page IDs from f.freemaps. +// used by test only. +func (f *hashMap) idsFromFreemaps() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for size, idSet := range f.freemaps { + for start := range idSet { + for i := 0; i < int(size); i++ { + id := start + common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.freemaps: %v", id, f.freemaps)) + } + ids[id] = struct{}{} + } + } + } + return ids +} + +// idsFromForwardMap get all free page IDs from f.forwardMap. +// used by test only. +func (f *hashMap) idsFromForwardMap() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for start, size := range f.forwardMap { + for i := 0; i < int(size); i++ { + id := start + common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.forwardMap: %v", id, f.forwardMap)) + } + ids[id] = struct{}{} + } + } + return ids +} + +// idsFromBackwardMap get all free page IDs from f.backwardMap. +// used by test only. +func (f *hashMap) idsFromBackwardMap() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for end, size := range f.backwardMap { + for i := 0; i < int(size); i++ { + id := end - common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.backwardMap: %v", id, f.backwardMap)) + } + ids[id] = struct{}{} + } + } + return ids +} + +func NewHashMapFreelist() Interface { + hm := &hashMap{ + shared: newShared(), + freemaps: make(map[uint64]pidSet), + forwardMap: make(map[common.Pgid]uint64), + backwardMap: make(map[common.Pgid]uint64), + } + hm.Interface = hm + return hm +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/shared.go b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go new file mode 100644 index 00000000000..f2d1130083f --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go @@ -0,0 +1,310 @@ +package freelist + +import ( + "fmt" + "math" + "sort" + "unsafe" + + "go.etcd.io/bbolt/internal/common" +) + +type txPending struct { + ids []common.Pgid + alloctx []common.Txid // txids allocating the ids + lastReleaseBegin common.Txid // beginning txid of last matching releaseRange +} + +type shared struct { + Interface + + readonlyTXIDs []common.Txid // all readonly transaction IDs. + allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid. + cache map[common.Pgid]struct{} // fast lookup of all free and pending page ids. + pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx. 
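+	// Interface is embedded so the shared logic can call back into the
+	// concrete freelist (array or hashMap) for Init, FreeCount, freePageIds
+	// and mergeSpans.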
+} + +func newShared() *shared { + return &shared{ + pending: make(map[common.Txid]*txPending), + allocs: make(map[common.Pgid]common.Txid), + cache: make(map[common.Pgid]struct{}), + } +} + +func (t *shared) pendingPageIds() map[common.Txid]*txPending { + return t.pending +} + +func (t *shared) PendingCount() int { + var count int + for _, txp := range t.pending { + count += len(txp.ids) + } + return count +} + +func (t *shared) Count() int { + return t.FreeCount() + t.PendingCount() +} + +func (t *shared) Freed(pgId common.Pgid) bool { + _, ok := t.cache[pgId] + return ok +} + +func (t *shared) Free(txid common.Txid, p *common.Page) { + if p.Id() <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id())) + } + + // Free page and all its overflow pages. + txp := t.pending[txid] + if txp == nil { + txp = &txPending{} + t.pending[txid] = txp + } + allocTxid, ok := t.allocs[p.Id()] + common.Verify(func() { + if allocTxid == txid { + panic(fmt.Sprintf("free: freed page (%d) was allocated by the same transaction (%d)", p.Id(), txid)) + } + }) + if ok { + delete(t.allocs, p.Id()) + } + + for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ { + // Verify that page is not already free. + if _, ok := t.cache[id]; ok { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + t.cache[id] = struct{}{} + } +} + +func (t *shared) Rollback(txid common.Txid) { + // Remove page ids from cache. + txp := t.pending[txid] + if txp == nil { + return + } + for i, pgid := range txp.ids { + delete(t.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + t.allocs[pgid] = tx + } else { + // A writing TXN should never free a page which was allocated by itself. + panic(fmt.Sprintf("rollback: freed page (%d) was allocated by the same transaction (%d)", pgid, txid)) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(t.pending, txid) + + // Remove pgids which are allocated by this txid + for pgid, tid := range t.allocs { + if tid == txid { + delete(t.allocs, pgid) + } + } +} + +func (t *shared) AddReadonlyTXID(tid common.Txid) { + t.readonlyTXIDs = append(t.readonlyTXIDs, tid) +} + +func (t *shared) RemoveReadonlyTXID(tid common.Txid) { + for i := range t.readonlyTXIDs { + if t.readonlyTXIDs[i] == tid { + last := len(t.readonlyTXIDs) - 1 + t.readonlyTXIDs[i] = t.readonlyTXIDs[last] + t.readonlyTXIDs = t.readonlyTXIDs[:last] + break + } + } +} + +type txIDx []common.Txid + +func (t txIDx) Len() int { return len(t) } +func (t txIDx) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txIDx) Less(i, j int) bool { return t[i] < t[j] } + +func (t *shared) ReleasePendingPages() { + // Free all pending pages prior to the earliest open transaction. + sort.Sort(txIDx(t.readonlyTXIDs)) + minid := common.Txid(math.MaxUint64) + if len(t.readonlyTXIDs) > 0 { + minid = t.readonlyTXIDs[0] + } + if minid > 0 { + t.release(minid - 1) + } + // Release unused txid extents. + for _, tid := range t.readonlyTXIDs { + t.releaseRange(minid, tid-1) + minid = tid + 1 + } + t.releaseRange(minid, common.Txid(math.MaxUint64)) + // Any page both allocated and freed in an extent is safe to release. 
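+	// releaseRange above returns such pages directly to the free list via
+	// mergeSpans.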
+} + +func (t *shared) release(txid common.Txid) { + m := make(common.Pgids, 0) + for tid, txp := range t.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +func (t *shared) releaseRange(begin, end common.Txid) { + if begin > end { + return + } + m := common.Pgids{} + for tid, txp := range t.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. + if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +// Copyall copies a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (t *shared) Copyall(dst []common.Pgid) { + m := make(common.Pgids, 0, t.PendingCount()) + for _, txp := range t.pendingPageIds() { + m = append(m, txp.ids...) + } + sort.Sort(m) + common.Mergepgids(dst, t.freePageIds(), m) +} + +func (t *shared) Reload(p *common.Page) { + t.Read(p) + t.NoSyncReload(t.freePageIds()) +} + +func (t *shared) NoSyncReload(pgIds common.Pgids) { + // Build a cache of only pending pages. + pcache := make(map[common.Pgid]bool) + for _, txp := range t.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + a := []common.Pgid{} + for _, id := range pgIds { + if !pcache[id] { + a = append(a, id) + } + } + + t.Init(a) +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (t *shared) reindex() { + free := t.freePageIds() + pending := t.pendingPageIds() + t.cache = make(map[common.Pgid]struct{}, len(free)) + for _, id := range free { + t.cache[id] = struct{}{} + } + for _, txp := range pending { + for _, pendingID := range txp.ids { + t.cache[pendingID] = struct{}{} + } + } +} + +func (t *shared) Read(p *common.Page) { + if !p.IsFreelistPage() { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ())) + } + + ids := p.FreelistPageIds() + + // Copy the list of page ids from the freelist. + if len(ids) == 0 { + t.Init([]common.Pgid{}) + } else { + // copy the ids, so we don't modify on the freelist page directly + idsCopy := make([]common.Pgid, len(ids)) + copy(idsCopy, ids) + // Make sure they're sorted. + sort.Sort(common.Pgids(idsCopy)) + + t.Init(idsCopy) + } +} + +func (t *shared) EstimatedWritePageSize() int { + n := t.Count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n) +} + +func (t *shared) Write(p *common.Page) { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.SetFlags(common.FreelistPageFlag) + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. 
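+	// A stored count of 0xFFFF therefore acts as an overflow marker: the
+	// real element count is written into ids[0] instead.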
+ l := t.Count() + if l == 0 { + p.SetCount(uint16(l)) + } else if l < 0xFFFF { + p.SetCount(uint16(l)) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l) + t.Copyall(ids) + } else { + p.SetCount(0xFFFF) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l+1) + ids[0] = common.Pgid(l) + t.Copyall(ids[1:]) + } +} diff --git a/vendor/go.etcd.io/bbolt/logger.go b/vendor/go.etcd.io/bbolt/logger.go new file mode 100644 index 00000000000..fb250894a29 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/logger.go @@ -0,0 +1,113 @@ +package bbolt + +// See https://github.com/etcd-io/raft/blob/main/logger.go +import ( + "fmt" + "io" + "log" + "os" +) + +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + + Info(v ...interface{}) + Infof(format string, v ...interface{}) + + Warning(v ...interface{}) + Warningf(format string, v ...interface{}) + + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) +} + +func getDiscardLogger() Logger { + return discardLogger +} + +var ( + discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)} +) + +const ( + calldepth = 2 +) + +// DefaultLogger is a default implementation of the Logger interface. +type DefaultLogger struct { + *log.Logger + debug bool +} + +func (l *DefaultLogger) EnableTimestamps() { + l.SetFlags(l.Flags() | log.Ldate | log.Ltime) +} + +func (l *DefaultLogger) EnableDebug() { + l.debug = true +} + +func (l *DefaultLogger) Debug(v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) + } +} + +func (l *DefaultLogger) Debugf(format string, v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) + } +} + +func (l *DefaultLogger) Info(v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Infof(format string, v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Error(v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Errorf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Warning(v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Warningf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Fatal(v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Panic(v ...interface{}) { + l.Logger.Panic(v...) +} + +func (l *DefaultLogger) Panicf(format string, v ...interface{}) { + l.Logger.Panicf(format, v...) 
+} + +func header(lvl, msg string) string { + return fmt.Sprintf("%s: %s", lvl, msg) +} diff --git a/vendor/go.etcd.io/bbolt/mlock_unix.go b/vendor/go.etcd.io/bbolt/mlock_unix.go index 744a972f51a..9a0fd332c97 100644 --- a/vendor/go.etcd.io/bbolt/mlock_unix.go +++ b/vendor/go.etcd.io/bbolt/mlock_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package bbolt diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go index 9c56150d883..022b1001e27 100644 --- a/vendor/go.etcd.io/bbolt/node.go +++ b/vendor/go.etcd.io/bbolt/node.go @@ -4,7 +4,8 @@ import ( "bytes" "fmt" "sort" - "unsafe" + + "go.etcd.io/bbolt/internal/common" ) // node represents an in-memory, deserialized page. @@ -14,10 +15,10 @@ type node struct { unbalanced bool spilled bool key []byte - pgid pgid + pgid common.Pgid parent *node children nodes - inodes inodes + inodes common.Inodes } // root returns the top-level node this node is attached to. @@ -38,10 +39,10 @@ func (n *node) minKeys() int { // size returns the size of the node after serialization. func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) } return int(sz) } @@ -50,10 +51,10 @@ func (n *node) size() int { // This is an optimization to avoid calculating a large node when we only need // to know if it fits inside a certain page size. func (n *node) sizeLessThan(v uintptr) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) if sz >= v { return false } @@ -64,9 +65,9 @@ func (n *node) sizeLessThan(v uintptr) bool { // pageElementSize returns the size of each page element based on the type of node. func (n *node) pageElementSize() uintptr { if n.isLeaf { - return leafPageElementSize + return common.LeafPageElementSize } - return branchPageElementSize + return common.BranchPageElementSize } // childAt returns the child node at a given index. @@ -74,12 +75,12 @@ func (n *node) childAt(index int) *node { if n.isLeaf { panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) } - return n.bucket.node(n.inodes[index].pgid, n) + return n.bucket.node(n.inodes[index].Pgid(), n) } // childIndex returns the index of a given child node. func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), child.key) != -1 }) return index } @@ -113,9 +114,9 @@ func (n *node) prevSibling() *node { } // put inserts a key/value. 
-func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { - if pgId >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid)) +func (n *node) put(oldKey, newKey, value []byte, pgId common.Pgid, flags uint32) { + if pgId >= n.bucket.tx.meta.Pgid() { + panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.Pgid())) } else if len(oldKey) <= 0 { panic("put: zero-length old key") } else if len(newKey) <= 0 { @@ -123,30 +124,30 @@ func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { } // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), oldKey) != -1 }) // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + exact := len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].Key(), oldKey) if !exact { - n.inodes = append(n.inodes, inode{}) + n.inodes = append(n.inodes, common.Inode{}) copy(n.inodes[index+1:], n.inodes[index:]) } inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgId - _assert(len(inode.key) > 0, "put: zero-length inode key") + inode.SetFlags(flags) + inode.SetKey(newKey) + inode.SetValue(value) + inode.SetPgid(pgId) + common.Assert(len(inode.Key()) > 0, "put: zero-length inode key") } // del removes a key from the node. func (n *node) del(key []byte) { // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), key) != -1 }) // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].Key(), key) { return } @@ -158,30 +159,15 @@ func (n *node) del(key []byte) { } // read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } +func (n *node) read(p *common.Page) { + n.pgid = p.Id() + n.isLeaf = p.IsLeafPage() + n.inodes = common.ReadInodeFromPage(p) - // Save first key so we can find the node in the parent when we spill. + // Save first key, so we can find the node in the parent when we spill. if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") + n.key = n.inodes[0].Key() + common.Assert(len(n.key) > 0, "read: zero-length node key") } else { n.key = nil } @@ -190,57 +176,27 @@ func (n *node) read(p *page) { // write writes the items onto one or more pages. // The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set // and the rest should be zeroed. 
-func (n *node) write(p *page) { - _assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page") +func (n *node) write(p *common.Page) { + common.Assert(p.Count() == 0 && p.Flags() == 0, "node cannot be written into a not empty page") // Initialize page. if n.isLeaf { - p.flags = leafPageFlag + p.SetFlags(common.LeafPageFlag) } else { - p.flags = branchPageFlag + p.SetFlags(common.BranchPageFlag) } if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.Id())) } - p.count = uint16(len(n.inodes)) + p.SetCount(uint16(len(n.inodes))) // Stop here if there are no items to write. - if p.count == 0 { + if p.Count() == 0 { return } - // Loop over each item and write it to the page. - // off tracks the offset into p of the start of the next data. - off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Create a slice to write into of needed size and advance - // byte pointer for next iteration. - sz := len(item.key) + len(item.value) - b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) - off += uintptr(sz) - - // Write the page element. - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // Write data for the element to the end of the page. - l := copy(b, item.key) - copy(b[l:], item.value) - } + common.WriteInodeToPage(n.inodes, p) // DEBUG ONLY: n.dump() } @@ -273,7 +229,7 @@ func (n *node) split(pageSize uintptr) []*node { func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // Ignore the split if the page doesn't have at least enough nodes for // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + if len(n.inodes) <= (common.MinKeysPerPage*2) || n.sizeLessThan(pageSize) { return n, nil } @@ -313,17 +269,17 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // It returns the index as well as the size of the first page. // This is only be called from split(). func (n *node) splitIndex(threshold int) (index, sz uintptr) { - sz = pageHeaderSize + sz = common.PageHeaderSize // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + for i := 0; i < len(n.inodes)-common.MinKeysPerPage; i++ { index = uintptr(i) inode := n.inodes[i] - elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) + elsize := n.pageElementSize() + uintptr(len(inode.Key())) + uintptr(len(inode.Value())) // If we have at least the minimum number of keys and adding another // node would put us over the threshold then exit and return. - if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { + if index >= common.MinKeysPerPage && sz+elsize > uintptr(threshold) { break } @@ -360,7 +316,7 @@ func (n *node) spill() error { for _, node := range nodes { // Add node's page to the freelist if it's not new. 
if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + tx.db.freelist.Free(tx.meta.Txid(), tx.page(node.pgid)) node.pgid = 0 } @@ -371,10 +327,10 @@ func (n *node) spill() error { } // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + if p.Id() >= tx.meta.Pgid() { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.Id(), tx.meta.Pgid())) } - node.pgid = p.id + node.pgid = p.Id() node.write(p) node.spilled = true @@ -382,12 +338,12 @@ func (n *node) spill() error { if node.parent != nil { var key = node.key if key == nil { - key = node.inodes[0].key + key = node.inodes[0].Key() } - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") + node.parent.put(key, node.inodes[0].Key(), nil, node.pgid, 0) + node.key = node.inodes[0].Key() + common.Assert(len(node.key) > 0, "spill: zero-length node key") } // Update the statistics. @@ -415,8 +371,8 @@ func (n *node) rebalance() { // Update statistics. n.bucket.tx.stats.IncRebalance(1) - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 + // Ignore if node is above threshold (25% when FillPercent is set to DefaultFillPercent) and has enough keys. + var threshold = int(float64(n.bucket.tx.db.pageSize)*n.bucket.FillPercent) / 2 if n.size() > threshold && len(n.inodes) > n.minKeys() { return } @@ -426,14 +382,14 @@ func (n *node) rebalance() { // If root node is a branch and only has one node then collapse it. if !n.isLeaf && len(n.inodes) == 1 { // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) + child := n.bucket.node(n.inodes[0].Pgid(), n) n.isLeaf = child.isLeaf n.inodes = child.inodes[:] n.children = child.children // Reparent all child nodes being moved. for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { child.parent = n } } @@ -457,53 +413,37 @@ func (n *node) rebalance() { return } - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + common.Assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) + // Merge with right sibling if idx == 0, otherwise left sibling. + var leftNode, rightNode *node + var useNextSibling = n.parent.childIndex(n) == 0 if useNextSibling { - target = n.nextSibling() + leftNode = n + rightNode = n.nextSibling() } else { - target = n.prevSibling() + leftNode = n.prevSibling() + rightNode = n } - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } + // If both nodes are too small then merge them. + // Reparent all child nodes being moved. + for _, inode := range rightNode.inodes { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { + child.parent.removeChild(child) + child.parent = leftNode + child.parent.children = append(child.parent.children, child) } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) 
- n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() } - // Either this node or the target node was deleted from the parent so rebalance it. + // Copy over inodes from right node to left node and remove right node. + leftNode.inodes = append(leftNode.inodes, rightNode.inodes...) + n.parent.del(rightNode.key) + n.parent.removeChild(rightNode) + delete(n.bucket.nodes, rightNode.pgid) + rightNode.free() + + // Either this node or the sibling node was deleted from the parent so rebalance it. n.parent.rebalance() } @@ -525,20 +465,20 @@ func (n *node) dereference() { key := make([]byte, len(n.key)) copy(key, n.key) n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + common.Assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") } for i := range n.inodes { inode := &n.inodes[i] - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") + key := make([]byte, len(inode.Key())) + copy(key, inode.Key()) + inode.SetKey(key) + common.Assert(len(inode.Key()) > 0, "dereference: zero-length inode key") - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value + value := make([]byte, len(inode.Value())) + copy(value, inode.Value()) + inode.SetValue(value) } // Recursively dereference children. @@ -553,7 +493,7 @@ func (n *node) dereference() { // free adds the node's underlying page to the freelist. func (n *node) free() { if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.bucket.tx.db.freelist.Free(n.bucket.tx.meta.Txid(), n.bucket.tx.page(n.pgid)) n.pgid = 0 } } @@ -594,17 +534,5 @@ type nodes []*node func (s nodes) Len() int { return len(s) } func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s nodes) Less(i, j int) bool { - return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 + return bytes.Compare(s[i].inodes[0].Key(), s[j].inodes[0].Key()) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. 
-type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go deleted file mode 100644 index bb081b031e6..00000000000 --- a/vendor/go.etcd.io/bbolt/page.go +++ /dev/null @@ -1,212 +0,0 @@ -package bbolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = unsafe.Sizeof(page{}) - -const minKeysPerPage = 2 - -const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) -const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) -} - -func (p *page) fastCheck(id pgid) { - _assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) - // Only one flag of page-type can be set. - _assert(p.flags == branchPageFlag || - p.flags == leafPageFlag || - p.flags == metaPageFlag || - p.flags == freelistPageFlag, - "page %v: has unexpected type/flags: %x", p.id, p.flags) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - leafPageElementSize, int(index))) -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - elems := unsafe.Slice((*leafPageElement)(data), int(p.count)) - return elems -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - unsafe.Sizeof(branchPageElement{}), int(index))) -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - elems := unsafe.Slice((*branchPageElement)(data), int(p.count)) - return elems -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. 
-func (n *branchPageElement) key() []byte { - return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - i := int(n.pos) - j := i + int(n.ksize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - i := int(n.pos) + int(n.ksize) - j := i + int(n.vsize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. -func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go index 766395de3be..7b5db772786 100644 --- a/vendor/go.etcd.io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -5,15 +5,16 @@ import ( "fmt" "io" "os" + "runtime" "sort" "strings" "sync/atomic" "time" "unsafe" -) -// txid represents the internal transaction identifier. -type txid uint64 + berrors "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" +) // Tx represents a read-only or read/write transaction on the database. // Read-only transactions can be used for retrieving values for keys and creating cursors. @@ -27,9 +28,9 @@ type Tx struct { writable bool managed bool db *DB - meta *meta + meta *common.Meta root Bucket - pages map[pgid]*page + pages map[common.Pgid]*common.Page stats TxStats commitHandlers []func() @@ -48,24 +49,27 @@ func (tx *Tx) init(db *DB) { tx.pages = nil // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) + tx.meta = &common.Meta{} + db.meta().Copy(tx.meta) // Copy over the root bucket. 
tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root + tx.root.InBucket = &common.InBucket{} + *tx.root.InBucket = *(tx.meta.RootBucket()) // Increment the transaction id and add a page cache for writable transactions. if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) + tx.pages = make(map[common.Pgid]*common.Page) + tx.meta.IncTxid() } } // ID returns the transaction id. func (tx *Tx) ID() int { - return int(tx.meta.txid) + if tx == nil || tx.meta == nil { + return -1 + } + return int(tx.meta.Txid()) } // DB returns a reference to the database that created the transaction. @@ -75,7 +79,7 @@ func (tx *Tx) DB() *DB { // Size returns current database size in bytes as seen by this transaction. func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) + return int64(tx.meta.Pgid()) * int64(tx.db.pageSize) } // Writable returns whether the transaction can perform write operations. @@ -96,6 +100,11 @@ func (tx *Tx) Stats() TxStats { return tx.stats } +// Inspect returns the structure of the database. +func (tx *Tx) Inspect() BucketStructure { + return tx.root.Inspect() +} + // Bucket retrieves a bucket by name. // Returns nil if the bucket does not exist. // The bucket instance is only valid for the lifetime of the transaction. @@ -123,6 +132,24 @@ func (tx *Tx) DeleteBucket(name []byte) error { return tx.root.DeleteBucket(name) } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. the key represents a non-bucket value. +// +// If src is nil, it means moving a top level bucket into the target bucket. +// If dst is nil, it means converting the child bucket into a top level bucket. +func (tx *Tx) MoveBucket(child []byte, src *Bucket, dst *Bucket) error { + if src == nil { + src = &tx.root + } + if dst == nil { + dst = &tx.root + } + return src.MoveBucket(child, dst) +} + // ForEach executes a function for each bucket in the root. // If the provided function returns an error then the iteration is stopped and // the error is returned to the caller. @@ -137,15 +164,28 @@ func (tx *Tx) OnCommit(fn func()) { tx.commitHandlers = append(tx.commitHandlers, fn) } -// Commit writes all changes to disk and updates the meta page. +// Commit writes all changes to disk, updates the meta page and closes the transaction. // Returns an error if a disk write error occurs, or if Commit is // called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") +func (tx *Tx) Commit() (err error) { + txId := tx.ID() + lg := tx.db.Logger() + if lg != discardLogger { + lg.Debugf("Committing transaction %d", txId) + defer func() { + if err != nil { + lg.Errorf("Committing transaction failed: %v", err) + } else { + lg.Debugf("Committing transaction %d successfully", txId) + } + }() + } + + common.Assert(!tx.managed, "managed tx commit not allowed") if tx.db == nil { - return ErrTxClosed + return berrors.ErrTxClosed } else if !tx.writable { - return ErrTxNotWritable + return berrors.ErrTxNotWritable } // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. @@ -157,40 +197,43 @@ func (tx *Tx) Commit() error { tx.stats.IncRebalanceTime(time.Since(startTime)) } - opgid := tx.meta.pgid + opgid := tx.meta.Pgid() // spill data onto dirty pages. 
startTime = time.Now() - if err := tx.root.spill(); err != nil { + if err = tx.root.spill(); err != nil { + lg.Errorf("spilling data onto dirty pages failed: %v", err) tx.rollback() return err } tx.stats.IncSpillTime(time.Since(startTime)) // Free the old root bucket. - tx.meta.root.root = tx.root.root + tx.meta.RootBucket().SetRootPage(tx.root.RootPage()) // Free the old freelist because commit writes out a fresh freelist. - if tx.meta.freelist != pgidNoFreelist { - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + if tx.meta.Freelist() != common.PgidNoFreelist { + tx.db.freelist.Free(tx.meta.Txid(), tx.db.page(tx.meta.Freelist())) } if !tx.db.NoFreelistSync { - err := tx.commitFreelist() + err = tx.commitFreelist() if err != nil { + lg.Errorf("committing freelist failed: %v", err) return err } } else { - tx.meta.freelist = pgidNoFreelist + tx.meta.SetFreelist(common.PgidNoFreelist) } // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { + if tx.meta.Pgid() > opgid { _ = errors.New("") // gofail: var lackOfDiskSpace string // tx.rollback() // return errors.New(lackOfDiskSpace) - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + if err = tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil { + lg.Errorf("growing db size failed, pgid: %d, pagesize: %d, error: %v", tx.meta.Pgid(), tx.db.pageSize, err) tx.rollback() return err } @@ -198,7 +241,8 @@ func (tx *Tx) Commit() error { // Write dirty pages to disk. startTime = time.Now() - if err := tx.write(); err != nil { + if err = tx.write(); err != nil { + lg.Errorf("writing data failed: %v", err) tx.rollback() return err } @@ -208,11 +252,11 @@ func (tx *Tx) Commit() error { ch := tx.Check() var errs []string for { - err, ok := <-ch + chkErr, ok := <-ch if !ok { break } - errs = append(errs, err.Error()) + errs = append(errs, chkErr.Error()) } if len(errs) > 0 { panic("check fail: " + strings.Join(errs, "\n")) @@ -220,7 +264,8 @@ func (tx *Tx) Commit() error { } // Write meta to disk. - if err := tx.writeMeta(); err != nil { + if err = tx.writeMeta(); err != nil { + lg.Errorf("writeMeta failed: %v", err) tx.rollback() return err } @@ -240,16 +285,14 @@ func (tx *Tx) Commit() error { func (tx *Tx) commitFreelist() error { // Allocate new pages for the new free list. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + p, err := tx.allocate((tx.db.freelist.EstimatedWritePageSize() / tx.db.pageSize) + 1) if err != nil { tx.rollback() return err } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id + + tx.db.freelist.Write(p) + tx.meta.SetFreelist(p.Id()) return nil } @@ -257,9 +300,9 @@ func (tx *Tx) commitFreelist() error { // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. 
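The new Tx.MoveBucket helper above treats a nil src or dst as the root bucket, so a top-level bucket can be re-parented in one call. A minimal usage sketch, assuming a bbolt database at the hypothetical path example.db and illustrative bucket names:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("example.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Seed a top-level bucket in its own transaction.
	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("logs-2024"))
		return err
	}); err != nil {
		log.Fatal(err)
	}

	// Re-parent it: src == nil means "move a top-level bucket into dst",
	// per the doc comment on Tx.MoveBucket above.
	if err := db.Update(func(tx *bolt.Tx) error {
		dst, err := tx.CreateBucketIfNotExists([]byte("archive"))
		if err != nil {
			return err
		}
		return tx.MoveBucket([]byte("logs-2024"), nil, dst)
	}); err != nil {
		log.Fatal(err)
	}
}
```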
func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") + common.Assert(!tx.managed, "managed tx rollback not allowed") if tx.db == nil { - return ErrTxClosed + return berrors.ErrTxClosed } tx.nonPhysicalRollback() return nil @@ -271,7 +314,7 @@ func (tx *Tx) nonPhysicalRollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.Rollback(tx.meta.Txid()) } tx.close() } @@ -282,17 +325,17 @@ func (tx *Tx) rollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.Rollback(tx.meta.Txid()) // When mmap fails, the `data`, `dataref` and `datasz` may be reset to // zero values, and there is no way to reload free page IDs in this case. if tx.db.data != nil { if !tx.db.hasSyncedFreelist() { // Reconstruct free page list by scanning the DB to get the whole free page list. - // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. - tx.db.freelist.noSyncReload(tx.db.freepages()) + // Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode. + tx.db.freelist.NoSyncReload(tx.db.freepages()) } else { // Read free page list from freelist page. - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + tx.db.freelist.Reload(tx.db.page(tx.db.meta().Freelist())) } } } @@ -305,9 +348,9 @@ func (tx *Tx) close() { } if tx.writable { // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() + var freelistFreeN = tx.db.freelist.FreeCount() + var freelistPendingN = tx.db.freelist.PendingCount() + var freelistAlloc = tx.db.freelist.EstimatedWritePageSize() // Remove transaction ref & writer lock. tx.db.rwtx = nil @@ -335,7 +378,7 @@ func (tx *Tx) close() { // Copy writes the entire database to a writer. // This function exists for backwards compatibility. // -// Deprecated; Use WriteTo() instead. +// Deprecated: Use WriteTo() instead. func (tx *Tx) Copy(w io.Writer) error { _, err := tx.WriteTo(w) return err @@ -357,13 +400,13 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { // Generate a meta page. We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta + page := (*common.Page)(unsafe.Pointer(&buf[0])) + page.SetFlags(common.MetaPageFlag) + *page.Meta() = *tx.meta // Write meta 0. - page.id = 0 - page.meta().checksum = page.meta().sum64() + page.SetId(0) + page.Meta().SetChecksum(page.Meta().Sum64()) nn, err := w.Write(buf) n += int64(nn) if err != nil { @@ -371,9 +414,9 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } // Write meta 1 with a lower transaction id. - page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() + page.SetId(1) + page.Meta().DecTxid() + page.Meta().SetChecksum(page.Meta().Sum64()) nn, err = w.Write(buf) n += int64(nn) if err != nil { @@ -413,14 +456,16 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error { } // allocate returns a contiguous block of memory starting at a given page. 
-func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(tx.meta.txid, count) +func (tx *Tx) allocate(count int) (*common.Page, error) { + lg := tx.db.Logger() + p, err := tx.db.allocate(tx.meta.Txid(), count) if err != nil { + lg.Errorf("allocating failed, txid: %d, count: %d, error: %v", tx.meta.Txid(), count, err) return nil, err } // Save to our page cache. - tx.pages[p.id] = p + tx.pages[p.Id()] = p // Update statistics. tx.stats.IncPageCount(int64(count)) @@ -432,18 +477,19 @@ func (tx *Tx) allocate(count int) (*page, error) { // write writes any dirty pages to disk. func (tx *Tx) write() error { // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) + lg := tx.db.Logger() + pages := make(common.Pages, 0, len(tx.pages)) for _, p := range tx.pages { pages = append(pages, p) } // Clear out page cache early. - tx.pages = make(map[pgid]*page) + tx.pages = make(map[common.Pgid]*common.Page) sort.Sort(pages) // Write pages to disk in order. for _, p := range pages { - rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) - offset := int64(p.id) * int64(tx.db.pageSize) + rem := (uint64(p.Overflow()) + 1) * uint64(tx.db.pageSize) + offset := int64(p.Id()) * int64(tx.db.pageSize) var written uintptr // Write out page in "max allocation" sized chunks. @@ -452,9 +498,10 @@ func (tx *Tx) write() error { if sz > maxAllocSize-1 { sz = maxAllocSize - 1 } - buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + lg.Errorf("writeAt failed, offset: %d: %w", offset, err) return err } @@ -474,9 +521,10 @@ func (tx *Tx) write() error { } // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { + if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncDataPages struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } @@ -485,11 +533,11 @@ func (tx *Tx) write() error { for _, p := range pages { // Ignore page sizes over 1 page. // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { + if int(p.Overflow()) != 0 { continue } - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 for i := range buf { @@ -503,18 +551,24 @@ func (tx *Tx) write() error { // writeMeta writes the meta to the disk. func (tx *Tx) writeMeta() error { + // gofail: var beforeWriteMetaError string + // return errors.New(beforeWriteMetaError) + // Create a temporary buffer for the meta page. + lg := tx.db.Logger() buf := make([]byte, tx.db.pageSize) p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) + tx.meta.Write(p) // Write the meta page to file. 
- if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil { + lg.Errorf("writeAt failed, pgid: %d, pageSize: %d, error: %v", p.Id(), tx.db.pageSize, err) return err } - if !tx.db.NoSync || IgnoreNoSync { + if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } @@ -527,69 +581,69 @@ func (tx *Tx) writeMeta() error { // page returns a reference to the page with a given id. // If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { +func (tx *Tx) page(id common.Pgid) *common.Page { // Check the dirty pages first. if tx.pages != nil { if p, ok := tx.pages[id]; ok { - p.fastCheck(id) + p.FastCheck(id) return p } } // Otherwise return directly from the mmap. p := tx.db.page(id) - p.fastCheck(id) + p.FastCheck(id) return p } // forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) { - stack := make([]pgid, 10) +func (tx *Tx) forEachPage(pgidnum common.Pgid, fn func(*common.Page, int, []common.Pgid)) { + stack := make([]common.Pgid, 10) stack[0] = pgidnum tx.forEachPageInternal(stack[:1], fn) } -func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) { +func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, int, []common.Pgid)) { p := tx.page(pgidstack[len(pgidstack)-1]) // Execute function. fn(p, len(pgidstack)-1, pgidstack) // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPageInternal(append(pgidstack, elem.pgid), fn) + if p.IsBranchPage() { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + tx.forEachPageInternal(append(pgidstack, elem.Pgid()), fn) } } } // Page returns page information for a given page number. // This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { +func (tx *Tx) Page(id int) (*common.PageInfo, error) { if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { + return nil, berrors.ErrTxClosed + } else if common.Pgid(id) >= tx.meta.Pgid() { return nil, nil } if tx.db.freelist == nil { - return nil, ErrFreePagesNotLoaded + return nil, berrors.ErrFreePagesNotLoaded } // Build the page info. - p := tx.db.page(pgid(id)) - info := &PageInfo{ + p := tx.db.page(common.Pgid(id)) + info := &common.PageInfo{ ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), + Count: int(p.Count()), + OverflowCount: int(p.Overflow()), } // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { + if tx.db.freelist.Freed(common.Pgid(id)) { info.Type = "free" } else { - info.Type = p.typ() + info.Type = p.Typ() } return info, nil diff --git a/vendor/go.etcd.io/bbolt/tx_check.go b/vendor/go.etcd.io/bbolt/tx_check.go index 75c7c08436d..c3ecbb97507 100644 --- a/vendor/go.etcd.io/bbolt/tx_check.go +++ b/vendor/go.etcd.io/bbolt/tx_check.go @@ -3,6 +3,8 @@ package bbolt import ( "encoding/hex" "fmt" + + "go.etcd.io/bbolt/internal/common" ) // Check performs several consistency checks on the database for this transaction. 
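The tx_check.go hunks below fold CheckWithOptions into a variadic Check and close the error channel when the walk finishes, so callers can simply range over it until it drains; the new WithPageId option (added further down) narrows the check to the subtree rooted at one page. A usage sketch, assuming an existing database file at the hypothetical path example.db:

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Read-only mode avoids the extra locking overhead Check's doc
	// comment mentions; the file must already exist.
	db, err := bolt.Open("example.db", 0o600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		// Check now takes options directly (CheckWithOptions is gone).
		// The channel is closed on completion, so ranging drains every
		// reported inconsistency. bolt.WithPageId(n) would instead
		// restrict the check to the subtree rooted at page n.
		for cerr := range tx.Check(bolt.WithKVStringer(bolt.HexKVStringer())) {
			fmt.Println("consistency error:", cerr)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```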
@@ -13,13 +15,10 @@ import ( // because of caching. This overhead can be removed if running on a read-only // transaction, however, it is not safe to execute other writer transactions at // the same time. -func (tx *Tx) Check() <-chan error { - return tx.CheckWithOptions() -} - -// CheckWithOptions allows users to provide a customized `KVStringer` implementation, +// +// It also allows users to provide a customized `KVStringer` implementation, // so that bolt can generate human-readable diagnostic messages. -func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { +func (tx *Tx) Check(options ...CheckOption) <-chan error { chkConfig := checkConfig{ kvStringer: HexKVStringer(), } @@ -28,18 +27,22 @@ func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { } ch := make(chan error) - go tx.check(chkConfig.kvStringer, ch) + go func() { + // Close the channel to signal completion. + defer close(ch) + tx.check(chkConfig, ch) + }() return ch } -func (tx *Tx) check(kvStringer KVStringer, ch chan error) { +func (tx *Tx) check(cfg checkConfig, ch chan error) { // Force loading free list if opened in ReadOnly mode. tx.db.loadFreelist() // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) + freed := make(map[common.Pgid]bool) + all := make([]common.Pgid, tx.db.freelist.Count()) + tx.db.freelist.Copyall(all) for _, id := range all { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) @@ -48,118 +51,171 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { } // Track every reachable page. - reachable := make(map[pgid]*page) + reachable := make(map[common.Pgid]*common.Page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - if tx.meta.freelist != pgidNoFreelist { - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + if tx.meta.Freelist() != common.PgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.Freelist()).Overflow(); i++ { + reachable[tx.meta.Freelist()+common.Pgid(i)] = tx.page(tx.meta.Freelist()) } } - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch) + if cfg.pageId == 0 { + // Check the whole db file, starting from the root bucket and + // recursively check all child buckets. + tx.recursivelyCheckBucket(&tx.root, reachable, freed, cfg.kvStringer, ch) - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + // Ensure all pages below high water mark are either reachable or freed. + for i := common.Pgid(0); i < tx.meta.Pgid(); i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } } + } else { + // Check the db file starting from a specified pageId. + if cfg.pageId < 2 || cfg.pageId >= uint64(tx.meta.Pgid()) { + ch <- fmt.Errorf("page ID (%d) out of range [%d, %d)", cfg.pageId, 2, tx.meta.Pgid()) + return + } + + tx.recursivelyCheckPage(common.Pgid(cfg.pageId), reachable, freed, cfg.kvStringer, ch) } +} - // Close the channel to signal completion. 
- close(ch) +func (tx *Tx) recursivelyCheckPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + tx.checkInvariantProperties(pageId, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucketInPage(pageId, reachable, freed, kvStringer, ch) } -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, +func (tx *Tx) recursivelyCheckBucketInPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, kvStringer KVStringer, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } + p := tx.page(pageId) - // Check every page used by this bucket. - b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack) + switch { + case p.IsBranchPage(): + for i := range p.BranchPageElements() { + elem := p.BranchPageElement(uint16(i)) + tx.recursivelyCheckBucketInPage(elem.Pgid(), reachable, freed, kvStringer, ch) } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + case p.IsLeafPage(): + for i := range p.LeafPageElements() { + elem := p.LeafPageElement(uint16(i)) + if elem.IsBucketEntry() { + inBkt := common.NewInBucket(pageId, 0) + tmpBucket := Bucket{ + InBucket: &inBkt, + rootNode: &node{isLeaf: p.IsLeafPage()}, + FillPercent: DefaultFillPercent, + tx: tx, + } + if child := tmpBucket.Bucket(elem.Key()); child != nil { + tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch) + } } - reachable[id] = p } + default: + ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pageId) + } +} - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack) - } - }) +func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + // Ignore inline buckets. + if b.RootPage() == 0 { + return + } - tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch) + tx.checkInvariantProperties(b.RootPage(), reachable, freed, kvStringer, ch) // Check each bucket within this bucket. 
_ = b.ForEachBucket(func(k []byte) error { if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch) } return nil }) } -// recursivelyCheckPages confirms database consistency with respect to b-tree +func (tx *Tx) checkInvariantProperties(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + tx.forEachPage(pageId, func(p *common.Page, _ int, stack []common.Pgid) { + verifyPageReachable(p, tx.meta.Pgid(), stack, reachable, freed, ch) + }) + + tx.recursivelyCheckPageKeyOrder(pageId, kvStringer.KeyToString, ch) +} + +func verifyPageReachable(p *common.Page, hwm common.Pgid, stack []common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, ch chan error) { + if p.Id() > hwm { + ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(hwm), stack) + } + + // Ensure each page is only referenced once. + for i := common.Pgid(0); i <= common.Pgid(p.Overflow()); i++ { + var id = p.Id() + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.Id()] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.Id())) + } else if !p.IsBranchPage() && !p.IsLeafPage() { + ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack) + } +} + +// recursivelyCheckPageKeyOrder verifies database consistency with respect to b-tree // key order constraints: // - keys on pages must be sorted // - keys on children pages are between 2 consecutive keys on the parent's branch page). -func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) { - tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch) +func (tx *Tx) recursivelyCheckPageKeyOrder(pgId common.Pgid, keyToString func([]byte) string, ch chan error) { + tx.recursivelyCheckPageKeyOrderInternal(pgId, nil, nil, nil, keyToString, ch) } -// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are: +// recursivelyCheckPageKeyOrderInternal verifies that all keys in the subtree rooted at `pgid` are: // - >=`minKeyClosed` (can be nil) // - <`maxKeyOpen` (can be nil) // - Are in right ordering relationship to their parents. // `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message. -func (tx *Tx) recursivelyCheckPagesInternal( - pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid, +func (tx *Tx) recursivelyCheckPageKeyOrderInternal( + pgId common.Pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []common.Pgid, keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) { p := tx.page(pgId) pagesStack = append(pagesStack, pgId) switch { - case p.flags&branchPageFlag != 0: + case p.IsBranchPage(): // For branch page we navigate ranges of all subpages. 
runningMin := minKeyClosed - for i := range p.branchPageElements() { - elem := p.branchPageElement(uint16(i)) - verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + for i := range p.BranchPageElements() { + elem := p.BranchPageElement(uint16(i)) + verifyKeyOrder(elem.Pgid(), "branch", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) maxKey := maxKeyOpen - if i < len(p.branchPageElements())-1 { - maxKey = p.branchPageElement(uint16(i + 1)).key() + if i < len(p.BranchPageElements())-1 { + maxKey = p.BranchPageElement(uint16(i + 1)).Key() } - maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch) + maxKeyInSubtree = tx.recursivelyCheckPageKeyOrderInternal(elem.Pgid(), elem.Key(), maxKey, pagesStack, keyToString, ch) runningMin = maxKeyInSubtree } return maxKeyInSubtree - case p.flags&leafPageFlag != 0: + case p.IsLeafPage(): runningMin := minKeyClosed - for i := range p.leafPageElements() { - elem := p.leafPageElement(uint16(i)) - verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) - runningMin = elem.key() + for i := range p.LeafPageElements() { + elem := p.LeafPageElement(uint16(i)) + verifyKeyOrder(pgId, "leaf", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + runningMin = elem.Key() } - if p.count > 0 { - return p.leafPageElement(p.count - 1).key() + if p.Count() > 0 { + return p.LeafPageElement(p.Count() - 1).Key() } default: - ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId) + ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pgId) } return maxKeyInSubtree } @@ -168,7 +224,7 @@ func (tx *Tx) recursivelyCheckPagesInternal( * verifyKeyOrder checks whether an entry with given #index on pgId (pageType: "branch|leaf") that has given "key", * is within range determined by (previousKey..maxKeyOpen) and reports found violations to the channel (ch). */ -func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) { +func verifyKeyOrder(pgId common.Pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []common.Pgid) { if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 { ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v", index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) @@ -194,6 +250,7 @@ func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousK type checkConfig struct { kvStringer KVStringer + pageId uint64 } type CheckOption func(options *checkConfig) @@ -204,6 +261,13 @@ func WithKVStringer(kvStringer KVStringer) CheckOption { } } +// WithPageId sets a page ID from which the check command starts to check +func WithPageId(pageId uint64) CheckOption { + return func(c *checkConfig) { + c.pageId = pageId + } +} + // KVStringer allows to prepare human-readable diagnostic messages. 
type KVStringer interface { KeyToString([]byte) string diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index cfda8934332..0d5050fe405 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -37,6 +37,8 @@ package inspector import ( "go/ast" _ "unsafe" + + "golang.org/x/tools/internal/astutil/edge" ) // An Inspector provides methods for inspecting @@ -48,6 +50,21 @@ type Inspector struct { //go:linkname events func events(in *Inspector) []event { return in.events } +func packEdgeKindAndIndex(ek edge.Kind, index int) int32 { + return int32(uint32(index+1)<<7 | uint32(ek)) +} + +// unpackEdgeKindAndIndex unpacks the edge kind and edge index (within +// an []ast.Node slice) from the parent field of a pop event. +// +//go:linkname unpackEdgeKindAndIndex +func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) { + // The "parent" field of a pop node holds the + // edge Kind in the lower 7 bits and the index+1 + // in the upper 25. + return edge.Kind(x & 0x7f), int(x>>7) - 1 +} + // New returns an Inspector for the specified syntax trees. func New(files []*ast.File) *Inspector { return &Inspector{traverse(files)} @@ -59,7 +76,7 @@ type event struct { node ast.Node typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events index int32 // index of corresponding push or pop event - parent int32 // index of parent's push node (defined for push nodes only) + parent int32 // index of parent's push node (push nodes only), or packed edge kind/index (pop nodes only) } // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). @@ -194,49 +211,74 @@ func traverse(files []*ast.File) []event { extent += int(f.End() - f.Pos()) } // This estimate is based on the net/http package. - capacity := extent * 33 / 100 - if capacity > 1e6 { - capacity = 1e6 // impose some reasonable maximum + capacity := min(extent*33/100, 1e6) // impose some reasonable maximum (1M) + + v := &visitor{ + events: make([]event, 0, capacity), + stack: []item{{index: -1}}, // include an extra event so file nodes have a parent } - events := make([]event, 0, capacity) + for _, file := range files { + walk(v, edge.Invalid, -1, file) + } + return v.events +} - var stack []event - stack = append(stack, event{index: -1}) // include an extra event so file nodes have a parent - for _, f := range files { - ast.Inspect(f, func(n ast.Node) bool { - if n != nil { - // push - ev := event{ - node: n, - typ: 0, // temporarily used to accumulate type bits of subtree - index: int32(len(events)), // push event temporarily holds own index - parent: stack[len(stack)-1].index, - } - stack = append(stack, ev) - events = append(events, ev) +type visitor struct { + events []event + stack []item +} - // 2B nodes ought to be enough for anyone! - if int32(len(events)) < 0 { - panic("event index exceeded int32") - } - } else { - // pop - top := len(stack) - 1 - ev := stack[top] - typ := typeOf(ev.node) - push := ev.index - parent := top - 1 - - events[push].typ = typ // set type of push - stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. 
- events[push].index = int32(len(events)) // make push refer to pop - - stack = stack[:top] - events = append(events, ev) - } - return true - }) +type item struct { + index int32 // index of current node's push event + parentIndex int32 // index of parent node's push event + typAccum uint64 // accumulated type bits of current node's descendents + edgeKindAndIndex int32 // edge.Kind and index, bit packed +} + +func (v *visitor) push(ek edge.Kind, eindex int, node ast.Node) { + var ( + index = int32(len(v.events)) + parentIndex = v.stack[len(v.stack)-1].index + ) + v.events = append(v.events, event{ + node: node, + parent: parentIndex, + typ: typeOf(node), + index: 0, // (pop index is set later by visitor.pop) + }) + v.stack = append(v.stack, item{ + index: index, + parentIndex: parentIndex, + edgeKindAndIndex: packEdgeKindAndIndex(ek, eindex), + }) + + // 2B nodes ought to be enough for anyone! + if int32(len(v.events)) < 0 { + panic("event index exceeded int32") + } + + // 32M elements in an []ast.Node ought to be enough for anyone! + if ek2, eindex2 := unpackEdgeKindAndIndex(packEdgeKindAndIndex(ek, eindex)); ek2 != ek || eindex2 != eindex { + panic("Node slice index exceeded uint25") } +} + +func (v *visitor) pop(node ast.Node) { + top := len(v.stack) - 1 + current := v.stack[top] + + push := &v.events[current.index] + parent := &v.stack[top-1] + + push.index = int32(len(v.events)) // make push event refer to pop + parent.typAccum |= current.typAccum | push.typ // accumulate type bits into parent + + v.stack = v.stack[:top] - return events + v.events = append(v.events, event{ + node: node, + typ: current.typAccum, + index: current.index, + parent: current.edgeKindAndIndex, // see [unpackEdgeKindAndIndex] + }) } diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index 40b1bfd7e62..97784484578 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -219,7 +219,7 @@ func typeOf(n ast.Node) uint64 { //go:linkname maskOf func maskOf(nodes []ast.Node) uint64 { - if nodes == nil { + if len(nodes) == 0 { return math.MaxUint64 // match all node types } var mask uint64 diff --git a/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/vendor/golang.org/x/tools/go/ast/inspector/walk.go new file mode 100644 index 00000000000..5a42174a0a0 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/walk.go @@ -0,0 +1,341 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file is a fork of ast.Inspect to reduce unnecessary dynamic +// calls and to gather edge information. +// +// Consistency with the original is ensured by TestInspectAllNodes. 
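The reworked visitor above packs the edge kind into the low 7 bits of a pop event's parent field and index+1 into the upper 25 bits, so a non-slice field (index -1) encodes as zero; that is also why the edge package asserts its Kind count fits in 7 bits. A standalone round-trip of the same bit layout, where pack and unpack re-implement the unexported helpers purely for illustration:

```go
package main

import "fmt"

// pack mirrors packEdgeKindAndIndex: edge kind in the low 7 bits,
// index+1 in the upper 25 bits (so index -1 becomes 0).
func pack(kind uint8, index int) int32 {
	return int32(uint32(index+1)<<7 | uint32(kind))
}

// unpack mirrors unpackEdgeKindAndIndex.
func unpack(x int32) (uint8, int) {
	return uint8(x & 0x7f), int(x>>7) - 1
}

func main() {
	for _, tc := range []struct {
		kind  uint8
		index int
	}{{42, -1}, {42, 0}, {103, 1 << 20}} {
		k, i := unpack(pack(tc.kind, tc.index))
		fmt.Printf("kind=%d index=%d -> kind=%d index=%d\n",
			tc.kind, tc.index, k, i) // round-trips losslessly
	}
}
```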
+ +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/internal/astutil/edge" +) + +func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) { + for i, node := range list { + walk(v, ek, i, node) + } +} + +func walk(v *visitor, ek edge.Kind, index int, node ast.Node) { + v.push(ek, index, node) + + // walk children + // (the order of the cases matches the order + // of the corresponding node types in ast.go) + switch n := node.(type) { + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + walkList(v, edge.CommentGroup_List, n.List) + + case *ast.Field: + if n.Doc != nil { + walk(v, edge.Field_Doc, -1, n.Doc) + } + walkList(v, edge.Field_Names, n.Names) + if n.Type != nil { + walk(v, edge.Field_Type, -1, n.Type) + } + if n.Tag != nil { + walk(v, edge.Field_Tag, -1, n.Tag) + } + if n.Comment != nil { + walk(v, edge.Field_Comment, -1, n.Comment) + } + + case *ast.FieldList: + walkList(v, edge.FieldList_List, n.List) + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + if n.Elt != nil { + walk(v, edge.Ellipsis_Elt, -1, n.Elt) + } + + case *ast.FuncLit: + walk(v, edge.FuncLit_Type, -1, n.Type) + walk(v, edge.FuncLit_Body, -1, n.Body) + + case *ast.CompositeLit: + if n.Type != nil { + walk(v, edge.CompositeLit_Type, -1, n.Type) + } + walkList(v, edge.CompositeLit_Elts, n.Elts) + + case *ast.ParenExpr: + walk(v, edge.ParenExpr_X, -1, n.X) + + case *ast.SelectorExpr: + walk(v, edge.SelectorExpr_X, -1, n.X) + walk(v, edge.SelectorExpr_Sel, -1, n.Sel) + + case *ast.IndexExpr: + walk(v, edge.IndexExpr_X, -1, n.X) + walk(v, edge.IndexExpr_Index, -1, n.Index) + + case *ast.IndexListExpr: + walk(v, edge.IndexListExpr_X, -1, n.X) + walkList(v, edge.IndexListExpr_Indices, n.Indices) + + case *ast.SliceExpr: + walk(v, edge.SliceExpr_X, -1, n.X) + if n.Low != nil { + walk(v, edge.SliceExpr_Low, -1, n.Low) + } + if n.High != nil { + walk(v, edge.SliceExpr_High, -1, n.High) + } + if n.Max != nil { + walk(v, edge.SliceExpr_Max, -1, n.Max) + } + + case *ast.TypeAssertExpr: + walk(v, edge.TypeAssertExpr_X, -1, n.X) + if n.Type != nil { + walk(v, edge.TypeAssertExpr_Type, -1, n.Type) + } + + case *ast.CallExpr: + walk(v, edge.CallExpr_Fun, -1, n.Fun) + walkList(v, edge.CallExpr_Args, n.Args) + + case *ast.StarExpr: + walk(v, edge.StarExpr_X, -1, n.X) + + case *ast.UnaryExpr: + walk(v, edge.UnaryExpr_X, -1, n.X) + + case *ast.BinaryExpr: + walk(v, edge.BinaryExpr_X, -1, n.X) + walk(v, edge.BinaryExpr_Y, -1, n.Y) + + case *ast.KeyValueExpr: + walk(v, edge.KeyValueExpr_Key, -1, n.Key) + walk(v, edge.KeyValueExpr_Value, -1, n.Value) + + // Types + case *ast.ArrayType: + if n.Len != nil { + walk(v, edge.ArrayType_Len, -1, n.Len) + } + walk(v, edge.ArrayType_Elt, -1, n.Elt) + + case *ast.StructType: + walk(v, edge.StructType_Fields, -1, n.Fields) + + case *ast.FuncType: + if n.TypeParams != nil { + walk(v, edge.FuncType_TypeParams, -1, n.TypeParams) + } + if n.Params != nil { + walk(v, edge.FuncType_Params, -1, n.Params) + } + if n.Results != nil { + walk(v, edge.FuncType_Results, -1, n.Results) + } + + case *ast.InterfaceType: + walk(v, edge.InterfaceType_Methods, -1, n.Methods) + + case *ast.MapType: + walk(v, edge.MapType_Key, -1, n.Key) + walk(v, edge.MapType_Value, -1, n.Value) + + case *ast.ChanType: + walk(v, edge.ChanType_Value, -1, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + walk(v, edge.DeclStmt_Decl, -1, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + 
+ case *ast.LabeledStmt: + walk(v, edge.LabeledStmt_Label, -1, n.Label) + walk(v, edge.LabeledStmt_Stmt, -1, n.Stmt) + + case *ast.ExprStmt: + walk(v, edge.ExprStmt_X, -1, n.X) + + case *ast.SendStmt: + walk(v, edge.SendStmt_Chan, -1, n.Chan) + walk(v, edge.SendStmt_Value, -1, n.Value) + + case *ast.IncDecStmt: + walk(v, edge.IncDecStmt_X, -1, n.X) + + case *ast.AssignStmt: + walkList(v, edge.AssignStmt_Lhs, n.Lhs) + walkList(v, edge.AssignStmt_Rhs, n.Rhs) + + case *ast.GoStmt: + walk(v, edge.GoStmt_Call, -1, n.Call) + + case *ast.DeferStmt: + walk(v, edge.DeferStmt_Call, -1, n.Call) + + case *ast.ReturnStmt: + walkList(v, edge.ReturnStmt_Results, n.Results) + + case *ast.BranchStmt: + if n.Label != nil { + walk(v, edge.BranchStmt_Label, -1, n.Label) + } + + case *ast.BlockStmt: + walkList(v, edge.BlockStmt_List, n.List) + + case *ast.IfStmt: + if n.Init != nil { + walk(v, edge.IfStmt_Init, -1, n.Init) + } + walk(v, edge.IfStmt_Cond, -1, n.Cond) + walk(v, edge.IfStmt_Body, -1, n.Body) + if n.Else != nil { + walk(v, edge.IfStmt_Else, -1, n.Else) + } + + case *ast.CaseClause: + walkList(v, edge.CaseClause_List, n.List) + walkList(v, edge.CaseClause_Body, n.Body) + + case *ast.SwitchStmt: + if n.Init != nil { + walk(v, edge.SwitchStmt_Init, -1, n.Init) + } + if n.Tag != nil { + walk(v, edge.SwitchStmt_Tag, -1, n.Tag) + } + walk(v, edge.SwitchStmt_Body, -1, n.Body) + + case *ast.TypeSwitchStmt: + if n.Init != nil { + walk(v, edge.TypeSwitchStmt_Init, -1, n.Init) + } + walk(v, edge.TypeSwitchStmt_Assign, -1, n.Assign) + walk(v, edge.TypeSwitchStmt_Body, -1, n.Body) + + case *ast.CommClause: + if n.Comm != nil { + walk(v, edge.CommClause_Comm, -1, n.Comm) + } + walkList(v, edge.CommClause_Body, n.Body) + + case *ast.SelectStmt: + walk(v, edge.SelectStmt_Body, -1, n.Body) + + case *ast.ForStmt: + if n.Init != nil { + walk(v, edge.ForStmt_Init, -1, n.Init) + } + if n.Cond != nil { + walk(v, edge.ForStmt_Cond, -1, n.Cond) + } + if n.Post != nil { + walk(v, edge.ForStmt_Post, -1, n.Post) + } + walk(v, edge.ForStmt_Body, -1, n.Body) + + case *ast.RangeStmt: + if n.Key != nil { + walk(v, edge.RangeStmt_Key, -1, n.Key) + } + if n.Value != nil { + walk(v, edge.RangeStmt_Value, -1, n.Value) + } + walk(v, edge.RangeStmt_X, -1, n.X) + walk(v, edge.RangeStmt_Body, -1, n.Body) + + // Declarations + case *ast.ImportSpec: + if n.Doc != nil { + walk(v, edge.ImportSpec_Doc, -1, n.Doc) + } + if n.Name != nil { + walk(v, edge.ImportSpec_Name, -1, n.Name) + } + walk(v, edge.ImportSpec_Path, -1, n.Path) + if n.Comment != nil { + walk(v, edge.ImportSpec_Comment, -1, n.Comment) + } + + case *ast.ValueSpec: + if n.Doc != nil { + walk(v, edge.ValueSpec_Doc, -1, n.Doc) + } + walkList(v, edge.ValueSpec_Names, n.Names) + if n.Type != nil { + walk(v, edge.ValueSpec_Type, -1, n.Type) + } + walkList(v, edge.ValueSpec_Values, n.Values) + if n.Comment != nil { + walk(v, edge.ValueSpec_Comment, -1, n.Comment) + } + + case *ast.TypeSpec: + if n.Doc != nil { + walk(v, edge.TypeSpec_Doc, -1, n.Doc) + } + walk(v, edge.TypeSpec_Name, -1, n.Name) + if n.TypeParams != nil { + walk(v, edge.TypeSpec_TypeParams, -1, n.TypeParams) + } + walk(v, edge.TypeSpec_Type, -1, n.Type) + if n.Comment != nil { + walk(v, edge.TypeSpec_Comment, -1, n.Comment) + } + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + if n.Doc != nil { + walk(v, edge.GenDecl_Doc, -1, n.Doc) + } + walkList(v, edge.GenDecl_Specs, n.Specs) + + case *ast.FuncDecl: + if n.Doc != nil { + walk(v, edge.FuncDecl_Doc, -1, n.Doc) + } + if n.Recv != nil { + walk(v, 
edge.FuncDecl_Recv, -1, n.Recv) + } + walk(v, edge.FuncDecl_Name, -1, n.Name) + walk(v, edge.FuncDecl_Type, -1, n.Type) + if n.Body != nil { + walk(v, edge.FuncDecl_Body, -1, n.Body) + } + + case *ast.File: + if n.Doc != nil { + walk(v, edge.File_Doc, -1, n.Doc) + } + walk(v, edge.File_Name, -1, n.Name) + walkList(v, edge.File_Decls, n.Decls) + // don't walk n.Comments - they have been + // visited already through the individual + // nodes + + default: + // (includes *ast.Package) + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.pop(node) +} diff --git a/vendor/golang.org/x/tools/internal/astutil/edge/edge.go b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go new file mode 100644 index 00000000000..4f6ccfd6e5e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go @@ -0,0 +1,295 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edge defines identifiers for each field of an ast.Node +// struct type that refers to another Node. +package edge + +import ( + "fmt" + "go/ast" + "reflect" +) + +// A Kind describes a field of an ast.Node struct. +type Kind uint8 + +// String returns a description of the edge kind. +func (k Kind) String() string { + if k == Invalid { + return "" + } + info := fieldInfos[k] + return fmt.Sprintf("%v.%s", info.nodeType.Elem().Name(), info.name) +} + +// NodeType returns the pointer-to-struct type of the ast.Node implementation. +func (k Kind) NodeType() reflect.Type { return fieldInfos[k].nodeType } + +// FieldName returns the name of the field. +func (k Kind) FieldName() string { return fieldInfos[k].name } + +// FieldType returns the declared type of the field. +func (k Kind) FieldType() reflect.Type { return fieldInfos[k].fieldType } + +// Get returns the direct child of n identified by (k, idx). +// n's type must match k.NodeType(). +// idx must be a valid slice index, or -1 for a non-slice. +func (k Kind) Get(n ast.Node, idx int) ast.Node { + if k.NodeType() != reflect.TypeOf(n) { + panic(fmt.Sprintf("%v.Get(%T): invalid node type", k, n)) + } + v := reflect.ValueOf(n).Elem().Field(fieldInfos[k].index) + if idx != -1 { + v = v.Index(idx) // asserts valid index + } else { + // (The type assertion below asserts that v is not a slice.) + } + return v.Interface().(ast.Node) // may be nil +} + +const ( + Invalid Kind = iota // for nodes at the root of the traversal + + // Kinds are sorted alphabetically. + // Numbering is not stable. 
+ // Each is named Type_Field, where Type is the + // ast.Node struct type and Field is the name of the field + + ArrayType_Elt + ArrayType_Len + AssignStmt_Lhs + AssignStmt_Rhs + BinaryExpr_X + BinaryExpr_Y + BlockStmt_List + BranchStmt_Label + CallExpr_Args + CallExpr_Fun + CaseClause_Body + CaseClause_List + ChanType_Value + CommClause_Body + CommClause_Comm + CommentGroup_List + CompositeLit_Elts + CompositeLit_Type + DeclStmt_Decl + DeferStmt_Call + Ellipsis_Elt + ExprStmt_X + FieldList_List + Field_Comment + Field_Doc + Field_Names + Field_Tag + Field_Type + File_Decls + File_Doc + File_Name + ForStmt_Body + ForStmt_Cond + ForStmt_Init + ForStmt_Post + FuncDecl_Body + FuncDecl_Doc + FuncDecl_Name + FuncDecl_Recv + FuncDecl_Type + FuncLit_Body + FuncLit_Type + FuncType_Params + FuncType_Results + FuncType_TypeParams + GenDecl_Doc + GenDecl_Specs + GoStmt_Call + IfStmt_Body + IfStmt_Cond + IfStmt_Else + IfStmt_Init + ImportSpec_Comment + ImportSpec_Doc + ImportSpec_Name + ImportSpec_Path + IncDecStmt_X + IndexExpr_Index + IndexExpr_X + IndexListExpr_Indices + IndexListExpr_X + InterfaceType_Methods + KeyValueExpr_Key + KeyValueExpr_Value + LabeledStmt_Label + LabeledStmt_Stmt + MapType_Key + MapType_Value + ParenExpr_X + RangeStmt_Body + RangeStmt_Key + RangeStmt_Value + RangeStmt_X + ReturnStmt_Results + SelectStmt_Body + SelectorExpr_Sel + SelectorExpr_X + SendStmt_Chan + SendStmt_Value + SliceExpr_High + SliceExpr_Low + SliceExpr_Max + SliceExpr_X + StarExpr_X + StructType_Fields + SwitchStmt_Body + SwitchStmt_Init + SwitchStmt_Tag + TypeAssertExpr_Type + TypeAssertExpr_X + TypeSpec_Comment + TypeSpec_Doc + TypeSpec_Name + TypeSpec_Type + TypeSpec_TypeParams + TypeSwitchStmt_Assign + TypeSwitchStmt_Body + TypeSwitchStmt_Init + UnaryExpr_X + ValueSpec_Comment + ValueSpec_Doc + ValueSpec_Names + ValueSpec_Type + ValueSpec_Values + + maxKind +) + +// Assert that the encoding fits in 7 bits, +// as the inspector relies on this. +// (We are currently at 104.) 
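// Editorial note (not part of the vendored file): indexing a [1 << 7]struct{}
// array with maxKind below is a zero-cost compile-time bound check; the
// package stops compiling as soon as maxKind exceeds 127.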
+var _ = [1 << 7]struct{}{}[maxKind] + +type fieldInfo struct { + nodeType reflect.Type // pointer-to-struct type of ast.Node implementation + name string + index int + fieldType reflect.Type +} + +func info[N ast.Node](fieldName string) fieldInfo { + nodePtrType := reflect.TypeFor[N]() + f, ok := nodePtrType.Elem().FieldByName(fieldName) + if !ok { + panic(fieldName) + } + return fieldInfo{nodePtrType, fieldName, f.Index[0], f.Type} +} + +var fieldInfos = [...]fieldInfo{ + Invalid: {}, + ArrayType_Elt: info[*ast.ArrayType]("Elt"), + ArrayType_Len: info[*ast.ArrayType]("Len"), + AssignStmt_Lhs: info[*ast.AssignStmt]("Lhs"), + AssignStmt_Rhs: info[*ast.AssignStmt]("Rhs"), + BinaryExpr_X: info[*ast.BinaryExpr]("X"), + BinaryExpr_Y: info[*ast.BinaryExpr]("Y"), + BlockStmt_List: info[*ast.BlockStmt]("List"), + BranchStmt_Label: info[*ast.BranchStmt]("Label"), + CallExpr_Args: info[*ast.CallExpr]("Args"), + CallExpr_Fun: info[*ast.CallExpr]("Fun"), + CaseClause_Body: info[*ast.CaseClause]("Body"), + CaseClause_List: info[*ast.CaseClause]("List"), + ChanType_Value: info[*ast.ChanType]("Value"), + CommClause_Body: info[*ast.CommClause]("Body"), + CommClause_Comm: info[*ast.CommClause]("Comm"), + CommentGroup_List: info[*ast.CommentGroup]("List"), + CompositeLit_Elts: info[*ast.CompositeLit]("Elts"), + CompositeLit_Type: info[*ast.CompositeLit]("Type"), + DeclStmt_Decl: info[*ast.DeclStmt]("Decl"), + DeferStmt_Call: info[*ast.DeferStmt]("Call"), + Ellipsis_Elt: info[*ast.Ellipsis]("Elt"), + ExprStmt_X: info[*ast.ExprStmt]("X"), + FieldList_List: info[*ast.FieldList]("List"), + Field_Comment: info[*ast.Field]("Comment"), + Field_Doc: info[*ast.Field]("Doc"), + Field_Names: info[*ast.Field]("Names"), + Field_Tag: info[*ast.Field]("Tag"), + Field_Type: info[*ast.Field]("Type"), + File_Decls: info[*ast.File]("Decls"), + File_Doc: info[*ast.File]("Doc"), + File_Name: info[*ast.File]("Name"), + ForStmt_Body: info[*ast.ForStmt]("Body"), + ForStmt_Cond: info[*ast.ForStmt]("Cond"), + ForStmt_Init: info[*ast.ForStmt]("Init"), + ForStmt_Post: info[*ast.ForStmt]("Post"), + FuncDecl_Body: info[*ast.FuncDecl]("Body"), + FuncDecl_Doc: info[*ast.FuncDecl]("Doc"), + FuncDecl_Name: info[*ast.FuncDecl]("Name"), + FuncDecl_Recv: info[*ast.FuncDecl]("Recv"), + FuncDecl_Type: info[*ast.FuncDecl]("Type"), + FuncLit_Body: info[*ast.FuncLit]("Body"), + FuncLit_Type: info[*ast.FuncLit]("Type"), + FuncType_Params: info[*ast.FuncType]("Params"), + FuncType_Results: info[*ast.FuncType]("Results"), + FuncType_TypeParams: info[*ast.FuncType]("TypeParams"), + GenDecl_Doc: info[*ast.GenDecl]("Doc"), + GenDecl_Specs: info[*ast.GenDecl]("Specs"), + GoStmt_Call: info[*ast.GoStmt]("Call"), + IfStmt_Body: info[*ast.IfStmt]("Body"), + IfStmt_Cond: info[*ast.IfStmt]("Cond"), + IfStmt_Else: info[*ast.IfStmt]("Else"), + IfStmt_Init: info[*ast.IfStmt]("Init"), + ImportSpec_Comment: info[*ast.ImportSpec]("Comment"), + ImportSpec_Doc: info[*ast.ImportSpec]("Doc"), + ImportSpec_Name: info[*ast.ImportSpec]("Name"), + ImportSpec_Path: info[*ast.ImportSpec]("Path"), + IncDecStmt_X: info[*ast.IncDecStmt]("X"), + IndexExpr_Index: info[*ast.IndexExpr]("Index"), + IndexExpr_X: info[*ast.IndexExpr]("X"), + IndexListExpr_Indices: info[*ast.IndexListExpr]("Indices"), + IndexListExpr_X: info[*ast.IndexListExpr]("X"), + InterfaceType_Methods: info[*ast.InterfaceType]("Methods"), + KeyValueExpr_Key: info[*ast.KeyValueExpr]("Key"), + KeyValueExpr_Value: info[*ast.KeyValueExpr]("Value"), + LabeledStmt_Label: info[*ast.LabeledStmt]("Label"), + 
LabeledStmt_Stmt: info[*ast.LabeledStmt]("Stmt"), + MapType_Key: info[*ast.MapType]("Key"), + MapType_Value: info[*ast.MapType]("Value"), + ParenExpr_X: info[*ast.ParenExpr]("X"), + RangeStmt_Body: info[*ast.RangeStmt]("Body"), + RangeStmt_Key: info[*ast.RangeStmt]("Key"), + RangeStmt_Value: info[*ast.RangeStmt]("Value"), + RangeStmt_X: info[*ast.RangeStmt]("X"), + ReturnStmt_Results: info[*ast.ReturnStmt]("Results"), + SelectStmt_Body: info[*ast.SelectStmt]("Body"), + SelectorExpr_Sel: info[*ast.SelectorExpr]("Sel"), + SelectorExpr_X: info[*ast.SelectorExpr]("X"), + SendStmt_Chan: info[*ast.SendStmt]("Chan"), + SendStmt_Value: info[*ast.SendStmt]("Value"), + SliceExpr_High: info[*ast.SliceExpr]("High"), + SliceExpr_Low: info[*ast.SliceExpr]("Low"), + SliceExpr_Max: info[*ast.SliceExpr]("Max"), + SliceExpr_X: info[*ast.SliceExpr]("X"), + StarExpr_X: info[*ast.StarExpr]("X"), + StructType_Fields: info[*ast.StructType]("Fields"), + SwitchStmt_Body: info[*ast.SwitchStmt]("Body"), + SwitchStmt_Init: info[*ast.SwitchStmt]("Init"), + SwitchStmt_Tag: info[*ast.SwitchStmt]("Tag"), + TypeAssertExpr_Type: info[*ast.TypeAssertExpr]("Type"), + TypeAssertExpr_X: info[*ast.TypeAssertExpr]("X"), + TypeSpec_Comment: info[*ast.TypeSpec]("Comment"), + TypeSpec_Doc: info[*ast.TypeSpec]("Doc"), + TypeSpec_Name: info[*ast.TypeSpec]("Name"), + TypeSpec_Type: info[*ast.TypeSpec]("Type"), + TypeSpec_TypeParams: info[*ast.TypeSpec]("TypeParams"), + TypeSwitchStmt_Assign: info[*ast.TypeSwitchStmt]("Assign"), + TypeSwitchStmt_Body: info[*ast.TypeSwitchStmt]("Body"), + TypeSwitchStmt_Init: info[*ast.TypeSwitchStmt]("Init"), + UnaryExpr_X: info[*ast.UnaryExpr]("X"), + ValueSpec_Comment: info[*ast.ValueSpec]("Comment"), + ValueSpec_Doc: info[*ast.ValueSpec]("Doc"), + ValueSpec_Names: info[*ast.ValueSpec]("Names"), + ValueSpec_Type: info[*ast.ValueSpec]("Type"), + ValueSpec_Values: info[*ast.ValueSpec]("Values"), +} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 00000000000..7348c50c0c3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
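For readers skimming the vendored edge package earlier in this patch: its fieldInfos table is built by a small generic reflection helper, info[N]. Below is a minimal standalone sketch of that lookup, assuming Go 1.22+ for reflect.TypeFor; it is illustrative only and not part of the vendored tree (the edge package itself is internal to x/tools and cannot be imported directly).

```Go
package main

import (
	"fmt"
	"go/ast"
	"reflect"
)

// fieldOf mirrors the edge package's info[N] helper: resolve a field of an
// ast.Node struct by name, via the pointer-to-struct type parameter N.
func fieldOf[N ast.Node](name string) reflect.StructField {
	f, ok := reflect.TypeFor[N]().Elem().FieldByName(name)
	if !ok {
		panic(name) // unknown field name: a programming error
	}
	return f
}

func main() {
	// The edge kind BinaryExpr_X denotes the X operand field of *ast.BinaryExpr.
	f := fieldOf[*ast.BinaryExpr]("X")
	fmt.Println(f.Name, f.Type) // prints: X ast.Expr
}
```

Kind.Get then reads the same field dynamically and asserts the result back to ast.Node, which is why calling it with a mismatched node type panics.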
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 00000000000..8da58fbf6f8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 00000000000..866d74a7ad7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 00000000000..b50c6e87755 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. 
+ +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 00000000000..acf71402cf3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,744 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. 
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +var disableLineWrapping = false + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + if disableLineWrapping { + emitter.best_width = -1 + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 00000000000..129bc2a97d3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
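// Editorial note (not part of the vendored file): decoding is two-phase. The
// parser below replays libyaml events into this *node tree; the decoder later
// in the file then walks the tree into the caller's Go values via reflection.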
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML parser") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML parser") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer.
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
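// Editorial note (not part of the vendored file): d.aliases marks anchors
// whose expansion is currently in progress, so re-entering one here means
// the alias ultimately expands to itself.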
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO Does this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence.
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 00000000000..a1c2cc52627 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
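yaml_emitter_need_more_events above explains why yaml_emitter_emit queues events rather than writing immediately: the emitter holds back until one extra event is buffered after DOCUMENT-START, two after SEQUENCE-START, and three after MAPPING-START, so that yaml_emitter_check_empty_sequence, yaml_emitter_check_empty_mapping, and yaml_emitter_check_simple_key can look ahead before a style is committed. One visible consequence, sketched with illustrative values: an empty collection is emitted in flow style even inside an otherwise block-style document.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]interface{}{
		"empty": []int{}, // lookahead sees SEQUENCE-START immediately followed by SEQUENCE-END
		"full":  []int{1, 2},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Output (keys are sorted by the encoder):
	// empty: []
	// full:
	// - 1
	// - 2
}

The per-state emit functions follow, beginning with STREAM-START.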
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
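Note how document boundaries are handled above: the "---" marker is written only when the document-start event is not implicit (the first document of a stream usually is), "..." closes an open-ended document before new directives, and yaml_emitter_emit_document_end resets the tag-directive stack for the next document. Through the streaming encoder this is what separates consecutive documents; a small sketch, values illustrative:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	_ = enc.Encode(map[string]int{"a": 1}) // first document: implicit start, no marker
	_ = enc.Encode(map[string]int{"b": 2}) // later documents are introduced by "---"
	enc.Close()
	fmt.Print(buf.String())
	// a: 1
	// ---
	// b: 2
}

Flow-mapping emission continues below.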
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
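yaml_emitter_analyze_scalar is the gatekeeper for yaml_emitter_select_scalar_style further up: leading or trailing whitespace forbids plain style, a space adjacent to a line break additionally forbids single quotes, and line breaks mark the scalar as multiline so block (or, in simple-key position, double-quoted) styles are used. Roughly, with illustrative values:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, _ := yaml.Marshal(map[string]string{
		"multi":  "line 1\nline 2\n", // line breaks: literal block style
		"padded": " hello ",          // leading/trailing space: must be quoted
		"plain":  "hello",            // nothing special: plain style
	})
	fmt.Print(string(out))
	// multi: |
	//   line 1
	//   line 2
	// padded: ' hello '
	// plain: hello
}

The low-level writers that act on this analysis follow, starting with the BOM.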
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 00000000000..0ee738e11b6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" 
+ "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 00000000000..81d05dfe573 --- /dev/null +++ 
b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
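+//
+// Editorial sketch (not part of upstream yaml.v2): callers inside this
+// package drive the dispatcher below through yaml_parser_parse in a plain
+// event loop, roughly (assuming parser is an initialized yaml_parser_t):
+//
+//	var event yaml_event_t
+//	for {
+//		if !yaml_parser_parse(&parser, &event) {
+//			break // parser.problem and parser.problem_mark describe the error
+//		}
+//		if event.typ == yaml_STREAM_END_EVENT {
+//			break
+//		}
+//		// ... dispatch on event.typ ...
+//	}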
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//                          ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
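+//
+// Editorial note (not upstream): empty scalars stand in for omitted nodes.
+// For example, the block mapping line "key:" has no value node, so the
+// parser emits an empty SCALAR event (nil value, plain style, implicit)
+// positioned at the mark where the value would have started, via the
+// helper below.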
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 00000000000..7c1f5fac3db --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
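+//
+// Editorial sketch (not upstream): yaml_parser_determine_encoding below
+// compares the head of the raw buffer against these byte order marks. A
+// standalone equivalent of the check, with a hypothetical helper name:
+//
+//	func sniffEncoding(b []byte) string {
+//		switch {
+//		case len(b) >= 2 && b[0] == 0xFF && b[1] == 0xFE:
+//			return "utf-16le"
+//		case len(b) >= 2 && b[0] == 0xFE && b[1] == 0xFF:
+//			return "utf-16be"
+//		case len(b) >= 3 && b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF:
+//			return "utf-8" // explicit UTF-8 BOM
+//		}
+//		return "utf-8" // no BOM: YAML defaults to UTF-8
+//	}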
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
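+				//
+				// Editorial note (not upstream): this rejects overlong
+				// encodings. For example, the two-byte sequence 0xC0 0x80
+				// decodes to value 0 with width 2, but width 2 requires a
+				// value of at least 0x80, so it falls through to the default
+				// branch below and is reported as an invalid UTF-8 sequence.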
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 00000000000..4120e0c9160 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
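+			//
+			// Editorial examples (not upstream): under this rule the plain
+			// scalar 2001-12-15 resolves to !!timestamp, while a quoted
+			// "2001-12-15" is already committed to !!str by the decoder and
+			// never reaches this branch.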
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 00000000000..0b9bb6030a0
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2711 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever";
+// the rest is quite straightforward. The issues are "block collection start"
+// and "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !      !foo
+//      %TAG    !yaml! tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...'
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ?
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
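+	// (Scanning past whitespace also re-enables simple keys whenever a
+	// line break is consumed in the block context; see
+	// yaml_parser_scan_to_next_token below.)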
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
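+		// A non-negative 'number' is an absolute token number (the simple
+		// key case, where a BLOCK-MAPPING-START must be inserted in front
+		// of an already queued KEY token); translate it into a position
+		// relative to the live queue. -1 simply appends at the tail.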
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
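+	// Both indicators ('---' and '...') are exactly three characters long,
+	// hence the three skips.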
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it because the Parser is
+		// able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+		delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// An anchor or an alias could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow an anchor or an alias.
+	parser.simple_key_allowed = false
+
+	// Create the ALIAS or ANCHOR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, typ) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	// A tag could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a tag.
+	parser.simple_key_allowed = false
+
+	// Create the TAG token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	// Remove any potential simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// A simple key may follow a block scalar.
+	parser.simple_key_allowed = true
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A flow scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a flow scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//    after '-', '?', or ':' (complex value).
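+		// Put differently: a tab is consumed as whitespace here only when
+		// it cannot form indentation that a simple key would depend on,
+		// i.e. inside a flow collection or at a position where no simple
+		// key may start.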
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
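+		// (Verbatim form: in '!<tag:yaml.org,2002:str>' everything between
+		// the angle brackets was scanned as the suffix above, and the
+		// handle stays empty.)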
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
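+	// A block scalar header combines the chomping indicator ('+' keep the
+	// trailing line breaks, '-' strip them) with an optional explicit
+	// indentation digit, in either order: e.g. '|+2', '|2+', '>-'.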
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
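+		// (Together with the check above, this implements folding for '>'
+		// scalars: a single break between two non-blank lines becomes one
+		// space, while additional blank lines survive as literal breaks.)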
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&&
+			parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
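// Editor's sketch (illustrative only, not part of the vendored patch): the
// branches that follow encode the scanned \xXX, \uXXXX or \UXXXXXXXX value
// as one to four UTF-8 bytes, rejecting the surrogate range U+D800..U+DFFF
// and anything above U+10FFFF. The same encoding, cross-checked against the
// standard library (encodeEscapeValue is a hypothetical name):

package main

import (
	"fmt"
	"unicode/utf8"
)

func encodeEscapeValue(value int) ([]byte, error) {
	if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
		return nil, fmt.Errorf("found invalid Unicode character escape code")
	}
	var s []byte
	switch {
	case value <= 0x7F:
		s = append(s, byte(value))
	case value <= 0x7FF:
		s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
	case value <= 0xFFFF:
		s = append(s, byte(0xE0+(value>>12)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	default:
		s = append(s, byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)),
			byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	}
	return s, nil
}

func main() {
	for _, v := range []int{0x41, 0xE9, 0x2028, 0x1F600} {
		got, err := encodeEscapeValue(v)
		if err != nil {
			panic(err)
		}
		buf := make([]byte, 4)
		n := utf8.EncodeRune(buf, rune(v))
		fmt.Println(string(got) == string(buf[:n])) // true for every value
	}
}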
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
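// Editor's sketch (illustrative only, not part of the vendored patch): the
// consume loop above stops a plain scalar at ':' followed by a blank and, in
// flow context, at the flow indicators. A stand-alone predicate with the
// hypothetical name endsPlainScalar, simplified to ASCII blanks (the real
// is_blankz also accepts the multi-byte NEL/LS/PS breaks):

package main

import "fmt"

func endsPlainScalar(cur, next byte, flowLevel int) bool {
	if cur == ':' && (next == ' ' || next == '\t' || next == '\r' || next == '\n' || next == 0) {
		return true
	}
	if flowLevel > 0 {
		switch cur {
		case ',', '?', '[', ']', '{', '}':
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(endsPlainScalar(':', ' ', 0)) // true: "key: value" ends the key
	fmt.Println(endsPlainScalar(':', '3', 0)) // false: "12:30" stays one scalar
	fmt.Println(endsPlainScalar(',', 'x', 1)) // true inside [...] or {...}
	fmt.Println(endsPlainScalar(',', 'x', 0)) // false at block level
}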
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 00000000000..4c45e660a8f --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 00000000000..a2dde608cb7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
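// Editor's sketch (illustrative only, not part of the vendored patch): the
// keyList comparator in sorter.go above gives map keys a natural order
// (numbers before letters, embedded integers compared by value), which is
// the order the encoder uses when marshalling Go maps:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"a10": 3, "a2": 2, "b": 4})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// a2 sorts before a10 because 2 < 10:
	//   a2: 2
	//   a10: 3
	//   b: 4
}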
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 00000000000..30813884c06
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,478 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//     https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
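// Editor's sketch (illustrative only, not part of the vendored patch),
// matching the Unmarshal documentation above: decoding into tagged struct
// fields, with UnmarshalStrict rejecting keys that have no corresponding
// member.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type T struct {
	F int `yaml:"a,omitempty"`
	B int
}

func main() {
	var t T
	if err := yaml.Unmarshal([]byte("a: 1\nb: 2"), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.F, t.B) // 1 2

	// The strict variant fails on unknown keys instead of ignoring them.
	err := yaml.UnmarshalStrict([]byte("a: 1\nunknown: 3"), &t)
	fmt.Println(err != nil) // true
}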
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, true)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+	dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder(dec.strict)
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder(strict)
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be excluded if IsZero returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
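// Editor's sketch (illustrative only, not part of the vendored patch) for
// the tag flags documented above: omitempty, flow, and ,inline.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Point struct {
	X, Y int
}

type Outer struct {
	Name  string            `yaml:"name"`
	Opts  map[string]string `yaml:",inline"`        // keys merged into the outer mapping
	Where Point             `yaml:"where,flow"`     // emitted as {x: 1, y: 2}
	Note  string            `yaml:"note,omitempty"` // dropped while empty
}

func main() {
	out, err := yaml.Marshal(Outer{
		Name:  "demo",
		Opts:  map[string]string{"extra": "v"},
		Where: Point{X: 1, Y: 2},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: demo
	// extra: v
	// where: {x: 1, y: 2}
	// (note is omitted because it is empty)
}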
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
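// Editor's sketch (illustrative only, not part of the vendored patch): a
// type can take control of omitempty by implementing the IsZeroer interface
// declared just below.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Flag struct {
	Set bool
}

func (f Flag) IsZero() bool { return !f.Set }

type Doc struct {
	A Flag `yaml:"a,omitempty"` // omitted: IsZero reports true
	B Flag `yaml:"b,omitempty"` // kept: IsZero reports false
}

func main() {
	out, err := yaml.Marshal(Doc{B: Flag{Set: true}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // only the "b" mapping appears
}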
+type IsZeroer interface {
+	IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// FutureLineWrap globally disables line wrapping when encoding long strings.
+// This is a temporary and thus deprecated method introduced to facilitate
+// migration towards v3, which offers more control of line lengths on
+// individual encodings, and has a default matching the behavior introduced
+// by this function.
+//
+// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
+// in v2.4.0, at which point this function was introduced to help migration.
+func FutureLineWrap() {
+	disableLineWrapping = true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 00000000000..f6a9c8e34b1
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+	"fmt"
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. 
+	multiline bool // Does the scalar contain line breaks?
+	flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+	block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+	single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+	block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+	style yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000000..8110ce3c37a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
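// Editor's sketch (illustrative only, not part of the vendored patch): the
// predicates above work on raw UTF-8 bytes with lookahead rather than on
// decoded runes. For example, is_printable inspects up to three bytes to
// accept NEL and ordinary BMP text while excluding the surrogate area,
// #xFEFF, and #xFFFE/#xFFFF. A rune-based wrapper over the same byte tests:

package main

import (
	"fmt"
	"unicode/utf8"
)

func printable(r rune) bool {
	var b [4]byte
	utf8.EncodeRune(b[:], r)
	return b[0] == 0x0A ||
		(b[0] >= 0x20 && b[0] <= 0x7E) ||
		(b[0] == 0xC2 && b[1] >= 0xA0) ||
		(b[0] > 0xC2 && b[0] < 0xED) ||
		(b[0] == 0xED && b[1] < 0xA0) ||
		b[0] == 0xEE ||
		(b[0] == 0xEF &&
			!(b[1] == 0xBB && b[2] == 0xBF) &&
			!(b[1] == 0xBF && (b[2] == 0xBE || b[2] == 0xBF)))
}

func main() {
	fmt.Println(printable('\n'), printable('A'), printable('é'))       // true true true
	fmt.Println(printable(0x07), printable(0xFEFF), printable(0xFFFF)) // false false false
}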
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index cbe19f6e3d3..d4202a60459 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -144,8 +144,8 @@ github.com/containernetworking/cni/pkg/types/create github.com/containernetworking/cni/pkg/types/internal github.com/containernetworking/cni/pkg/utils github.com/containernetworking/cni/pkg/version -# github.com/containernetworking/plugins v1.5.1 -## explicit; go 1.20 +# github.com/containernetworking/plugins v1.6.2 +## explicit; go 1.23 github.com/containernetworking/plugins/pkg/ns # github.com/containers/buildah v1.39.2 ## explicit; go 1.22.8 @@ -179,8 +179,8 @@ github.com/containers/buildah/pkg/sshagent github.com/containers/buildah/pkg/util github.com/containers/buildah/pkg/volumes github.com/containers/buildah/util -# github.com/containers/common v0.62.2-0.20250311121556-b27979403716 -## explicit; go 1.22.8 +# github.com/containers/common v0.62.3-0.20250320113334-33bf9345b5ef +## explicit; go 1.23.0 github.com/containers/common/internal github.com/containers/common/internal/attributedstring github.com/containers/common/libimage @@ -252,7 +252,7 @@ github.com/containers/conmon/runner/config # github.com/containers/gvisor-tap-vsock v0.8.4 ## explicit; go 1.22.0 github.com/containers/gvisor-tap-vsock/pkg/types -# github.com/containers/image/v5 v5.34.2-0.20250306154130-12497efe55ac +# github.com/containers/image/v5 v5.34.3-0.20250311194052-d84dbab374e7 ## explicit; go 1.22.8 github.com/containers/image/v5/copy github.com/containers/image/v5/directory @@ -795,8 +795,8 @@ github.com/mattn/go-shellwords # github.com/mattn/go-sqlite3 v1.14.24 ## explicit; go 1.19 github.com/mattn/go-sqlite3 -# github.com/mdlayher/socket v0.5.0 -## explicit; go 1.21 +# github.com/mdlayher/socket v0.5.1 +## explicit; go 1.20 github.com/mdlayher/socket # github.com/mdlayher/vsock v1.2.1 ## explicit; go 1.20 @@ -860,8 +860,8 @@ github.com/nxadm/tail/winfile # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid -# github.com/onsi/ginkgo/v2 v2.22.2 -## explicit; go 1.22.0 +# github.com/onsi/ginkgo/v2 v2.23.1 +## explicit; go 1.23.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config github.com/onsi/ginkgo/v2/formatter @@ -899,6 +899,14 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types +# github.com/opencontainers/cgroups v0.0.1 +## explicit; go 1.23.0 +github.com/opencontainers/cgroups +github.com/opencontainers/cgroups/devices/config +github.com/opencontainers/cgroups/fs +github.com/opencontainers/cgroups/fs2 +github.com/opencontainers/cgroups/fscommon +github.com/opencontainers/cgroups/internal/path # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest @@ -909,11 +917,6 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runc v1.2.6 ## explicit; go 1.22 github.com/opencontainers/runc/libcontainer/apparmor -github.com/opencontainers/runc/libcontainer/cgroups -github.com/opencontainers/runc/libcontainer/cgroups/fs -github.com/opencontainers/runc/libcontainer/cgroups/fs2 -github.com/opencontainers/runc/libcontainer/cgroups/fscommon -github.com/opencontainers/runc/libcontainer/configs github.com/opencontainers/runc/libcontainer/devices 
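The width helper just shown classifies a UTF-8 sequence purely from its leading byte (0xxxxxxx, 110xxxxx, 1110xxxx, 11110xxx). A standalone sketch cross-checking that mask logic against the standard library's decoder:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// seqWidth mirrors the vendored width helper: derive the UTF-8
// sequence length from its leading byte. Returns 0 for an invalid
// leading byte (e.g. a continuation byte of the form 10xxxxxx).
func seqWidth(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

func main() {
	for _, s := range []string{"a", "é", "€", "🙂"} {
		// Cross-check against the standard library.
		_, n := utf8.DecodeRuneInString(s)
		fmt.Printf("%q: width=%d stdlib=%d\n", s, seqWidth(s[0]), n)
	}
}
```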
github.com/opencontainers/runc/libcontainer/user github.com/opencontainers/runc/libcontainer/utils @@ -957,7 +960,7 @@ github.com/pelletier/go-toml/v2/unstable # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/pkg/sftp v1.13.7 +# github.com/pkg/sftp v1.13.8 ## explicit; go 1.15 github.com/pkg/sftp github.com/pkg/sftp/internal/encoding/ssh/filexfer @@ -1130,9 +1133,12 @@ github.com/vishvananda/netns # github.com/yusufpapurcu/wmi v1.2.4 ## explicit; go 1.16 github.com/yusufpapurcu/wmi -# go.etcd.io/bbolt v1.3.11 -## explicit; go 1.22 +# go.etcd.io/bbolt v1.4.0 +## explicit; go 1.23 go.etcd.io/bbolt +go.etcd.io/bbolt/errors +go.etcd.io/bbolt/internal/common +go.etcd.io/bbolt/internal/freelist # go.mongodb.org/mongo-driver v1.14.0 ## explicit; go 1.18 go.mongodb.org/mongo-driver/bson @@ -1217,7 +1223,7 @@ golang.org/x/crypto/xts # golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c ## explicit; go 1.22.0 golang.org/x/exp/maps -# golang.org/x/mod v0.22.0 +# golang.org/x/mod v0.23.0 ## explicit; go 1.22.0 golang.org/x/mod/semver golang.org/x/mod/sumdb/note @@ -1281,10 +1287,11 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.9.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.29.0 +# golang.org/x/tools v0.30.0 ## explicit; go 1.22.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector +golang.org/x/tools/internal/astutil/edge # google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 ## explicit; go 1.21 google.golang.org/genproto/googleapis/api @@ -1399,6 +1406,9 @@ gopkg.in/natefinch/lumberjack.v2 # gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 ## explicit gopkg.in/tomb.v1 +# gopkg.in/yaml.v2 v2.4.0 +## explicit; go 1.15 +gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 @@ -1406,12 +1416,12 @@ gopkg.in/yaml.v3 ## explicit; go 1.12 sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 -# tags.cncf.io/container-device-interface v0.8.1 +# tags.cncf.io/container-device-interface v1.0.0 ## explicit; go 1.20 tags.cncf.io/container-device-interface/internal/validation tags.cncf.io/container-device-interface/internal/validation/k8s tags.cncf.io/container-device-interface/pkg/cdi tags.cncf.io/container-device-interface/pkg/parser -# tags.cncf.io/container-device-interface/specs-go v0.8.0 +# tags.cncf.io/container-device-interface/specs-go v1.0.0 ## explicit; go 1.19 tags.cncf.io/container-device-interface/specs-go diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/annotations.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/annotations.go index a38b0f1bcf9..a596c610bf1 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/annotations.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/annotations.go @@ -71,7 +71,7 @@ func ParseAnnotations(annotations map[string]string) ([]string, []string, error) continue } for _, d := range strings.Split(value, ",") { - if !IsQualifiedName(d) { + if !parser.IsQualifiedName(d) { return nil, nil, fmt.Errorf("invalid CDI device name %q", d) } devices = append(devices, d) @@ -130,7 +130,7 @@ func AnnotationKey(pluginName, deviceID string) (string, error) { func AnnotationValue(devices []string) (string, error) { value, sep := "", "" for _, d := range devices { - if _, _, _, err := ParseQualifiedName(d); err != nil { + if _, _, _, err := parser.ParseQualifiedName(d); err != nil { return "", err } value += sep + d diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache.go 
b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache.go index 9afa4b18202..7095f27daec 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache.go @@ -22,6 +22,7 @@ import ( "io/fs" "os" "path/filepath" + "runtime" "sort" "strings" "sync" @@ -116,7 +117,7 @@ func (c *Cache) configure(options ...Option) { c.watch.setup(c.specDirs, c.dirErrors) c.watch.start(&c.Mutex, c.refresh, c.dirErrors) } - c.refresh() + _ = c.refresh() // we record but ignore errors } // Refresh rescans the CDI Spec directories and refreshes the Cache. @@ -222,7 +223,8 @@ func (c *Cache) refreshIfRequired(force bool) (bool, error) { // InjectDevices injects the given qualified devices to an OCI Spec. It // returns any unresolvable devices and an error if injection fails for -// any of the devices. +// any of the devices. Might trigger a cache refresh, in which case any +// errors encountered can be obtained using GetErrors(). func (c *Cache) InjectDevices(ociSpec *oci.Spec, devices ...string) ([]string, error) { var unresolved []string @@ -233,7 +235,7 @@ func (c *Cache) InjectDevices(ociSpec *oci.Spec, devices ...string) ([]string, e c.Lock() defer c.Unlock() - c.refreshIfRequired(false) + _, _ = c.refreshIfRequired(false) // we record but ignore errors edits := &ContainerEdits{} specs := map[*Spec]struct{}{} @@ -335,24 +337,27 @@ func (c *Cache) RemoveSpec(name string) error { return err } -// GetDevice returns the cached device for the given qualified name. +// GetDevice returns the cached device for the given qualified name. Might trigger +// a cache refresh, in which case any errors encountered can be obtained using +// GetErrors(). func (c *Cache) GetDevice(device string) *Device { c.Lock() defer c.Unlock() - c.refreshIfRequired(false) + _, _ = c.refreshIfRequired(false) // we record but ignore errors return c.devices[device] } -// ListDevices lists all cached devices by qualified name. +// ListDevices lists all cached devices by qualified name. Might trigger a cache +// refresh, in which case any errors encountered can be obtained using GetErrors(). func (c *Cache) ListDevices() []string { var devices []string c.Lock() defer c.Unlock() - c.refreshIfRequired(false) + _, _ = c.refreshIfRequired(false) // we record but ignore errors for name := range c.devices { devices = append(devices, name) @@ -362,14 +367,15 @@ func (c *Cache) ListDevices() []string { return devices } -// ListVendors lists all vendors known to the cache. +// ListVendors lists all vendors known to the cache. Might trigger a cache refresh, +// in which case any errors encountered can be obtained using GetErrors(). func (c *Cache) ListVendors() []string { var vendors []string c.Lock() defer c.Unlock() - c.refreshIfRequired(false) + _, _ = c.refreshIfRequired(false) // we record but ignore errors for vendor := range c.specs { vendors = append(vendors, vendor) @@ -379,7 +385,8 @@ func (c *Cache) ListVendors() []string { return vendors } -// ListClasses lists all device classes known to the cache. +// ListClasses lists all device classes known to the cache. Might trigger a cache +// refresh, in which case any errors encountered can be obtained using GetErrors(). 
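The `_, _ = c.refreshIfRequired(false)` pattern in these hunks is deliberate: the read accessors swallow refresh errors so lookups stay usable, and callers that care inspect the recorded errors afterwards. A sketch of the intended calling convention under that contract (the function name and error handling are ours, not package API):

```go
package cdiexample

import (
	"fmt"

	oci "github.com/opencontainers/runtime-spec/specs-go"
	"tags.cncf.io/container-device-interface/pkg/cdi"
)

// injectAndReport is our name for the calling convention, not an API
// from the package.
func injectAndReport(ociSpec *oci.Spec, devices []string) error {
	cache := cdi.GetDefaultCache()

	// InjectDevices may refresh the cache internally; its error covers
	// only the injection itself.
	unresolved, err := cache.InjectDevices(ociSpec, devices...)
	if err != nil {
		return fmt.Errorf("CDI injection failed (unresolved: %v): %w", unresolved, err)
	}

	// Spec-file problems hit during any implicit refresh are surfaced
	// separately, keyed by Spec file path.
	for path, errs := range cache.GetErrors() {
		for _, e := range errs {
			fmt.Printf("spec %s: %v\n", path, e)
		}
	}
	return nil
}
```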
func (c *Cache) ListClasses() []string { var ( cmap = map[string]struct{}{} @@ -389,7 +396,7 @@ func (c *Cache) ListClasses() []string { c.Lock() defer c.Unlock() - c.refreshIfRequired(false) + _, _ = c.refreshIfRequired(false) // we record but ignore errors for _, specs := range c.specs { for _, spec := range specs { @@ -404,12 +411,13 @@ func (c *Cache) ListClasses() []string { return classes } -// GetVendorSpecs returns all specs for the given vendor. +// GetVendorSpecs returns all specs for the given vendor. Might trigger a cache +// refresh, in which case any errors encountered can be obtained using GetErrors(). func (c *Cache) GetVendorSpecs(vendor string) []*Spec { c.Lock() defer c.Unlock() - c.refreshIfRequired(false) + _, _ = c.refreshIfRequired(false) // we record but ignore errors return c.specs[vendor] } @@ -522,6 +530,13 @@ func (w *watch) watch(fsw *fsnotify.Watcher, m *sync.Mutex, refresh func() error if watch == nil { return } + + eventMask := fsnotify.Rename | fsnotify.Remove | fsnotify.Write + // On macOS, we also need to watch for Create events. + if runtime.GOOS == "darwin" { + eventMask |= fsnotify.Create + } + for { select { case event, ok := <-watch.Events: @@ -529,10 +544,10 @@ func (w *watch) watch(fsw *fsnotify.Watcher, m *sync.Mutex, refresh func() error return } - if (event.Op & (fsnotify.Rename | fsnotify.Remove | fsnotify.Write)) == 0 { + if (event.Op & eventMask) == 0 { continue } - if event.Op == fsnotify.Write { + if event.Op == fsnotify.Write || event.Op == fsnotify.Create { if ext := filepath.Ext(event.Name); ext != ".json" && ext != ".yaml" { continue } @@ -544,7 +559,7 @@ func (w *watch) watch(fsw *fsnotify.Watcher, m *sync.Mutex, refresh func() error } else { w.update(dirErrors) } - refresh() + _ = refresh() m.Unlock() case _, ok := <-watch.Errors: diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_darwin.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_darwin.go new file mode 100644 index 00000000000..b09ea6ff975 --- /dev/null +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_darwin.go @@ -0,0 +1,26 @@ +//go:build darwin +// +build darwin + +/* + Copyright © 2021 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
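The watch hunk above replaces a hard-coded Op test with an OS-dependent event mask, since on macOS spec files often show up via Create rather than Write. A reduced sketch of the same fsnotify pattern (the watched path is only an example):

```go
package main

import (
	"log"
	"runtime"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/etc/cdi"); err != nil {
		log.Fatal(err)
	}

	// Same idea as the vendored watcher: rename/remove/write everywhere,
	// plus create on macOS, where files are often replaced wholesale.
	mask := fsnotify.Rename | fsnotify.Remove | fsnotify.Write
	if runtime.GOOS == "darwin" {
		mask |= fsnotify.Create
	}

	for ev := range w.Events {
		if ev.Op&mask == 0 {
			continue // uninteresting event, e.g. chmod
		}
		log.Printf("refresh triggered by %s on %s", ev.Op, ev.Name)
	}
}
```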
+*/ + +package cdi + +import "syscall" + +func osSync() { + _ = syscall.Sync() +} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_unix.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_unix.go index 0ee5fb86f56..b7c44129fbf 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_unix.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_unix.go @@ -1,5 +1,5 @@ -//go:build !windows -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright © 2021 The CDI Authors diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits.go index 70791666c22..4744eff8fad 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits.go @@ -26,7 +26,7 @@ import ( oci "github.com/opencontainers/runtime-spec/specs-go" ocigen "github.com/opencontainers/runtime-tools/generate" - "tags.cncf.io/container-device-interface/specs-go" + cdi "tags.cncf.io/container-device-interface/specs-go" ) const ( @@ -64,7 +64,7 @@ var ( // to all OCI Specs where at least one devices from the CDI Spec // is injected. type ContainerEdits struct { - *specs.ContainerEdits + *cdi.ContainerEdits } // Apply edits to the given OCI Spec. Updates the OCI Spec in place. @@ -205,7 +205,7 @@ func (e *ContainerEdits) Append(o *ContainerEdits) *ContainerEdits { e = &ContainerEdits{} } if e.ContainerEdits == nil { - e.ContainerEdits = &specs.ContainerEdits{} + e.ContainerEdits = &cdi.ContainerEdits{} } e.Env = append(e.Env, o.Env...) @@ -259,7 +259,7 @@ func ValidateEnv(env []string) error { // DeviceNode is a CDI Spec DeviceNode wrapper, used for validating DeviceNodes. type DeviceNode struct { - *specs.DeviceNode + *cdi.DeviceNode } // Validate a CDI Spec DeviceNode. @@ -289,7 +289,7 @@ func (d *DeviceNode) Validate() error { // Hook is a CDI Spec Hook wrapper, used for validating hooks. type Hook struct { - *specs.Hook + *cdi.Hook } // Validate a hook. @@ -308,7 +308,7 @@ func (h *Hook) Validate() error { // Mount is a CDI Mount wrapper, used for validating mounts. type Mount struct { - *specs.Mount + *cdi.Mount } // Validate a mount. @@ -325,13 +325,13 @@ func (m *Mount) Validate() error { // IntelRdt is a CDI IntelRdt wrapper. // This is used for validation and conversion to OCI specifications. type IntelRdt struct { - *specs.IntelRdt + *cdi.IntelRdt } // ValidateIntelRdt validates the IntelRdt configuration. // // Deprecated: ValidateIntelRdt is deprecated use IntelRdt.Validate() instead. -func ValidateIntelRdt(i *specs.IntelRdt) error { +func ValidateIntelRdt(i *cdi.IntelRdt) error { return (&IntelRdt{i}).Validate() } @@ -355,7 +355,7 @@ func ensureOCIHooks(spec *oci.Spec) { func sortMounts(specgen *ocigen.Generator) { mounts := specgen.Mounts() specgen.ClearMounts() - sort.Sort(orderedMounts(mounts)) + sort.Stable(orderedMounts(mounts)) specgen.Config.Mounts = mounts } @@ -375,14 +375,7 @@ func (m orderedMounts) Len() int { // mount indexed by parameter 1 is less than that of the mount indexed by // parameter 2. Used in sorting. func (m orderedMounts) Less(i, j int) bool { - ip, jp := m.parts(i), m.parts(j) - if ip < jp { - return true - } - if jp < ip { - return false - } - return m[i].Destination < m[j].Destination + return m.parts(i) < m.parts(j) } // Swap swaps two items in an array of mounts. 
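The switch from sort.Sort to sort.Stable in sortMounts matters because the simplified Less compares only path depth: with an unstable sort, mounts at equal depth could be reordered arbitrarily, whereas a stable sort preserves the order they had in the spec. A self-contained sketch of that behavior (the type and depth helper are simplified stand-ins for orderedMounts and parts):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

type mount struct{ Destination string }

// depth approximates the vendored parts() helper: order mounts by the
// number of path components in their destination.
func depth(m mount) int {
	return strings.Count(m.Destination, "/")
}

func main() {
	mounts := []mount{
		{"/dev/dri"}, {"/dev/snd"}, {"/proc"}, {"/dev"},
	}
	// SliceStable keeps /dev/dri before /dev/snd (their order in the
	// spec) even though they compare equal by depth; an unstable sort
	// would be free to swap them.
	sort.SliceStable(mounts, func(i, j int) bool {
		return depth(mounts[i]) < depth(mounts[j])
	})
	fmt.Println(mounts) // [{/proc} {/dev} {/dev/dri} {/dev/snd}]
}
```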
Used in sorting diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/device.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/device.go index 00be48dd5e5..2e5fa57f400 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/device.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/device.go @@ -67,7 +67,7 @@ func (d *Device) edits() *ContainerEdits { // Validate the device. func (d *Device) validate() error { - if err := ValidateDeviceName(d.Name); err != nil { + if err := parser.ValidateDeviceName(d.Name); err != nil { return err } name := d.Name diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/doc.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/doc.go index 0ea07145522..d00e0d33953 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/doc.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/doc.go @@ -35,25 +35,11 @@ // available and instantiated the first time it is referenced directly // or indirectly. The most frequently used cache functions are available // as identically named package level functions which operate on the -// default cache instance. Moreover, the registry also operates on the -// same default cache. We plan to deprecate the registry and eventually -// remove it in a future release. -// -// # CDI Registry -// -// Note: the Registry and its related interfaces are deprecated and will -// be removed in a future version. Please use the default cache and its -// related package-level function instead. -// -// The primary interface to interact with CDI devices is the Registry. It -// is essentially a cache of all Specs and devices discovered in standard -// CDI directories on the host. The registry has two main functionality, -// injecting devices into an OCI Spec and refreshing the cache of CDI -// Specs and devices. +// default cache instance. // // # Device Injection // -// Using the Registry one can inject CDI devices into a container with code +// Using the Cache one can inject CDI devices into a container with code // similar to the following snippet: // // import ( @@ -63,13 +49,14 @@ // log "github.com/sirupsen/logrus" // // "tags.cncf.io/container-device-interface/pkg/cdi" -// oci "github.com/opencontainers/runtime-spec/specs-go" +// "github.com/opencontainers/runtime-spec/specs-go" // ) // -// func injectCDIDevices(spec *oci.Spec, devices []string) error { +// func injectCDIDevices(spec *specs.Spec, devices []string) error { // log.Debug("pristine OCI Spec: %s", dumpSpec(spec)) // -// unresolved, err := cdi.GetRegistry().InjectDevices(spec, devices) +// cache := cdi.GetDefaultCache() +// unresolved, err := cache.InjectDevices(spec, devices) // if err != nil { // return fmt.Errorf("CDI device injection failed: %w", err) // } @@ -106,17 +93,17 @@ // log "github.com/sirupsen/logrus" // // "tags.cncf.io/container-device-interface/pkg/cdi" -// oci "github.com/opencontainers/runtime-spec/specs-go" +// "github.com/opencontainers/runtime-spec/specs-go" // ) // -// func injectCDIDevices(spec *oci.Spec, devices []string) error { -// registry := cdi.GetRegistry() +// func injectCDIDevices(spec *specs.Spec, devices []string) error { +// cache := cdi.GetDefaultCache() // -// if err := registry.Refresh(); err != nil { +// if err := cache.Refresh(); err != nil { // // Note: // // It is up to the implementation to decide whether // // to abort injection on errors. A failed Refresh() -// // does not necessarily render the registry unusable. 
+// // does not necessarily render the cache unusable. // // For instance, a parse error in a Spec file for // // vendor A does not have any effect on devices of // // vendor B... @@ -125,7 +112,7 @@ // // log.Debug("pristine OCI Spec: %s", dumpSpec(spec)) // -// unresolved, err := registry.InjectDevices(spec, devices) +// unresolved, err := cache.InjectDevices(spec, devices) // if err != nil { // return fmt.Errorf("CDI device injection failed: %w", err) // } @@ -192,7 +179,7 @@ // ) // // func generateDeviceSpecs() error { -// registry := cdi.GetRegistry() +// cache := cdi.GetDefaultCache() // spec := &specs.Spec{ // Version: specs.CurrentVersion, // Kind: vendor+"/"+class, @@ -210,7 +197,7 @@ // return fmt.Errorf("failed to generate Spec name: %w", err) // } // -// return registry.SpecDB().WriteSpec(spec, specName) +// return cache.WriteSpec(spec, specName) // } // // Similarly, generating and later cleaning up transient Spec files can be @@ -229,7 +216,7 @@ // ) // // func generateTransientSpec(ctr Container) error { -// registry := cdi.GetRegistry() +// cache := cdi.GetDefaultCache() // devices := getContainerDevs(ctr, vendor, class) // spec := &specs.Spec{ // Version: specs.CurrentVersion, @@ -257,21 +244,21 @@ // return fmt.Errorf("failed to generate Spec name: %w", err) // } // -// return registry.SpecDB().WriteSpec(spec, specName) +// return cache.WriteSpec(spec, specName) // } // // func removeTransientSpec(ctr Container) error { -// registry := cdi.GetRegistry() +// cache := cdi.GetDefaultCache() // transientID := getSomeSufficientlyUniqueIDForContainer(ctr) // specName := cdi.GenerateNameForTransientSpec(vendor, class, transientID) // -// return registry.SpecDB().RemoveSpec(specName) +// return cache.RemoveSpec(specName) // } // // # CDI Spec Validation // // This package performs both syntactic and semantic validation of CDI -// Spec file data when a Spec file is loaded via the registry or using +// Spec file data when a Spec file is loaded via the cache or using // the ReadSpec API function. As part of the semantic verification, the // Spec file is verified against the CDI Spec JSON validation schema. // diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/qualified-device.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/qualified-device.go deleted file mode 100644 index 0bdfdc1661e..00000000000 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/qualified-device.go +++ /dev/null @@ -1,113 +0,0 @@ -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "tags.cncf.io/container-device-interface/pkg/parser" -) - -// QualifiedName returns the qualified name for a device. -// The syntax for a qualified device names is -// -// "/=". -// -// A valid vendor and class name may contain the following runes: -// -// 'A'-'Z', 'a'-'z', '0'-'9', '.', '-', '_'.
-// -// A valid device name may contain the following runes: -// -// 'A'-'Z', 'a'-'z', '0'-'9', '-', '_', '.', ':' -// -// Deprecated: use parser.QualifiedName instead -func QualifiedName(vendor, class, name string) string { - return parser.QualifiedName(vendor, class, name) -} - -// IsQualifiedName tests if a device name is qualified. -// -// Deprecated: use parser.IsQualifiedName instead -func IsQualifiedName(device string) bool { - return parser.IsQualifiedName(device) -} - -// ParseQualifiedName splits a qualified name into device vendor, class, -// and name. If the device fails to parse as a qualified name, or if any -// of the split components fail to pass syntax validation, vendor and -// class are returned as empty, together with the verbatim input as the -// name and an error describing the reason for failure. -// -// Deprecated: use parser.ParseQualifiedName instead -func ParseQualifiedName(device string) (string, string, string, error) { - return parser.ParseQualifiedName(device) -} - -// ParseDevice tries to split a device name into vendor, class, and name. -// If this fails, for instance in the case of unqualified device names, -// ParseDevice returns an empty vendor and class together with name set -// to the verbatim input. -// -// Deprecated: use parser.ParseDevice instead -func ParseDevice(device string) (string, string, string) { - return parser.ParseDevice(device) -} - -// ParseQualifier splits a device qualifier into vendor and class. -// The syntax for a device qualifier is -// -// "/" -// -// If parsing fails, an empty vendor and the class set to the -// verbatim input is returned. -// -// Deprecated: use parser.ParseQualifier instead -func ParseQualifier(kind string) (string, string) { - return parser.ParseQualifier(kind) -} - -// ValidateVendorName checks the validity of a vendor name. -// A vendor name may contain the following ASCII characters: -// - upper- and lowercase letters ('A'-'Z', 'a'-'z') -// - digits ('0'-'9') -// - underscore, dash, and dot ('_', '-', and '.') -// -// Deprecated: use parser.ValidateVendorName instead -func ValidateVendorName(vendor string) error { - return parser.ValidateVendorName(vendor) -} - -// ValidateClassName checks the validity of class name. -// A class name may contain the following ASCII characters: -// - upper- and lowercase letters ('A'-'Z', 'a'-'z') -// - digits ('0'-'9') -// - underscore, dash, and dot ('_', '-', and '.') -// -// Deprecated: use parser.ValidateClassName instead -func ValidateClassName(class string) error { - return parser.ValidateClassName(class) -} - -// ValidateDeviceName checks the validity of a device name. -// A device name may contain the following ASCII characters: -// - upper- and lowercase letters ('A'-'Z', 'a'-'z') -// - digits ('0'-'9') -// - underscore, dash, dot, colon ('_', '-', '.', ':') -// -// Deprecated: use parser.ValidateDeviceName instead -func ValidateDeviceName(name string) error { - return parser.ValidateDeviceName(name) -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/registry.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/registry.go deleted file mode 100644 index 3113a05a225..00000000000 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/registry.go +++ /dev/null @@ -1,178 +0,0 @@ -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
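With the deprecated wrappers above removed, qualified-name handling goes straight through the parser package. A minimal usage sketch (the device name is a common CDI example, not taken from this patch):

```go
package main

import (
	"fmt"

	"tags.cncf.io/container-device-interface/pkg/parser"
)

func main() {
	// A qualified device name has the form "vendor/class=name".
	const dev = "nvidia.com/gpu=gpu0"

	vendor, class, name, err := parser.ParseQualifiedName(dev)
	if err != nil {
		fmt.Println("not a qualified name:", err)
		return
	}
	fmt.Println(vendor, class, name) // nvidia.com gpu gpu0
}
```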
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "sync" - - oci "github.com/opencontainers/runtime-spec/specs-go" - cdi "tags.cncf.io/container-device-interface/specs-go" -) - -// Registry keeps a cache of all CDI Specs installed or generated on -// the host. Registry is the primary interface clients should use to -// interact with CDI. -// -// The most commonly used Registry functions are for refreshing the -// registry and injecting CDI devices into an OCI Spec. -// -// Deprecated: Registry is deprecated and will be removed in a future -// version. Please update your code to use the corresponding package- -// level functions Configure(), Refresh(), InjectDevices(), GetErrors(), -// and GetDefaultCache(). -type Registry interface { - RegistryResolver - RegistryRefresher - DeviceDB() RegistryDeviceDB - SpecDB() RegistrySpecDB -} - -// RegistryRefresher is the registry interface for refreshing the -// cache of CDI Specs and devices. -// -// Configure reconfigures the registry with the given options. -// -// Refresh rescans all CDI Spec directories and updates the -// state of the cache to reflect any changes. It returns any -// errors encountered during the refresh. -// -// GetErrors returns all errors encountered for any of the scanned -// Spec files during the last cache refresh. -// -// GetSpecDirectories returns the set up CDI Spec directories -// currently in use. The directories are returned in the scan -// order of Refresh(). -// -// GetSpecDirErrors returns any errors related to the configured -// Spec directories. -// -// Deprecated: RegistryRefresher is deprecated and will be removed -// in a future version. Please use the default cache and its related -// package-level functions instead. -type RegistryRefresher interface { - Configure(...Option) error - Refresh() error - GetErrors() map[string][]error - GetSpecDirectories() []string - GetSpecDirErrors() map[string]error -} - -// RegistryResolver is the registry interface for injecting CDI -// devices into an OCI Spec. -// -// InjectDevices takes an OCI Spec and injects into it a set of -// CDI devices given by qualified name. It returns the names of -// any unresolved devices and an error if injection fails. -// -// Deprecated: RegistryRefresher is deprecated and will be removed -// in a future version. Please use the default cache and its related -// package-level functions instead. -type RegistryResolver interface { - InjectDevices(spec *oci.Spec, device ...string) (unresolved []string, err error) -} - -// RegistryDeviceDB is the registry interface for querying devices. -// -// GetDevice returns the CDI device for the given qualified name. If -// the device is not GetDevice returns nil. -// -// ListDevices returns a slice with the names of qualified device -// known. The returned slice is sorted. -// -// Deprecated: RegistryDeviceDB is deprecated and will be removed -// in a future version. Please use the default cache and its related -// package-level functions instead. -// and will be removed in a future version. Please use the default -// cache and its related package-level functions instead. 
-type RegistryDeviceDB interface { - GetDevice(device string) *Device - ListDevices() []string -} - -// RegistrySpecDB is the registry interface for querying CDI Specs. -// -// ListVendors returns a slice with all vendors known. The returned -// slice is sorted. -// -// ListClasses returns a slice with all classes known. The returned -// slice is sorted. -// -// GetVendorSpecs returns a slice of all Specs for the vendor. -// -// GetSpecErrors returns any errors for the Spec encountered during -// the last cache refresh. -// -// WriteSpec writes the Spec with the given content and name to the -// last Spec directory. -// -// Deprecated: RegistrySpecDB is deprecated and will be removed -// in a future version. Please use the default cache and its related -// package-level functions instead. -type RegistrySpecDB interface { - ListVendors() []string - ListClasses() []string - GetVendorSpecs(vendor string) []*Spec - GetSpecErrors(*Spec) []error - WriteSpec(raw *cdi.Spec, name string) error - RemoveSpec(name string) error -} - -type registry struct { - *Cache -} - -var _ Registry = ®istry{} - -var ( - reg *registry - initOnce sync.Once -) - -// GetRegistry returns the CDI registry. If any options are given, those -// are applied to the registry. -// -// Deprecated: GetRegistry is deprecated and will be removed in a future -// version. Please use the default cache and its related package-level -// functions instead. -func GetRegistry(options ...Option) Registry { - initOnce.Do(func() { - reg = ®istry{GetDefaultCache()} - }) - if len(options) > 0 { - // We don't care about errors here - _ = reg.Configure(options...) - } - return reg -} - -// DeviceDB returns the registry interface for querying devices. -// -// Deprecated: DeviceDB is deprecated and will be removed in a future -// version. Please use the default cache and its related package-level -// functions instead. -func (r *registry) DeviceDB() RegistryDeviceDB { - return r -} - -// SpecDB returns the registry interface for querying Specs. -// -// Deprecated: SpecDB is deprecated and will be removed in a future -// version. Please use the default cache and its related package-level -// functions instead. -func (r *registry) SpecDB() RegistrySpecDB { - return r -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec-dirs.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec-dirs.go index 09005d690c8..b192f962c84 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec-dirs.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec-dirs.go @@ -35,8 +35,7 @@ var ( // While altering this variable changes the package defaults, // the preferred way of overriding the default directories is // to use a WithSpecDirs options. Otherwise the change is only - // effective if it takes place before creating the Registry or - // other Cache instances. + // effective if it takes place before creating the cache instance. DefaultSpecDirs = []string{DefaultStaticDir, DefaultDynamicDir} // ErrStopScan can be returned from a ScanSpecFunc to stop the scan. 
ErrStopScan = errors.New("stop Spec scan") diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec.go index 1a0a662b806..1ddc244f4d4 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec.go @@ -25,9 +25,11 @@ import ( "sync" oci "github.com/opencontainers/runtime-spec/specs-go" + orderedyaml "gopkg.in/yaml.v2" "sigs.k8s.io/yaml" "tags.cncf.io/container-device-interface/internal/validation" + "tags.cncf.io/container-device-interface/pkg/parser" cdi "tags.cncf.io/container-device-interface/specs-go" ) @@ -36,9 +38,13 @@ const ( defaultSpecExt = ".yaml" ) +type validator interface { + Validate(*cdi.Spec) error +} + var ( // Externally set CDI Spec validation function. - specValidator func(*cdi.Spec) error + specValidator validator validatorLock sync.RWMutex ) @@ -105,7 +111,7 @@ func newSpec(raw *cdi.Spec, path string, priority int) (*Spec, error) { spec.path += defaultSpecExt } - spec.vendor, spec.class = ParseQualifier(spec.Kind) + spec.vendor, spec.class = parser.ParseQualifier(spec.Kind) if spec.devices, err = spec.validate(); err != nil { return nil, fmt.Errorf("invalid CDI Spec: %w", err) @@ -130,7 +136,7 @@ func (s *Spec) write(overwrite bool) error { } if filepath.Ext(s.path) == ".yaml" { - data, err = yaml.Marshal(s.Spec) + data, err = orderedyaml.Marshal(s.Spec) data = append([]byte("---\n"), data...) } else { data, err = json.Marshal(s.Spec) @@ -200,24 +206,21 @@ func (s *Spec) edits() *ContainerEdits { return &ContainerEdits{&s.ContainerEdits} } +// MinimumRequiredVersion determines the minimum spec version for the input spec. +// Deprecated: use cdi.MinimumRequiredVersion instead +func MinimumRequiredVersion(spec *cdi.Spec) (string, error) { + return cdi.MinimumRequiredVersion(spec) +} + // Validate the Spec. func (s *Spec) validate() (map[string]*Device, error) { - if err := validateVersion(s.Version); err != nil { + if err := cdi.ValidateVersion(s.Spec); err != nil { return nil, err } - - minVersion, err := MinimumRequiredVersion(s.Spec) - if err != nil { - return nil, fmt.Errorf("could not determine minimum required version: %v", err) - } - if newVersion(minVersion).IsGreaterThan(newVersion(s.Version)) { - return nil, fmt.Errorf("the spec version must be at least v%v", minVersion) - } - - if err := ValidateVendorName(s.vendor); err != nil { + if err := parser.ValidateVendorName(s.vendor); err != nil { return nil, err } - if err := ValidateClassName(s.class); err != nil { + if err := parser.ValidateClassName(s.class); err != nil { return nil, err } if err := validation.ValidateSpecAnnotations(s.Kind, s.Annotations); err != nil { @@ -238,17 +241,11 @@ func (s *Spec) validate() (map[string]*Device, error) { } devices[d.Name] = dev } - - return devices, nil -} - -// validateVersion checks whether the specified spec version is supported. -func validateVersion(version string) error { - if !validSpecVersions.isValidVersion(version) { - return fmt.Errorf("invalid version %q", version) + if len(devices) == 0 { + return nil, fmt.Errorf("invalid spec, no devices") } - return nil + return devices, nil } // ParseSpec parses CDI Spec data into a raw CDI Spec. @@ -264,13 +261,13 @@ func ParseSpec(data []byte) (*cdi.Spec, error) { // SetSpecValidator sets a CDI Spec validator function. 
This function // is used for extra CDI Spec content validation whenever a Spec file // is loaded (using ReadSpec()) or written (using WriteSpec()). -func SetSpecValidator(fn func(*cdi.Spec) error) { +func SetSpecValidator(v validator) { validatorLock.Lock() defer validatorLock.Unlock() - specValidator = fn + specValidator = v } -// validateSpec validates the Spec using the extneral validator. +// validateSpec validates the Spec using the external validator. func validateSpec(raw *cdi.Spec) error { validatorLock.RLock() defer validatorLock.RUnlock() @@ -278,7 +275,7 @@ if specValidator == nil { return nil } - err := specValidator(raw) + err := specValidator.Validate(raw) if err != nil { return fmt.Errorf("Spec validation failed: %w", err) } @@ -328,7 +325,7 @@ // the Spec does not contain a valid vendor or class, it returns // an empty name and a non-nil error. func GenerateNameForSpec(raw *cdi.Spec) (string, error) { - vendor, class := ParseQualifier(raw.Kind) + vendor, class := parser.ParseQualifier(raw.Kind) if vendor == "" { return "", fmt.Errorf("invalid vendor/class %q in Spec", raw.Kind) } @@ -342,7 +339,7 @@ // If the Spec does not contain a valid vendor or class, it returns // an empty name and a non-nil error. func GenerateNameForTransientSpec(raw *cdi.Spec, transientID string) (string, error) { - vendor, class := ParseQualifier(raw.Kind) + vendor, class := parser.ParseQualifier(raw.Kind) if vendor == "" { return "", fmt.Errorf("invalid vendor/class %q in Spec", raw.Kind) } diff --git a/vendor/tags.cncf.io/container-device-interface/specs-go/config.go b/vendor/tags.cncf.io/container-device-interface/specs-go/config.go index d6d6302ffb4..f28657b865e 100644 --- a/vendor/tags.cncf.io/container-device-interface/specs-go/config.go +++ b/vendor/tags.cncf.io/container-device-interface/specs-go/config.go @@ -2,72 +2,71 @@ package specs import "os" -// CurrentVersion is the current version of the Spec. -const CurrentVersion = "0.8.0" - // Spec is the base configuration for CDI type Spec struct { - Version string `json:"cdiVersion"` - Kind string `json:"kind"` + Version string `json:"cdiVersion" yaml:"cdiVersion"` + Kind string `json:"kind" yaml:"kind"` // Annotations add meta information per CDI spec. Note these are CDI-specific and do not affect container metadata. - Annotations map[string]string `json:"annotations,omitempty"` - Devices []Device `json:"devices"` - ContainerEdits ContainerEdits `json:"containerEdits,omitempty"` + // Added in v0.6.0. + Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` + Devices []Device `json:"devices" yaml:"devices"` + ContainerEdits ContainerEdits `json:"containerEdits,omitempty" yaml:"containerEdits,omitempty"` } // Device is a "Device" a container runtime can add to a container type Device struct { - Name string `json:"name"` + Name string `json:"name" yaml:"name"` // Annotations add meta information per device. Note these are CDI-specific and do not affect container metadata. - Annotations map[string]string `json:"annotations,omitempty"` - ContainerEdits ContainerEdits `json:"containerEdits"` + // Added in v0.6.0.
+ Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` + ContainerEdits ContainerEdits `json:"containerEdits" yaml:"containerEdits"` } // ContainerEdits are edits a container runtime must make to the OCI spec to expose the device. type ContainerEdits struct { - Env []string `json:"env,omitempty"` - DeviceNodes []*DeviceNode `json:"deviceNodes,omitempty"` - Hooks []*Hook `json:"hooks,omitempty"` - Mounts []*Mount `json:"mounts,omitempty"` - IntelRdt *IntelRdt `json:"intelRdt,omitempty"` - AdditionalGIDs []uint32 `json:"additionalGids,omitempty"` + Env []string `json:"env,omitempty" yaml:"env,omitempty"` + DeviceNodes []*DeviceNode `json:"deviceNodes,omitempty" yaml:"deviceNodes,omitempty"` + Hooks []*Hook `json:"hooks,omitempty" yaml:"hooks,omitempty"` + Mounts []*Mount `json:"mounts,omitempty" yaml:"mounts,omitempty"` + IntelRdt *IntelRdt `json:"intelRdt,omitempty" yaml:"intelRdt,omitempty"` // Added in v0.7.0 + AdditionalGIDs []uint32 `json:"additionalGids,omitempty" yaml:"additionalGids,omitempty"` // Added in v0.7.0 } // DeviceNode represents a device node that needs to be added to the OCI spec. type DeviceNode struct { - Path string `json:"path"` - HostPath string `json:"hostPath,omitempty"` - Type string `json:"type,omitempty"` - Major int64 `json:"major,omitempty"` - Minor int64 `json:"minor,omitempty"` - FileMode *os.FileMode `json:"fileMode,omitempty"` - Permissions string `json:"permissions,omitempty"` - UID *uint32 `json:"uid,omitempty"` - GID *uint32 `json:"gid,omitempty"` + Path string `json:"path" yaml:"path"` + HostPath string `json:"hostPath,omitempty" yaml:"hostPath,omitempty"` // Added in v0.5.0 + Type string `json:"type,omitempty" yaml:"type,omitempty"` + Major int64 `json:"major,omitempty" yaml:"major,omitempty"` + Minor int64 `json:"minor,omitempty" yaml:"minor,omitempty"` + FileMode *os.FileMode `json:"fileMode,omitempty" yaml:"fileMode,omitempty"` + Permissions string `json:"permissions,omitempty" yaml:"permissions,omitempty"` + UID *uint32 `json:"uid,omitempty" yaml:"uid,omitempty"` + GID *uint32 `json:"gid,omitempty" yaml:"gid,omitempty"` } // Mount represents a mount that needs to be added to the OCI spec. type Mount struct { - HostPath string `json:"hostPath"` - ContainerPath string `json:"containerPath"` - Options []string `json:"options,omitempty"` - Type string `json:"type,omitempty"` + HostPath string `json:"hostPath" yaml:"hostPath"` + ContainerPath string `json:"containerPath" yaml:"containerPath"` + Options []string `json:"options,omitempty" yaml:"options,omitempty"` + Type string `json:"type,omitempty" yaml:"type,omitempty"` // Added in v0.4.0 } // Hook represents a hook that needs to be added to the OCI spec. type Hook struct { - HookName string `json:"hookName"` - Path string `json:"path"` - Args []string `json:"args,omitempty"` - Env []string `json:"env,omitempty"` - Timeout *int `json:"timeout,omitempty"` + HookName string `json:"hookName" yaml:"hookName"` + Path string `json:"path" yaml:"path"` + Args []string `json:"args,omitempty" yaml:"args,omitempty"` + Env []string `json:"env,omitempty" yaml:"env,omitempty"` + Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty"` } // IntelRdt describes the Linux IntelRdt parameters to set in the OCI spec. 
type IntelRdt struct { - ClosID string `json:"closID,omitempty"` - L3CacheSchema string `json:"l3CacheSchema,omitempty"` - MemBwSchema string `json:"memBwSchema,omitempty"` - EnableCMT bool `json:"enableCMT,omitempty"` - EnableMBM bool `json:"enableMBM,omitempty"` + ClosID string `json:"closID,omitempty" yaml:"closID,omitempty"` + L3CacheSchema string `json:"l3CacheSchema,omitempty" yaml:"l3CacheSchema,omitempty"` + MemBwSchema string `json:"memBwSchema,omitempty" yaml:"memBwSchema,omitempty"` + EnableCMT bool `json:"enableCMT,omitempty" yaml:"enableCMT,omitempty"` + EnableMBM bool `json:"enableMBM,omitempty" yaml:"enableMBM,omitempty"` } diff --git a/vendor/tags.cncf.io/container-device-interface/specs-go/oci.go b/vendor/tags.cncf.io/container-device-interface/specs-go/oci.go deleted file mode 100644 index ce485cb2391..00000000000 --- a/vendor/tags.cncf.io/container-device-interface/specs-go/oci.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright © 2021 The CDI Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package specs - -import "errors" - -// errDeprecated is returned for the ToOCI functions below. -// This should provide better guidance for user when migrating from the API -// below to the APIs provided in the cdi package. -var errDeprecated = errors.New("deprecated; Use cdi package functions instead") - -// ToOCI returns the opencontainers runtime Spec Hook for this Hook. -// -// Deprecated: This function has been moved to tags.cncf.io/container-device-interface/pkg/cdi.Hook.toOCI -// and made private. -func (h *Hook) ToOCI() error { - return errDeprecated -} - -// ToOCI returns the opencontainers runtime Spec Mount for this Mount. -// -// Deprecated: This function has been moved to tags.cncf.io/container-device-interface/pkg/cdi.Mount.toOCI -// and made private. -func (m *Mount) ToOCI() error { - return errDeprecated -} - -// ToOCI returns the opencontainers runtime Spec LinuxDevice for this DeviceNode. -// -// Deprecated: This function has been moved to tags.cncf.io/container-device-interface/pkg/cdi.DeviceNode.toOCI -// and made private. -func (d *DeviceNode) ToOCI() error { - return errDeprecated -} - -// ToOCI returns the opencontainers runtime Spec LinuxIntelRdt for this IntelRdt config. -// -// Deprecated: This function has been moved to tags.cncf.io/container-device-interface/pkg/cdi.IntelRdt.toOCI -// and made private. -func (i *IntelRdt) ToOCI() error { - return errDeprecated -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/version.go b/vendor/tags.cncf.io/container-device-interface/specs-go/version.go similarity index 69% rename from vendor/tags.cncf.io/container-device-interface/pkg/cdi/version.go rename to vendor/tags.cncf.io/container-device-interface/specs-go/version.go index 9ca91267140..002e035059a 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/version.go +++ b/vendor/tags.cncf.io/container-device-interface/specs-go/version.go @@ -14,20 +14,18 @@ limitations under the License. 
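The yaml struct tags added throughout config.go exist because spec.go now writes YAML with gopkg.in/yaml.v2 (imported as orderedyaml), which marshals struct fields directly and would otherwise lowercase the field names; sigs.k8s.io/yaml, used before, routed through the json tags. A minimal illustration with a trimmed-down struct of our own, not the vendored type:

```go
package main

import (
	"fmt"

	orderedyaml "gopkg.in/yaml.v2"
)

// trimmedSpec is a cut-down stand-in for the vendored Spec type.
type trimmedSpec struct {
	// Without the yaml tag, yaml.v2 would emit the lowercased field
	// name ("version") instead of the camelCase key the schema expects.
	Version string `json:"cdiVersion" yaml:"cdiVersion"`
	Kind    string `json:"kind" yaml:"kind"`
}

func main() {
	out, err := orderedyaml.Marshal(trimmedSpec{
		Version: "1.0.0",
		Kind:    "vendor.example.com/device",
	})
	if err != nil {
		panic(err)
	}
	// Fields come out in struct order: cdiVersion first, then kind.
	fmt.Print(string(out))
}
```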
*/ -package cdi +package specs import ( + "fmt" + "strings" "golang.org/x/mod/semver" - - "tags.cncf.io/container-device-interface/pkg/parser" - cdi "tags.cncf.io/container-device-interface/specs-go" ) const ( - // CurrentVersion is the current version of the CDI Spec. - CurrentVersion = cdi.CurrentVersion + // CurrentVersion is the current version of the Spec. + CurrentVersion = "1.0.0" // vCurrent is the current version as a semver-comparable type vCurrent version = "v" + CurrentVersion @@ -41,6 +39,7 @@ const ( v060 version = "v0.6.0" v070 version = "v0.7.0" v080 version = "v0.8.0" + v100 version = "v1.0.0" // vEarliest is the earliest supported version of the CDI specification vEarliest version = v030 @@ -58,10 +57,29 @@ var validSpecVersions = requiredVersionMap{ v060: requiresV060, v070: requiresV070, v080: requiresV080, + v100: requiresV100, +} + +// ValidateVersion checks whether the specified spec version is valid. +// In addition to checking whether the spec version is in the set of known versions, +// the spec is inspected to determine whether the features used are available in the specified +// version. +func ValidateVersion(spec *Spec) error { + if !validSpecVersions.isValidVersion(spec.Version) { + return fmt.Errorf("invalid version %q", spec.Version) + } + minVersion, err := MinimumRequiredVersion(spec) + if err != nil { + return fmt.Errorf("could not determine minimum required version: %w", err) + } + if newVersion(minVersion).isGreaterThan(newVersion(spec.Version)) { + return fmt.Errorf("the spec version must be at least v%v", minVersion) + } + return nil } // MinimumRequiredVersion determines the minimum spec version for the input spec. -func MinimumRequiredVersion(spec *cdi.Spec) (string, error) { +func MinimumRequiredVersion(spec *Spec) (string, error) { minVersion := validSpecVersions.requiredVersion(spec) return minVersion.String(), nil } @@ -80,17 +98,17 @@ func (v version) String() string { return strings.TrimPrefix(string(v), "v") } -// IsGreaterThan checks with a version is greater than the specified version. -func (v version) IsGreaterThan(o version) bool { +// isGreaterThan checks whether a version is greater than the specified version. +func (v version) isGreaterThan(o version) bool { return semver.Compare(string(v), string(o)) > 0 } -// IsLatest checks whether the version is the latest supported version -func (v version) IsLatest() bool { +// isLatest checks whether the version is the latest supported version +func (v version) isLatest() bool { return v == vCurrent } -type requiredFunc func(*cdi.Spec) bool +type requiredFunc func(*Spec) bool type requiredVersionMap map[version]requiredFunc @@ -103,18 +121,18 @@ func (r requiredVersionMap) isValidVersion(specVersion string) bool { } // requiredVersion returns the minimum version required for the given spec -func (r requiredVersionMap) requiredVersion(spec *cdi.Spec) version { +func (r requiredVersionMap) requiredVersion(spec *Spec) version { minVersion := vEarliest for v, isRequired := range validSpecVersions { if isRequired == nil { continue } - if isRequired(spec) && v.IsGreaterThan(minVersion) { + if isRequired(spec) && v.isGreaterThan(minVersion) { minVersion = v } // If we have already detected the latest version then no later version could be detected - if minVersion.IsLatest() { + if minVersion.isLatest() { break } } @@ -122,15 +140,22 @@ return minVersion } +// requiresV100 returns true if the spec uses v1.0.0 features.
+// Since the v1.0.0 spec bump was due to moving the minimum version checks to +// the spec package, there are no explicit spec changes. +func requiresV100(_ *Spec) bool { + return false +} + // requiresV080 returns true if the spec uses v0.8.0 features. // Since the v0.8.0 spec bump was due to the removed .ToOCI functions on the -// spec types, there are explicit spec changes. -func requiresV080(_ *cdi.Spec) bool { +// spec types, there are no explicit spec changes. +func requiresV080(_ *Spec) bool { return false } // requiresV070 returns true if the spec uses v0.7.0 features -func requiresV070(spec *cdi.Spec) bool { +func requiresV070(spec *Spec) bool { if spec.ContainerEdits.IntelRdt != nil { return true } @@ -153,7 +178,7 @@ func requiresV070(spec *cdi.Spec) bool { } // requiresV060 returns true if the spec uses v0.6.0 features -func requiresV060(spec *cdi.Spec) bool { +func requiresV060(spec *Spec) bool { // The v0.6.0 spec allows annotations to be specified at a spec level for range spec.Annotations { return true @@ -167,23 +192,20 @@ func requiresV060(spec *cdi.Spec) bool { } // The v0.6.0 spec allows dots "." in Kind name label (class) - vendor, class := parser.ParseQualifier(spec.Kind) - if vendor != "" { - if strings.ContainsRune(class, '.') { - return true - } + if !strings.Contains(spec.Kind, "/") { + return false } - - return false + class := strings.SplitN(spec.Kind, "/", 2)[1] + return strings.Contains(class, ".") } // requiresV050 returns true if the spec uses v0.5.0 features -func requiresV050(spec *cdi.Spec) bool { - var edits []*cdi.ContainerEdits +func requiresV050(spec *Spec) bool { + var edits []*ContainerEdits for _, d := range spec.Devices { - // The v0.5.0 spec allowed device names to start with a digit instead of requiring a letter - if len(d.Name) > 0 && !parser.IsLetter(rune(d.Name[0])) { + // The v0.5.0 spec allowed device name to start with a digit + if len(d.Name) > 0 && '0' <= d.Name[0] && d.Name[0] <= '9' { return true } edits = append(edits, &d.ContainerEdits) @@ -202,8 +224,8 @@ func requiresV050(spec *cdi.Spec) bool { } // requiresV040 returns true if the spec uses v0.4.0 features -func requiresV040(spec *cdi.Spec) bool { - var edits []*cdi.ContainerEdits +func requiresV040(spec *Spec) bool { + var edits []*ContainerEdits for _, d := range spec.Devices { edits = append(edits, &d.ContainerEdits)
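The net effect of the version.go move is that spec-version checking is now self-contained in specs-go: ValidateVersion rejects unknown versions as well as versions older than what the spec's own features require. A sketch under that API (the IntelRdt edit is chosen because requiresV070 keys on it; the vendor/class string is illustrative):

```go
package main

import (
	"fmt"

	cdi "tags.cncf.io/container-device-interface/specs-go"
)

func main() {
	spec := &cdi.Spec{
		Version: "0.3.0",
		Kind:    "vendor.example.com/device",
		// IntelRdt was added in v0.7.0, so declaring v0.3.0 is invalid.
		ContainerEdits: cdi.ContainerEdits{
			IntelRdt: &cdi.IntelRdt{ClosID: "clos-1"},
		},
	}

	if err := cdi.ValidateVersion(spec); err != nil {
		fmt.Println(err) // the spec version must be at least v0.7.0
	}
}
```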