diff --git a/.travis.yml b/.travis.yml index e3da1b8d5..eaaf6bb1b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,169 +1,26 @@ -# Generated by scripts/generate_travis_yml.py - dist: bionic -jobs: - include: - - language: python - python: 3.8 - addons: - apt: - packages: - - libbz2-dev - - liblzma-dev - - zlib1g-dev - install: - script: python setup.py build -j "$(nproc)" test - - language: python - python: 3.7 - addons: - apt: - packages: - - libbz2-dev - - liblzma-dev - - zlib1g-dev - install: - script: python setup.py build -j "$(nproc)" test - - language: python - python: 3.6 - addons: - apt: - packages: - - libbz2-dev - - liblzma-dev - - zlib1g-dev - install: - script: python setup.py build -j "$(nproc)" test - - name: "vmtest Linux 5.6" - language: minimal - env: KERNEL=5.6 - addons: - apt: - packages: - - python3 - - python3-setuptools - - qemu-kvm - - rsync - - zstd - install: sudo adduser "$USER" kvm - before_script: - # The double sudo is necessary to pick up the new group membership. - - sudo -E sudo -E -u "$USER" scripts/vmtest/run.sh -k "$KERNEL"'.*' -o -d ~ ~/root.img; exitstatus=$? - # Exit status 0 is success, 1 is test failure (should fail in the script - # step), anything else is an error (should fail here). - - test $exitstatus -le 1 - script: test $exitstatus -eq 0 - - name: "vmtest Linux 5.5" - language: minimal - env: KERNEL=5.5 - addons: - apt: - packages: - - python3 - - python3-setuptools - - qemu-kvm - - rsync - - zstd - install: sudo adduser "$USER" kvm - before_script: - # The double sudo is necessary to pick up the new group membership. - - sudo -E sudo -E -u "$USER" scripts/vmtest/run.sh -k "$KERNEL"'.*' -o -d ~ ~/root.img; exitstatus=$? - # Exit status 0 is success, 1 is test failure (should fail in the script - # step), anything else is an error (should fail here). 
- - test $exitstatus -le 1 - script: test $exitstatus -eq 0 - - name: "vmtest Linux 5.4" - language: minimal - env: KERNEL=5.4 - addons: - apt: - packages: - - python3 - - python3-setuptools - - qemu-kvm - - rsync - - zstd - install: sudo adduser "$USER" kvm - before_script: - # The double sudo is necessary to pick up the new group membership. - - sudo -E sudo -E -u "$USER" scripts/vmtest/run.sh -k "$KERNEL"'.*' -o -d ~ ~/root.img; exitstatus=$? - # Exit status 0 is success, 1 is test failure (should fail in the script - # step), anything else is an error (should fail here). - - test $exitstatus -le 1 - script: test $exitstatus -eq 0 - - name: "vmtest Linux 4.19" - language: minimal - env: KERNEL=4.19 - addons: - apt: - packages: - - python3 - - python3-setuptools - - qemu-kvm - - rsync - - zstd - install: sudo adduser "$USER" kvm - before_script: - # The double sudo is necessary to pick up the new group membership. - - sudo -E sudo -E -u "$USER" scripts/vmtest/run.sh -k "$KERNEL"'.*' -o -d ~ ~/root.img; exitstatus=$? - # Exit status 0 is success, 1 is test failure (should fail in the script - # step), anything else is an error (should fail here). - - test $exitstatus -le 1 - script: test $exitstatus -eq 0 - - name: "vmtest Linux 4.14" - language: minimal - env: KERNEL=4.14 - addons: - apt: - packages: - - python3 - - python3-setuptools - - qemu-kvm - - rsync - - zstd - install: sudo adduser "$USER" kvm - before_script: - # The double sudo is necessary to pick up the new group membership. - - sudo -E sudo -E -u "$USER" scripts/vmtest/run.sh -k "$KERNEL"'.*' -o -d ~ ~/root.img; exitstatus=$? - # Exit status 0 is success, 1 is test failure (should fail in the script - # step), anything else is an error (should fail here). 
- - test $exitstatus -le 1 - script: test $exitstatus -eq 0 - - name: "vmtest Linux 4.9" - language: minimal - env: KERNEL=4.9 - addons: - apt: - packages: - - python3 - - python3-setuptools - - qemu-kvm - - rsync - - zstd - install: sudo adduser "$USER" kvm - before_script: - # The double sudo is necessary to pick up the new group membership. - - sudo -E sudo -E -u "$USER" scripts/vmtest/run.sh -k "$KERNEL"'.*' -o -d ~ ~/root.img; exitstatus=$? - # Exit status 0 is success, 1 is test failure (should fail in the script - # step), anything else is an error (should fail here). - - test $exitstatus -le 1 - script: test $exitstatus -eq 0 - - name: "vmtest Linux 4.4" - language: minimal - env: KERNEL=4.4 - addons: - apt: - packages: - - python3 - - python3-setuptools - - qemu-kvm - - rsync - - zstd - install: sudo adduser "$USER" kvm - before_script: - # The double sudo is necessary to pick up the new group membership. - - sudo -E sudo -E -u "$USER" scripts/vmtest/run.sh -k "$KERNEL"'.*' -o -d ~ ~/root.img; exitstatus=$? - # Exit status 0 is success, 1 is test failure (should fail in the script - # step), anything else is an error (should fail here). - - test $exitstatus -le 1 - script: test $exitstatus -eq 0 +language: python +python: + - '3.8' + - '3.7' + - '3.6' +install: + # Upstream defaults to world-read-writeable /dev/kvm. Debian/Ubuntu override + # this; see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=892945. We want + # the upstream default. + - sudo echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /lib/udev/rules.d/99-fix-kvm.rules > /dev/null + - sudo udevadm control --reload-rules + # On systemd >= 238 we can use udevadm trigger -w and remove udevadm settle. 
+ - sudo udevadm trigger /dev/kvm + - sudo udevadm settle +script: python setup.py test -K + +addons: + apt: + packages: + - libbz2-dev + - liblzma-dev + - qemu-kvm + - zlib1g-dev + - zstd diff --git a/MANIFEST.in b/MANIFEST.in index 8c8aa64e9..e8b551819 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,3 +1,3 @@ recursive-include docs *.py *.rst recursive-include tests *.py -recursive-include scripts/vmtest *.py *.rst *.sh +recursive-include vmtest *.py *.rst *.sh diff --git a/debian/rules b/debian/rules index 641186e52..a0c591768 100755 --- a/debian/rules +++ b/debian/rules @@ -2,3 +2,17 @@ %: dh $@ --with python3 --buildsystem=pybuild + +override_dh_auto_test: + # + # Don't run drgn's tests during the build step; The repo comes + # with its own testing that takes place elsewhere -> + # https://travis-ci.org/github/delphix/drgn/branches + # + # Ideally it wouldn't hurt having them run here too but due to + # drgn's testing setup, we'd have to override pybuild's default + # behavior. Since drgn is not a project that is owned by us, + # disabling tests here makes even more sense as we won't have + # to adjust the setup here every time a change in the test + # framework of drgn is applied upstream. + # diff --git a/scripts/generate_travis_yml.py b/scripts/generate_travis_yml.py deleted file mode 100755 index b671c7378..000000000 --- a/scripts/generate_travis_yml.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python3 - -""" -We want to test drgn on multiple Python versions on Ubuntu in addition to -multiple kernel versions using the vmtest setup. The .travis.yml build matrix -can't easily express this, so this script generates the jobs manually. 
-""" - - -PYTHON = ["3.8", "3.7", "3.6"] -KERNEL = ["5.6", "5.5", "5.4", "4.19", "4.14", "4.9", "4.4"] - - -if __name__ == "__main__": - print( - """\ -# Generated by scripts/generate_travis_yml.py - -dist: bionic - -jobs: - include:""" - ) - for python in PYTHON: - print( - f"""\ - - language: python - python: {python} - addons: - apt: - packages: - - libbz2-dev - - liblzma-dev - - zlib1g-dev - install: - script: python setup.py build -j "$(nproc)" test""" - ) - for kernel in KERNEL: - print( - f"""\ - - name: "vmtest Linux {kernel}" - language: minimal - env: KERNEL={kernel} - addons: - apt: - packages: - - python3 - - python3-setuptools - - qemu-kvm - - rsync - - zstd - install: sudo adduser "$USER" kvm - before_script: - # The double sudo is necessary to pick up the new group membership. - - sudo -E sudo -E -u "$USER" scripts/vmtest/run.sh -k "$KERNEL"'.*' -o -d ~ ~/root.img; exitstatus=$? - # Exit status 0 is success, 1 is test failure (should fail in the script - # step), anything else is an error (should fail here). - - test $exitstatus -le 1 - script: test $exitstatus -eq 0""" - ) diff --git a/scripts/vmtest/README.rst b/scripts/vmtest/README.rst deleted file mode 100644 index a1d4dfddf..000000000 --- a/scripts/vmtest/README.rst +++ /dev/null @@ -1,25 +0,0 @@ -drgn VM Testing -=============== - -drgn is tested on multiple Linux kernel versions using QEMU. The tests are run -using an Arch Linux-based root filesystem image and a minimal kernel. - -``scripts/vmtest/run.sh`` downloads the required testing files, sets up the -disk image for the virtual machine, and runs drgn tests in the virtual machine. - -``scripts/vmtest/mkrootfs.sh`` builds the root filesystem image. It must be run -from an Arch Linux machine (or an Arch Linux chroot). The image contains the -dependencies for drgn and a BusyBox init setup. The setup allows ``run.sh`` to -simply copy in the source code and drop in a couple of init scripts to -automatically run tests on boot. 
- -The root filesystem images and kernel builds are hosted on `Dropbox -`_. -``scripts/vmtest/manage.py`` builds kernels and uploads files to this shared -folder using the Dropbox API. It also updates the ``INDEX`` file in that shared -folder, which is required because the files under a shared folder have -randomly-generated links. - -``scripts/generate_travis_yml.py`` generates ``.travis.yml`` to test all -supported kernel versions (currently the mainline, stable, and longterm -releases from kernel.org). diff --git a/scripts/vmtest/mkrootfs.sh b/scripts/vmtest/mkrootfs.sh deleted file mode 100755 index b25161989..000000000 --- a/scripts/vmtest/mkrootfs.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/bash - -# This script was inspired by https://github.com/pierres/genbootstrap, which is -# used to generate the official Arch Linux bootstrap images. - -set -euo pipefail - -usage () { - USAGE_STRING="usage: $0 [NAME] - $0 -h - -Build an Arch Linux root filesystem image for testing drgn in a virtual -machine. - -The image is generated as a zstd-compressed tarball. - -This must be run as root, as most of the installation is done in a chroot. 
- -Arguments: - NAME name of generated image file (default: - drgn-vmtest-rootfs-\$DATE.tar.zst) - -Options: - -h display this help message and exit" - - case "$1" in - out) - echo "$USAGE_STRING" - exit 0 - ;; - err) - echo "$USAGE_STRING" >&2 - exit 1 - ;; - esac -} - -while getopts "h" OPT; do - case "$OPT" in - h) - usage out - ;; - *) - usage err - ;; - esac -done -if [[ $OPTIND -eq $# ]]; then - NAME="${!OPTIND}" -elif [[ $OPTIND -gt $# ]]; then - NAME="drgn-vmtest-rootfs-$(date +%Y.%m.%d).tar.zst" -else - usage err -fi - -pacman_conf= -root= -trap 'rm -rf "$pacman_conf" "$root"' EXIT -pacman_conf="$(mktemp -p "$PWD")" -cat > "$pacman_conf" << "EOF" -[options] -Architecture = auto -CheckSpace -SigLevel = Required DatabaseOptional -[core] -Include = /etc/pacman.d/mirrorlist -[extra] -Include = /etc/pacman.d/mirrorlist -[community] -Include = /etc/pacman.d/mirrorlist -EOF -root="$(mktemp -d -p "$PWD")" - -packages=( - busybox - # Required by some of the packages below. - gettext - # drgn dependencies. - autoconf - automake - bison - bzip2 - flex - gawk - gcc - libtool - make - pkgconf - python - python-setuptools - xz - zlib -) - -pacstrap -C "$pacman_conf" -cGM "$root" "${packages[@]}" - -# Remove unnecessary files from the chroot. - -# We don't need the pacman databases anymore. -rm -rf "$root/var/lib/pacman/sync/" -# We don't need D, Fortran, or Go. -rm -f "$root/usr/lib/libgdruntime."* \ - "$root/usr/lib/libgphobos."* \ - "$root/usr/lib/libgfortran."* \ - "$root/usr/lib/libgo."* -# We don't need the Python test package. -rm -rf "$root/usr/lib/python"*/test -# We don't need any documentation. -rm -rf "$root/usr/share/doc" \ - "$root/usr/share/help" \ - "$root/usr/share/man" \ - "$root/usr/share/texinfo" -# We don't need locale data. 
-find "$root/usr/share/i18n/locales" "$root/usr/share/locale" \ - -mindepth 1 -maxdepth 1 -not -name POSIX -exec rm -rf {} + - -chroot "$root" /bin/busybox --install - -cat > "$root/etc/fstab" << "EOF" -dev /dev devtmpfs rw,nosuid 0 0 -proc /proc proc rw,nosuid,nodev,noexec 0 0 -sys /sys sysfs rw,nosuid,nodev,noexec 0 0 -EOF -chmod 644 "$root/etc/fstab" - -cat > "$root/etc/inittab" << "EOF" -::sysinit:/etc/init.d/rcS -::ctrlaltdel:/sbin/reboot -::shutdown:/sbin/swapoff -a -::shutdown:/bin/umount -a -r -::restart:/sbin/init -EOF -chmod 644 "$root/etc/inittab" - -mkdir -m 755 "$root/etc/init.d" "$root/etc/rcS.d" -cat > "$root/etc/init.d/rcS" << "EOF" -#!/bin/sh - -/bin/mount -a - -for path in /etc/rcS.d/S*; do - [ -x "$path" ] && "$path" -done -EOF -chmod 755 "$root/etc/init.d/rcS" - -chmod 755 "$root" -tar -C "$root" -c . | zstd -T0 -19 -o "$NAME" -chmod 644 "$NAME" diff --git a/scripts/vmtest/run.sh b/scripts/vmtest/run.sh deleted file mode 100755 index f1541bc50..000000000 --- a/scripts/vmtest/run.sh +++ /dev/null @@ -1,410 +0,0 @@ -#!/bin/bash - -set -uo pipefail -trap 'exit 2' ERR - -usage () { - USAGE_STRING="usage: $0 [-k KERNELRELEASE|-b DIR] [[-r ROOTFSVERSION] [-fo]|-I] [-Si] [-d DIR] IMG - $0 [-k KERNELRELEASE] -l - $0 -h - -Run drgn tests in a virtual machine. - -This exits with status 0 on success, 1 if the virtual machine ran successfully -but tests failed, and 2 if we encountered a fatal error. - -This script uses sudo to mount and modify the disk image. - -Arguments: - IMG path of virtual machine disk image to create - -Versions: - -k, --kernel=KERNELRELEASE - kernel release to test. This is a glob pattern; the - newest (sorted by version number) release that matches - the pattern is used (default: newest available release) - - -b, --build DIR use the kernel built in the given directory. 
This option - cannot be combined with -k - - -r, --rootfs=ROOTFSVERSION - version of root filesystem to use (default: newest - available version) - -Setup: - -f, --force overwrite IMG if it already exists - - -o, --one-shot one-shot mode. By default, this script saves a clean copy - of the downloaded root filesystem image and vmlinux and - makes a copy (reflinked, when possible) for executing the - virtual machine. This allows subsequent runs to skip - downloading these files. If this option is given, the - root filesystem image and vmlinux are always - re-downloaded and are not saved. This option implies -f - - -I, --skip-image skip creating the disk image; use the existing one at - IMG. This option cannot be combined with -r, -f, or -o - - -S, --skip-source skip copying the source files and init scripts - -Miscellaneous: - -i, --interactive interactive mode. Boot the virtual machine into an - interactive shell instead of automatically running tests - - -d, --dir=DIR working directory to use for downloading and caching - files (default: current working directory) - - -l, --list list available kernel releases instead of running tests. 
- The list may be filtered with -k - - -h, --help display this help message and exit" - - case "$1" in - out) - echo "$USAGE_STRING" - exit 0 - ;; - err) - echo "$USAGE_STRING" >&2 - exit 2 - ;; - esac -} - -TEMP=$(getopt -o 'k:b:r:foISid:lh' --long 'kernel:,build:,rootfs:,force,one-shot,skip-image,skip-source,interactive,dir:,list,help' -n "$0" -- "$@") -eval set -- "$TEMP" -unset TEMP - -unset KERNELRELEASE -unset BUILDDIR -unset ROOTFSVERSION -unset IMG -FORCE=0 -ONESHOT=0 -SKIPIMG=0 -SKIPSOURCE=0 -APPEND="" -DIR="$PWD" -LIST=0 -while true; do - case "$1" in - -k|--kernel) - KERNELRELEASE="$2" - shift 2 - ;; - -b|--build) - BUILDDIR="$2" - shift 2 - ;; - -r|--rootfs) - ROOTFSVERSION="$2" - shift 2 - ;; - -f|--force) - FORCE=1 - shift - ;; - -o|--one-shot) - ONESHOT=1 - FORCE=1 - shift - ;; - -I|--skip-image) - SKIPIMG=1 - shift - ;; - -S|--skip-source) - SKIPSOURCE=1 - shift - ;; - -i|--interactive) - APPEND=" single" - shift - ;; - -d|--dir) - DIR="$2" - shift 2 - ;; - -l|--list) - LIST=1 - ;; - -h|--help) - usage out - ;; - --) - shift - break - ;; - *) - usage err - ;; - esac -done -if [[ -v BUILDDIR ]]; then - if [[ -v KERNELRELEASE ]]; then - usage err - fi -elif [[ ! -v KERNELRELEASE ]]; then - KERNELRELEASE='*' -fi -if [[ $SKIPIMG -ne 0 && ( -v ROOTFSVERSION || $FORCE -ne 0 ) ]]; then - usage err -fi -if (( LIST )); then - if [[ $# -ne 0 || -v BUILDDIR || -v ROOTFSVERSION || $FORCE -ne 0 || - $SKIPIMG -ne 0 || $SKIPSOURCE -ne 0 || -n $APPEND ]]; then - usage err - fi -else - if [[ $# -ne 1 ]]; then - usage err - fi - IMG="${!OPTIND}" -fi - -unset URLS -cache_urls() { - if ! declare -p URLS &> /dev/null; then - # This URL contains a mapping from file names to URLs where - # those files can be downloaded. 
- local INDEX='https://www.dropbox.com/sh/2mcf2xvg319qdaw/AAC_AbpvQPRrHF-99B2REpXja/x86_64/INDEX?dl=1' - declare -gA URLS - while IFS=$'\t' read -r name url; do - URLS["$name"]="$url" - done < <(curl -LfsS "$INDEX") - fi -} - -matching_kernel_releases() { - local pattern="$1" - { - for file in "${!URLS[@]}"; do - if [[ $file =~ ^vmlinux-(.*).zst$ ]]; then - release="${BASH_REMATCH[1]}" - case "$release" in - $pattern) - # sort -V handles rc versions properly - # if we use "~" instead of "-". - echo "${release//-rc/~rc}" - ;; - esac - fi - done - } | sort -rV | sed 's/~rc/-rc/g' -} - -newest_rootfs_version() { - { - for file in "${!URLS[@]}"; do - if [[ $file =~ ^drgn-vmtest-rootfs-(.*)\.tar\.zst$ ]]; then - echo "${BASH_REMATCH[1]}" - fi - done - } | sort -rV | head -1 -} - -download() { - local file="$1" - cache_urls - if [[ ! -v URLS[$file] ]]; then - echo "$file not found" >&2 - return 1 - fi - echo "Downloading $file..." >&2 - curl -Lf "${URLS[$file]}" "${@:2}" -} - -set_nocow() { - touch "$@" - chattr +C "$@" >/dev/null 2>&1 || true -} - -cp_img() { - set_nocow "$2" - cp --reflink=auto "$1" "$2" -} - -create_rootfs_img() { - local path="$1" - set_nocow "$path" - truncate -s 2G "$path" - mkfs.ext4 -q "$path" -} - -download_rootfs() { - local rootfsversion="$1" - local dir="$2" - download "drgn-vmtest-rootfs-$rootfsversion.tar.zst" | - zstd -d | sudo tar -C "$dir" -x -} - -if (( LIST )); then - cache_urls - matching_kernel_releases "$KERNELRELEASE" - exit 0 -fi - -if [[ $FORCE -eq 0 && $SKIPIMG -eq 0 && -e $IMG ]]; then - echo "$IMG already exists; use -f to overwrite it or -I to reuse it" >&2 - exit 1 -fi - -# Only go to the network if it's actually a glob pattern. -if [[ -v BUILDDIR ]]; then - KERNELRELEASE="$(make -C "$BUILDDIR" -s kernelrelease)" -elif [[ ! $KERNELRELEASE =~ ^([^\\*?[]|\\[*?[])*\\?$ ]]; then - # We need to cache the list of URLs outside of the command - # substitution, which happens in a subshell. 
- cache_urls - KERNELRELEASE="$(matching_kernel_releases "$KERNELRELEASE" | head -1)" - if [[ -z $KERNELRELEASE ]]; then - echo "No matching kernel release found" >&2 - exit 1 - fi -fi -if [[ $SKIPIMG -eq 0 && ! -v ROOTFSVERSION ]]; then - cache_urls - ROOTFSVERSION="$(newest_rootfs_version)" -fi - -echo "Kernel release: $KERNELRELEASE" >&2 -if (( SKIPIMG )); then - echo "Not extracting root filesystem" >&2 -else - echo "Root filesystem version: $ROOTFSVERSION" >&2 -fi -echo "Disk image: $IMG" >&2 - -tmp= -ARCH_DIR="$DIR/x86_64" -mkdir -p "$ARCH_DIR" -mnt="$(mktemp -d -p "$DIR" mnt.XXXXXXXXXX)" - -cleanup() { - if [[ -n $tmp ]]; then - rm -f "$tmp" || true - fi - if mountpoint -q "$mnt"; then - sudo umount "$mnt" || true - fi - if [[ -d "$mnt" ]]; then - rmdir "$mnt" || true - fi -} -trap cleanup EXIT - -if [[ -v BUILDDIR ]]; then - vmlinuz="$BUILDDIR/$(make -C "$BUILDDIR" -s image_name)" -else - vmlinuz="$ARCH_DIR/vmlinuz-$KERNELRELEASE" - if [[ ! -e $vmlinuz ]]; then - tmp="$(mktemp "$vmlinuz.XXX.part")" - download "vmlinuz-$KERNELRELEASE" -o "$tmp" - mv "$tmp" "$vmlinuz" - tmp= - fi -fi - -# Mount and set up the rootfs image. -if (( ONESHOT )); then - rm -f "$IMG" - create_rootfs_img "$IMG" - sudo mount -o loop "$IMG" "$mnt" - download_rootfs "$ROOTFSVERSION" "$mnt" -else - if (( ! SKIPIMG )); then - rootfs_img="$ARCH_DIR/drgn-vmtest-rootfs-$ROOTFSVERSION.img" - - if [[ ! -e $rootfs_img ]]; then - tmp="$(mktemp "$rootfs_img.XXX.part")" - set_nocow "$tmp" - truncate -s 2G "$tmp" - mkfs.ext4 -q "$tmp" - sudo mount -o loop "$tmp" "$mnt" - - download_rootfs "$ROOTFSVERSION" "$mnt" - - sudo umount "$mnt" - mv "$tmp" "$rootfs_img" - tmp= - fi - - rm -f "$IMG" - cp_img "$rootfs_img" "$IMG" - fi - sudo mount -o loop "$IMG" "$mnt" -fi - -# Install vmlinux. 
-vmlinux="$mnt/boot/vmlinux-$KERNELRELEASE" -if [[ -v BUILDDIR || $ONESHOT -eq 0 ]]; then - if [[ -v BUILDDIR ]]; then - source_vmlinux="$BUILDDIR/vmlinux" - else - source_vmlinux="$ARCH_DIR/vmlinux-$KERNELRELEASE" - if [[ ! -e $source_vmlinux ]]; then - tmp="$(mktemp "$source_vmlinux.XXX.part")" - download "vmlinux-$KERNELRELEASE.zst" | zstd -dfo "$tmp" - mv "$tmp" "$source_vmlinux" - tmp= - fi - fi - echo "Copying vmlinux..." >&2 - sudo rsync -cp --chmod 0644 "$source_vmlinux" "$vmlinux" -else - # We could use "sudo zstd -o", but let's not run zstd as root with - # input from the internet. - download "vmlinux-$KERNELRELEASE.zst" | - zstd -d | sudo tee "$vmlinux" > /dev/null - sudo chmod 644 "$vmlinux" -fi - -if (( SKIPSOURCE )); then - echo "Not copying source files..." >&2 -else - "${PYTHON:-python3}" setup.py egg_info - - echo "Copying source files..." >&2 - - # Copy the source files in. - sudo mkdir -p -m 0755 "$mnt/drgn" - sudo rsync --files-from=drgn.egg-info/SOURCES.txt -cpt . "$mnt/drgn" - - # Create the init scripts. - sudo tee "$mnt/etc/rcS.d/S50-run-tests" > /dev/null << "EOF" -#!/bin/sh - -# Force the Linux helper tests to run and fail if they would be skipped -# otherwise. -export DRGN_RUN_LINUX_HELPER_TESTS=1 -cd /drgn && python3 --version && python3 setup.py build test -echo $? > /exitstatus -chmod 644 /exitstatus -EOF - sudo chmod 755 "$mnt/etc/rcS.d/S50-run-tests" - - sudo tee "$mnt/etc/rcS.d/S99-poweroff" > /dev/null << "EOF" -#!/bin/sh - -poweroff -EOF - sudo chmod 755 "$mnt/etc/rcS.d/S99-poweroff" -fi - -sudo umount "$mnt" - -echo "Starting virtual machine..." 
>&2 -qemu-system-x86_64 -nodefaults -display none -serial mon:stdio \ - -cpu kvm64 -enable-kvm -smp "$(nproc)" -m 2G \ - -drive file="$IMG",format=raw,index=1,media=disk,if=virtio,cache=none \ - -kernel "$vmlinuz" -append "root=/dev/vda rw console=ttyS0,115200$APPEND" - -sudo mount -o loop "$IMG" "$mnt" -if exitstatus="$(cat "$mnt/exitstatus" 2>/dev/null)"; then - printf '\nTests exit status: %s\n' "$exitstatus" >&2 -else - printf '\nCould not read tests exit status\n' >&2 - exitstatus=1 -fi -sudo umount "$mnt" -exit "$exitstatus" diff --git a/setup.py b/setup.py index d14e6a9c1..21eccaaba 100755 --- a/setup.py +++ b/setup.py @@ -4,12 +4,13 @@ from distutils import log from distutils.command.build import build as _build from distutils.dir_util import mkpath +from distutils.errors import DistutilsError from distutils.file_util import copy_file import os import os.path import re import pkg_resources -from setuptools import setup, find_packages +from setuptools import setup, find_packages, Command from setuptools.command.build_ext import build_ext as _build_ext from setuptools.command.egg_info import egg_info as _egg_info from setuptools.extension import Extension @@ -17,20 +18,14 @@ import subprocess import sys +from util import nproc, out_of_date + class build(_build): def finalize_options(self): super().finalize_options() if self.parallel is None: - self.parallel = len(os.sched_getaffinity(0)) + 1 - - -def out_of_date(path, *deps): - try: - mtime = os.stat(path).st_mtime - except FileNotFoundError: - return True - return any(os.stat(dep).st_mtime > mtime for dep in deps) + self.parallel = nproc() + 1 class build_ext(_build_ext): @@ -124,6 +119,123 @@ def run(self): super().run() +class test(Command): + description = "run unit tests after in-place build" + + KERNELS = ["5.6", "5.5", "5.4", "4.19", "4.14", "4.9", "4.4"] + + user_options = [ + ( + "kernel", + "K", + "run Linux kernel helper tests in a virtual machine on all supported kernels " + f"({', 
'.join(KERNELS)})", + ), + ( + "extra-kernels=", + "k", + "additional kernels to run Linux kernel helper tests on in a virtual machine " + "(comma-separated list of kernel build directory path or " + "wildcard pattern matching uploaded kernel release strings)", + ), + ( + "vmtest-dir=", + "d", + "directory for built artifacts and downloaded kernels for virtual machine tests (default: 'build/vmtest')", + ), + ] + + def initialize_options(self): + self.kernel = False + self.extra_kernels = "" + self.vmtest_dir = None + + def finalize_options(self): + self.kernels = [kernel for kernel in self.extra_kernels.split(",") if kernel] + if self.kernel: + self.kernels.extend(kernel + ".*" for kernel in test.KERNELS) + if self.vmtest_dir is None: + build_base = self.get_finalized_command("build").build_base + self.vmtest_dir = os.path.join(build_base, "vmtest") + + def _run_local(self): + import unittest + + argv = ["discover"] + if self.verbose: + argv.append("-v") + test = unittest.main(module=None, argv=argv, exit=False) + return test.result.wasSuccessful() + + def _run_vm(self, **kwds): + import vmtest.vm + + try: + with vmtest.vm.VM(**kwds) as vm: + args = [ + # fmt: off + sys.executable, "-B", "-m", + "unittest", "discover", "-t", ".", "-s", "tests/helpers/linux", + # fmt: on + ] + if self.verbose: + args.append("-v") + return vm.run( + args, cwd=os.getcwd(), env={"DRGN_RUN_LINUX_HELPER_TESTS": "1"} + ).returncode == 0 + except vmtest.vm.LostVMError as e: + self.announce(f"error: {e}", log.ERROR) + return False + + def run(self): + import vmtest.build + import vmtest.resolver + + # Start downloads ASAP so that they're hopefully done by the time we + # need them. 
+ with vmtest.resolver.KernelResolver(self.kernels, self.vmtest_dir) as resolver: + if self.kernels: + self.announce( + "downloading/preparing kernels in the background", log.INFO + ) + self.run_command("egg_info") + self.reinitialize_command("build_ext", inplace=1) + self.run_command("build_ext") + + passed = [] + failed = [] + + if self.kernels: + self.announce("running tests locally", log.INFO) + if self._run_local(): + passed.append("local") + else: + failed.append("local") + + if self.kernels: + built = vmtest.build.build_vmtest(self.vmtest_dir) + for kernel in resolver: + self.announce( + f"running tests in VM on Linux {kernel.release}", log.INFO + ) + if self._run_vm( + **built, vmlinux=kernel.vmlinux, vmlinuz=kernel.vmlinuz, + ): + passed.append(kernel.release) + else: + failed.append(kernel.release) + + if passed: + self.announce(f'Passed: {", ".join(passed)}', log.INFO) + if failed: + self.announce(f'Failed: {", ".join(failed)}', log.ERROR) + + if failed: + raise DistutilsError("some tests failed") + else: + self.announce("all tests passed", log.INFO) + + def get_version(): if not os.path.exists(".git"): # If this is a source distribution, get the version from the egg @@ -183,7 +295,12 @@ def get_version(): # This is here so that setuptools knows that we have an extension; it's # actually built using autotools/make. 
ext_modules=[Extension(name="_drgn", sources=[])], - cmdclass={"build": build, "build_ext": build_ext, "egg_info": egg_info}, + cmdclass={ + "build": build, + "build_ext": build_ext, + "egg_info": egg_info, + "test": test, + }, entry_points={"console_scripts": ["drgn=drgn.internal.cli:main"],}, python_requires=">=3.6", author="Omar Sandoval", diff --git a/tests/test_util.py b/tests/test_util.py new file mode 100644 index 000000000..bf5cef82f --- /dev/null +++ b/tests/test_util.py @@ -0,0 +1,27 @@ +from functools import cmp_to_key +import unittest + +from util import KernelVersion, verrevcmp + + +class TestUtil(unittest.TestCase): + def assertVersionSort(self, sorted_list): + self.assertEqual(sorted(sorted_list, key=cmp_to_key(verrevcmp)), sorted_list) + + def test_verrevcmp(self): + self.assertVersionSort( + ["0~", "0", "1", "1.0", "1.1~rc1", "1.1~rc2", "1.1", "1.2", "1.12"] + ) + self.assertVersionSort(["a", "."]) + self.assertVersionSort(["", "1"]) + self.assertVersionSort(["~", "~1"]) + self.assertVersionSort(["~~", "~~a", "~", "", "a"]) + + def test_kernel_version(self): + self.assertLess(KernelVersion("1.0"), KernelVersion("2.0")) + self.assertLess(KernelVersion("5.6.0-rc6"), KernelVersion("5.6.0-rc7")) + self.assertLess(KernelVersion("5.6.0-rc7"), KernelVersion("5.6.0")) + self.assertLess( + KernelVersion("5.6.0-rc7-vmtest2"), KernelVersion("5.6.0-vmtest1") + ) + self.assertLess(KernelVersion("5.6.0-vmtest1"), KernelVersion("5.6.0-vmtest2")) diff --git a/util.py b/util.py new file mode 100644 index 000000000..982bd9553 --- /dev/null +++ b/util.py @@ -0,0 +1,109 @@ +# Copyright 2020 - Omar Sandoval +# SPDX-License-Identifier: GPL-3.0+ + +from functools import total_ordering +import os +import re + + +def nproc() -> int: + return len(os.sched_getaffinity(0)) + + +def out_of_date(path: str, *deps: str) -> bool: + try: + mtime = os.stat(path).st_mtime + except FileNotFoundError: + return True + return any(os.stat(dep).st_mtime > mtime for dep in deps) + + +def 
_c_isdigit(c: int) -> bool: + # '0' <= c <= '9' + return 0x30 <= c <= 0x39 + + +def _c_isalpha(c: int) -> bool: + # ('A' <= c <= 'Z') or ('a' <= c <= 'z') + return (0x41 <= c <= 0x5A) or (0x61 <= c <= 0x7A) + + +def _order(c: int) -> int: + if _c_isdigit(c): + return 0 + elif _c_isalpha(c): + return c + elif c == 0x7E: # '~' + return -1 + else: + return c + 0x100 + + +def verrevcmp(v1: str, v2: str) -> int: + """ + Compare two versions according to the coreutils version sort rules + (https://www.gnu.org/software/coreutils/manual/html_node/Version_002dsort-ordering-rules.html). + Returns 0 if v1 == v2 by this definition, < 0 if v1 < v2, and > 0 if v1 > + v2. + + Adapted from + https://git.savannah.gnu.org/cgit/gnulib.git/tree/lib/filevercmp.c. + """ + # By definition, version sort compares ASCII, not Unicode: + # https://www.gnu.org/software/coreutils/manual/html_node/Version-sort-ignores-locale.html. + s1 = bytearray(v1, "utf-8") + s2 = bytearray(v2, "utf-8") + s1_len = len(s1) + s2_len = len(s2) + # Add sentinels to avoid some length checks. + s1.append(0) + s2.append(0) + s1_pos = s2_pos = 0 + while s1_pos < s1_len or s2_pos < s2_len: + while (s1_pos < s1_len and not _c_isdigit(s1[s1_pos])) or ( + s2_pos < s2_len and not _c_isdigit(s2[s2_pos]) + ): + s1_c = _order(s1[s1_pos]) if s1_pos < s1_len else 0 + s2_c = _order(s2[s2_pos]) if s2_pos < s2_len else 0 + if s1_c != s2_c: + return s1_c - s2_c + s1_pos += 1 + s2_pos += 1 + while s1[s1_pos] == 0x30: # '0' + s1_pos += 1 + while s2[s2_pos] == 0x30: # '0' + s2_pos += 1 + first_diff = 0 + while _c_isdigit(s1[s1_pos]) and _c_isdigit(s2[s2_pos]): + if not first_diff: + first_diff = s1[s1_pos] - s2[s2_pos] + s1_pos += 1 + s2_pos += 1 + if _c_isdigit(s1[s1_pos]): + return 1 + if _c_isdigit(s2[s2_pos]): + return -1 + if first_diff: + return first_diff + return 0 + + +@total_ordering +class KernelVersion: + """ + Version ordered by verrevcmp(), with -rc releases before the final release. 
+ """ + + def __init__(self, release: str) -> None: + # ~ sorts before anything, including the end of the version. + self._key = re.sub(r"-(rc[0-9])", r"~\1", release) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, KernelVersion): + return NotImplemented + return self._key == other._key + + def __lt__(self, other: object) -> bool: + if not isinstance(other, KernelVersion): + return NotImplemented + return verrevcmp(self._key, other._key) < 0 diff --git a/vmtest/README.rst b/vmtest/README.rst new file mode 100644 index 000000000..2859d6c18 --- /dev/null +++ b/vmtest/README.rst @@ -0,0 +1,49 @@ +drgn VM Testing +=============== + +drgn has a significant amount of code (both core and in helpers) which is +dependent on the Linux kernel version. This code is tested on multiple Linux +kernel versions in a virtual machine. These tests can be run on all supported +kernels with ``python3 setup.py test -K``. This requires QEMU and zstd to be +installed. + +Tests can also be run on specific kernels with ``-k``. This takes a +comma-separated list of kernels which are either a wildcard pattern (e.g., +``5.6.*``) that matches a kernel release hosted on Dropbox (see below) or a +kernel build directory path starting with ``.`` or ``/``. + +Architecture +------------ + +The goal of vmtest is to run tests in the same userspace environment as the +host, but with a different kernel. The host runs the virtual machine with `QEMU +`_ (see the `vmtest.vm `_ module). + +The guest mounts the host's root filesystem as its own root filesystem via +`VirtFS `_. It is mounted read-only for +safety. To support modifications, the guest uses `OverlayFS +`_ to +overlay a read-write tmpfs over the VirtFS root. + +The guest runs a `special init process `_ which sets up the system and +filesystem hierarchy (including creating the appropriate link to vmlinux) and +communicates with the host over `virtio-serial +`_. 
The protocol is +essentially that the host sends the arguments to `execvpe(3) +`_ and the guest replies with +the `wait(2) `_ status. + +This infrastructure is all fairly generic. The drgn-specific parts are: + +1. The kernel builds. The `kernel configuration `_ includes everything + required to run drgn and the Linux kernel helper tests. These builds are + hosted on `Dropbox + `_. + They are managed via the Dropbox API by the `vmtest.manage `_ CLI + and downloaded by the `vmtest.resolver `_ module. +2. The test command itself. This is just some ``setup.py`` glue and the proper + invocation of the Python `unittest command line interface + `_. + +The ``vmtest.vm`` and ``vmtest.resolver`` modules also have CLIs for testing +purposes. These are subject to change. diff --git a/vmtest/__init__.py b/vmtest/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/vmtest/build.py b/vmtest/build.py new file mode 100644 index 000000000..7f608c3f9 --- /dev/null +++ b/vmtest/build.py @@ -0,0 +1,79 @@ +import os +import os.path +import shlex +import subprocess +from typing import Dict + +from util import out_of_date + + +def _compile( + *args: str, + CPPFLAGS: str = "", + CFLAGS: str = "", + LDFLAGS: str = "", + LIBADD: str = "" +) -> None: + # This mimics automake: the order of the arguments allows for the default + # flags to be overridden by environment variables, and we use the same + # default CFLAGS. 
+ cmd = [ + os.getenv("CC", "cc"), + *shlex.split(CPPFLAGS), + *shlex.split(os.getenv("CPPFLAGS", "")), + *shlex.split(CFLAGS), + *shlex.split(os.getenv("CFLAGS", "-g -O2")), + *shlex.split(LDFLAGS), + *shlex.split(os.getenv("LDFLAGS", "")), + *args, + *shlex.split(LIBADD), + *shlex.split(os.getenv("LIBS", "")), + ] + print(" ".join([shlex.quote(arg) for arg in cmd])) + subprocess.check_call(cmd) + + +def build_vmtest(dir: str) -> Dict[str, str]: + os.makedirs(dir, exist_ok=True) + + init = os.path.join(dir, "init") + init_c = os.path.relpath(os.path.join(os.path.dirname(__file__), "init.c")) + if out_of_date(init, init_c): + _compile("-o", init, init_c, CPPFLAGS="-D_GNU_SOURCE", LDFLAGS="-static") + + onoatimehack_so = os.path.join(dir, "onoatimehack.so") + onoatimehack_c = os.path.relpath( + os.path.join(os.path.dirname(__file__), "onoatimehack.c") + ) + if out_of_date(onoatimehack_so, onoatimehack_c): + _compile( + "-o", + onoatimehack_so, + onoatimehack_c, + CPPFLAGS="-D_GNU_SOURCE", + CFLAGS="-fPIC", + LDFLAGS="-shared", + LIBADD="-ldl", + ) + + return { + "init": init, + "onoatimehack": onoatimehack_so, + } + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser( + description="build vmtest files", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "directory", + nargs="?", + default="build/vmtest", + help="directory to put built files in", + ) + args = parser.parse_args() + print(build_vmtest(args.directory)) diff --git a/vmtest/config b/vmtest/config new file mode 100644 index 000000000..bf8ce5217 --- /dev/null +++ b/vmtest/config @@ -0,0 +1,57 @@ +# Minimal Linux kernel configuration for booting into vmtest and running drgn +# tests. + +CONFIG_LOCALVERSION="-vmtest1" + +CONFIG_SMP=y + +# No modules to simplify installing the kernel into the root filesystem image. +CONFIG_MODULES=n + +# We run the tests in KVM. 
+CONFIG_HYPERVISOR_GUEST=y +CONFIG_KVM_GUEST=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_SPINLOCKS=y + +# Minimum requirements for vmtest. +CONFIG_9P_FS=y +CONFIG_DEVTMPFS=y +CONFIG_INET=y +CONFIG_NET=y +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +CONFIG_OVERLAY_FS=y +CONFIG_PCI=y +CONFIG_PROC_FS=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_XATTR=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_PCI=y + +# drgn needs /proc/kcore for live debugging. +CONFIG_PROC_KCORE=y +# In some cases, it also needs /proc/kallsyms. +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y + +# drgn needs debug info. +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y + +# Before Linux kernel commit 8757dc970f55 ("x86/crash: Define +# arch_crash_save_vmcoreinfo() if CONFIG_CRASH_CORE=y") (in v5.6), some +# important information in VMCOREINFO is initialized by the kexec code. +CONFIG_KEXEC=y + +# For block tests. +CONFIG_BLK_DEV_LOOP=y + +# For kconfig tests. +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y diff --git a/vmtest/init.c b/vmtest/init.c new file mode 100644 index 000000000..9a136e8cd --- /dev/null +++ b/vmtest/init.c @@ -0,0 +1,517 @@ +// Copyright 2020 - Omar Sandoval +// SPDX-License-Identifier: GPL-3.0+ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HOSTNAME "vmtest" +#define VPORT_NAME "com.osandov.vmtest.0" + +__attribute__((format(printf, 1, 2))) +static void poweroff(const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + vfprintf(stderr, format, ap); + va_end(ap); + if (reboot(RB_POWER_OFF)) + perror("reboot"); + _exit(EXIT_FAILURE); +} + +#define CHECK(func, ...) 
({ \ + __auto_type _ret = func(__VA_ARGS__); \ + if (_ret == -1) \ + poweroff(#func ": %m\n"); \ + _ret; \ +}) + +#define CHECK1(func, arg1, ...) ({ \ + __auto_type _arg1 = (arg1); \ + __auto_type _ret = func(arg1, ##__VA_ARGS__); \ + if (_ret == -1) \ + poweroff(#func ": %s: %m\n", _arg1); \ + _ret; \ +}) + +#define CHECK2(func, arg1, arg2, ...) ({ \ + __auto_type _arg2 = (arg2); \ + __auto_type _ret = func((arg1), _arg2, ##__VA_ARGS__); \ + if (_ret == -1) \ + poweroff(#func ": %s: %m\n", _arg2); \ + _ret; \ +}) + +#define CHECKP(func, ...) ({ \ + __auto_type _ret = func(__VA_ARGS__); \ + if (_ret == (void *)0) \ + poweroff(#func ": %m\n"); \ + _ret; \ +}) + +#define CHECKP1(func, arg1, ...) ({ \ + __auto_type _arg1 = (arg1); \ + __auto_type _ret = func(arg1, ##__VA_ARGS__); \ + if (_ret == (void *)0) \ + poweroff(#func ": %s: %m\n", _arg1); \ + _ret; \ +}) + +static void write_text_file(const char *pathname, const char *contents) +{ + const char *p = contents, *end = p + strlen(contents); + int fd; + + fd = CHECK1(creat, pathname, 0644); + while (p < end) { + ssize_t ret; + + ret = write(fd, p, end - p); + if (ret == -1) + poweroff("write: %s\n", pathname); + p += ret; + } + close(fd); +} + +static void setup_vmlinux(void) +{ + const char *vmlinux = getenv("VMLINUX"); + struct utsname uts; +#define BOOT_VMLINUX_FORMAT "/mnt/upper/boot/vmlinux-%s" + /* - 3 for %s\0 */ + char path[sizeof(BOOT_VMLINUX_FORMAT) - 3 + sizeof(uts.release)]; + + if (!vmlinux) + return; + + CHECK(uname, &uts); + snprintf(path, sizeof(path), BOOT_VMLINUX_FORMAT, uts.release); + CHECK1(mkdir, "/mnt/upper/boot", 0755); + CHECK2(symlink, vmlinux, path); +} + +static void setup_fs(void) +{ + + CHECK2(mount, "tmpfs", "/mnt", "tmpfs", 0, ""); + CHECK1(mkdir, "/mnt/upper", 0755); + CHECK1(mkdir, "/mnt/work", 0755); + CHECK1(mkdir, "/mnt/merged", 0755); + + CHECK1(mkdir, "/mnt/upper/dev", 0755); + CHECK1(mkdir, "/mnt/upper/proc", 0555); + CHECK1(mkdir, "/mnt/upper/sys", 0555); + CHECK1(mkdir, 
"/mnt/upper/tmp", 01777); + + CHECK1(mkdir, "/mnt/upper/etc", 0755); + write_text_file("/mnt/upper/etc/hosts", + "127.0.0.1 localhost\n" + "::1 localhost\n" + "127.0.1.1 " HOSTNAME ".localdomain " HOSTNAME "\n"); + write_text_file("/mnt/upper/etc/resolv.conf", ""); + + setup_vmlinux(); + + CHECK2(mount, "overlay", "/mnt/merged", "overlay", 0, + "lowerdir=/,upperdir=/mnt/upper,workdir=/mnt/work"); + + CHECK2(syscall, SYS_pivot_root, "/mnt/merged", "/mnt/merged/mnt"); + CHECK1(chdir, "/"); + CHECK1(umount2, "/mnt", MNT_DETACH); + + CHECK2(mount, "dev", "/dev", "devtmpfs", MS_NOSUID | MS_NOEXEC, ""); + CHECK2(mount, "proc", "/proc", "proc", MS_NOSUID | MS_NODEV | MS_NOEXEC, + ""); + CHECK2(mount, "sys", "/sys", "sysfs", MS_NOSUID | MS_NODEV | MS_NOEXEC, + ""); + /* + * Ideally we'd just be able to create an opaque directory for /tmp on + * the upper layer. However, before Linux kernel commit 51f7e52dc943 + * ("ovl: share inode for hard link") (in v4.8), overlayfs doesn't + * handle hard links correctly, which breaks some tests. 
+ */ + CHECK2(mount, "tmpfs", "/tmp", "tmpfs", MS_NOSUID | MS_NODEV, ""); +} + +static void setup_net(void) +{ + struct ifreq ifr = { .ifr_name = "lo" }; + int fd; + + CHECK(sethostname, HOSTNAME, strlen(HOSTNAME)); + + fd = CHECK(socket, AF_INET, SOCK_DGRAM | SOCK_CLOEXEC, IPPROTO_IP); + if (ioctl(fd, SIOCGIFFLAGS, &ifr) == -1) + poweroff("ioctl: SIOCGIFFLAGS: %m\n"); + ifr.ifr_flags |= IFF_UP; + if (ioctl(fd, SIOCSIFFLAGS, &ifr) == -1) + poweroff("ioctl: SIOCSIFFLAGS: %m\n"); + close(fd); +} + +static int open_vport(void) +{ + DIR *dir; + char buf[1024]; + + dir = CHECKP1(opendir, "/sys/class/virtio-ports"); + for (;;) { + struct dirent *ent; + FILE *file; + bool got_line; + + errno = 0; + ent = readdir(dir); + if (!ent) { + if (errno) { + poweroff("readdir: /sys/class/virtio-ports: %m\n"); + } else { + poweroff("could not find virtio-port \"%s\"\n", + VPORT_NAME); + } + } + if (ent->d_name[0] == '.') + continue; + + snprintf(buf, sizeof(buf), "/sys/class/virtio-ports/%s/name", + ent->d_name); + file = fopen(buf, "re"); + if (!file) { + if (errno == ENOENT) + continue; + else + poweroff("fopen: %s: %m\n", buf); + } + got_line = fgets(buf, sizeof(buf), file); + fclose(file); + if (!got_line || strcmp(buf, VPORT_NAME "\n") != 0) + continue; + + snprintf(buf, sizeof(buf), "/dev/%s", ent->d_name); + closedir(dir); + return CHECK1(open, buf, O_RDWR | O_NONBLOCK | O_CLOEXEC); + } +} + +struct vec { + char **vec; + size_t capacity; + uint32_t count; + uint32_t pos; +}; + +enum { + SIGNALFD, + VPORTFD, +}; + +struct state { + enum { + STATE_ARGC, + STATE_ARG, + STATE_ARG_NUL, + STATE_ENV_COUNT, + STATE_ENV, + STATE_ENV_NUL, + STATE_EXECUTABLE, + STATE_EXECUTABLE_NUL, + STATE_CWD, + STATE_CWD_NUL, + STATE_EXEC, + STATE_WAIT, + STATE_STATUS_LO, + STATE_STATUS_HI, + } state; + + pid_t child; + uint16_t wstatus; + + struct pollfd fds[2]; + + char *buf; + size_t capacity; + size_t len; + size_t pos; + + struct vec args; + struct vec env; + char *executable; + char *cwd; +}; + 
+static void handle_signalfd_read(struct state *state) +{ + struct signalfd_siginfo siginfo; + ssize_t ret; + + ret = read(state->fds[SIGNALFD].fd, &siginfo, sizeof(siginfo)); + if (ret == -1) { + if (errno == EAGAIN) + return; + else + poweroff("read: signalfd\n"); + } + if (ret < sizeof(siginfo)) + return; + if (siginfo.ssi_signo == SIGCHLD) { + pid_t pid; + int wstatus; + + while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) { + if (state->state == STATE_WAIT && pid == state->child) { + if (WIFEXITED(wstatus)) { + printf("Exited with status %d\n", + WEXITSTATUS(wstatus)); + } else { + printf("Terminated by signal %d\n", + WTERMSIG(wstatus)); + } + state->wstatus = htole16(wstatus); + state->fds[VPORTFD].events |= POLLOUT; + state->state = STATE_STATUS_LO; + } + } + if (pid == -1 && errno != ECHILD) + poweroff("waitpid"); + } +} + +static void handle_vport_read(struct state *state) +{ + for (;;) { + ssize_t ret; + + if (state->len >= state->capacity) { + char *tmp; + uint32_t i; + + if (state->capacity) + state->capacity *= 2; + else + state->capacity = 4096; + tmp = CHECKP(malloc, state->capacity); + memcpy(tmp, state->buf, state->len); + for (i = 0; i < state->args.pos; i++) + state->args.vec[i] = + tmp + (state->args.vec[i] - state->buf); + for (i = 0; i < state->env.pos; i++) + state->env.vec[i] = + tmp + (state->env.vec[i] - state->buf); + if (state->executable) + state->executable = + tmp + (state->executable - state->buf); + if (state->cwd) + state->cwd = tmp + (state->cwd - state->buf); + free(state->buf); + state->buf = tmp; + } + + ret = read(state->fds[VPORTFD].fd, state->buf + state->len, + state->capacity - state->len); + if (ret == 0 || (ret == -1 && errno == EAGAIN)) + break; + else if (ret == -1) + poweroff("read: vport: %m\n"); + state->len += ret; + } + + while (state->pos < state->len && state->state < STATE_EXEC) { + switch (state->state) { + case STATE_ARGC: + case STATE_ENV_COUNT: { + struct vec *vec; + uint32_t count; + + if (state->len - 
state->pos < sizeof(count)) + return; + if (state->state == STATE_ARGC) + vec = &state->args; + else + vec = &state->env; + memcpy(&count, &state->buf[state->pos], sizeof(count)); + state->pos += sizeof(count); + vec->count = le32toh(count); + if (vec->count >= vec->capacity) { + size_t size; + + /* One extra element for NULL pointer. */ + if (__builtin_mul_overflow(sizeof(*vec->vec), + vec->count, &size) || + __builtin_add_overflow(size, + sizeof(*vec->vec), + &size)) + poweroff("count is too large\n"); + vec->vec = CHECKP(realloc, vec->vec, size); + vec->capacity = vec->count; + } + vec->vec[vec->count] = NULL; + state->state += vec->count ? 1 : 3; + break; + } + case STATE_ARG: + case STATE_ENV: + case STATE_EXECUTABLE: + case STATE_CWD: { + char **str; + + if (state->state == STATE_ARG) + str = &state->args.vec[state->args.pos++]; + else if (state->state == STATE_ENV) + str = &state->env.vec[state->env.pos++]; + else if (state->state == STATE_EXECUTABLE) + str = &state->executable; + else /* (state->state == STATE_CWD) */ + str = &state->cwd; + *str = &state->buf[state->pos]; + state->state++; + /* fallthrough */ + } + case STATE_ARG_NUL: + case STATE_ENV_NUL: + case STATE_EXECUTABLE_NUL: + case STATE_CWD_NUL: { + struct vec *vec; + char *nul; + + if (state->state == STATE_ARG_NUL) + vec = &state->args; + else if (state->state == STATE_ENV_NUL) + vec = &state->env; + else + vec = NULL; + nul = memchr(&state->buf[state->pos], 0, + state->len - state->pos); + if (nul) { + state->pos = nul + 1 - state->buf; + if (!vec || vec->pos == vec->count) + state->state++; + else + state->state--; + } else { + state->pos = state->len; + } + break; + default: + assert(false); + break; + } + } + } + + if (state->state == STATE_EXEC) { + uint32_t i; + pid_t pid; + + printf("Executing"); + for (i = 0; i < state->args.count; i++) + printf(" %s", state->args.vec[i]); + printf("\n"); + pid = CHECK(fork); + if (pid == 0) { + int status; + + if (state->cwd[0] && chdir(state->cwd) == 
-1) { + fprintf(stderr, "chdir: %s: %m\n", state->cwd); + _exit(EXIT_FAILURE); + } + execvpe(state->executable, state->args.vec, + state->env.vec); + /* Mimic bash exit status. */ + status = errno == ENOENT ? 127 : 126; + perror("execvpe"); + _exit(status); + } + state->child = pid; + state->fds[VPORTFD].revents &= ~POLLIN; + state->len -= state->pos; + memmove(state->buf, &state->buf[state->pos], state->len); + state->pos = 0; + state->args.pos = 0; + state->env.pos = 0; + state->executable = NULL; + state->cwd = NULL; + state->state = STATE_WAIT; + } +} + +static void handle_vport_write(struct state *state) +{ + ssize_t ret; + + assert(state->state == STATE_STATUS_LO || + state->state == STATE_STATUS_HI); + ret = write(state->fds[VPORTFD].fd, + (char *)&state->wstatus + (state->state - STATE_STATUS_LO), + STATE_STATUS_HI - STATE_STATUS_LO + 1); + if (ret == -1) { + if (errno == EAGAIN) + return; + else + poweroff("write: vport: %m\n"); + } + state->state += ret; + if (state->state > STATE_STATUS_HI) { + state->fds[VPORTFD].events &= ~POLLOUT; + state->state = STATE_ARGC; + } +} + +int main(void) +{ + sigset_t sigs; + struct state state = { .state = STATE_ARGC }; + + CHECK(sigemptyset, &sigs); + CHECK(sigaddset, &sigs, SIGCHLD); + CHECK(sigprocmask, SIG_BLOCK, &sigs, NULL); + + state.fds[SIGNALFD].fd = CHECK(signalfd, -1, &sigs, + SFD_NONBLOCK | SFD_CLOEXEC); + state.fds[SIGNALFD].events = POLLIN; + + setup_fs(); + setup_net(); + + state.fds[VPORTFD].fd = open_vport(); + state.fds[VPORTFD].events = POLLIN; + + for (;;) { + CHECK(poll, state.fds, sizeof(state.fds) / sizeof(state.fds[0]), + -1); + + if (state.fds[SIGNALFD].revents & POLLIN) + handle_signalfd_read(&state); + + if (state.fds[VPORTFD].revents & POLLIN) + handle_vport_read(&state); + if (state.fds[VPORTFD].revents & POLLOUT) + handle_vport_write(&state); + if (state.fds[VPORTFD].revents & POLLHUP) + poweroff("Host disconnected\n"); + } +} diff --git a/scripts/vmtest/manage.py b/vmtest/manage.py old mode 
100755 new mode 100644 similarity index 72% rename from scripts/vmtest/manage.py rename to vmtest/manage.py index 748d32dc7..7b121f14e --- a/scripts/vmtest/manage.py +++ b/vmtest/manage.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python3 +# Copyright 2020 - Omar Sandoval +# SPDX-License-Identifier: GPL-3.0+ import aiohttp import argparse @@ -8,14 +9,30 @@ import io import json import logging -import multiprocessing import os import os.path import re import shlex +import shutil import sys import time +from typing import ( + Any, + AsyncGenerator, + BinaryIO, + Dict, + List, + Optional, + Set, + SupportsFloat, + SupportsRound, + TextIO, + Tuple, +) import urllib.parse +from yarl import URL + +from util import nproc logger = logging.getLogger("asyncio") @@ -24,58 +41,11 @@ KERNEL_ORG_JSON = "https://www.kernel.org/releases.json" -DEFCONFIG = """\ -# Minimal configuration for booting into the root filesystem image and building -# and testing drgn on a live kernel. - -CONFIG_SMP=y - -# No modules to simplify installing the kernel into the root filesystem image. -CONFIG_MODULES=n - -# We run the tests in KVM. -CONFIG_HYPERVISOR_GUEST=y -CONFIG_KVM_GUEST=y -CONFIG_PARAVIRT=y -CONFIG_PARAVIRT_SPINLOCKS=y - -# Minimum requirements for booting up. -CONFIG_DEVTMPFS=y -CONFIG_EXT4_FS=y -CONFIG_PCI=y -CONFIG_PROC_FS=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SYSFS=y -CONFIG_VIRTIO_BLK=y -CONFIG_VIRTIO_PCI=y - -# drgn needs /proc/kcore for live debugging. -CONFIG_PROC_KCORE=y -# In some cases, it also needs /proc/kallsyms. -CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y - -# drgn needs debug info. -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_INFO_DWARF4=y - -# Some important information in VMCOREINFO is initialized in the kexec code for -# some reason. -CONFIG_KEXEC=y - -# In case we need to refer to the kernel config in the future. 
-CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -""" - - DROPBOX_API_URL = "https://api.dropboxapi.com" CONTENT_API_URL = "https://content.dropboxapi.com" -def humanize_size(n, precision=1): +def humanize_size(n: SupportsFloat, precision: int = 1) -> str: n = float(n) for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]: if abs(n) < 1024: @@ -88,19 +58,23 @@ def humanize_size(n, precision=1): return f"{n:.{precision}f}{unit}B" -def humanize_duration(seconds): +def humanize_duration(seconds: SupportsRound[Any]) -> str: seconds = round(seconds) return f"{seconds // 60}m{seconds % 60}s" -# Like aiohttp.ClientResponse.raise_for_status(), but includes the response -# body. -async def raise_for_status_body(resp): +async def raise_for_status_body(resp: aiohttp.ClientResponse) -> None: + """ + Like aiohttp.ClientResponse.raise_for_status(), but includes the response + body. + """ if resp.status >= 400: - message = resp.reason + message = resp.reason or "" body = await resp.text() if body: - message += ": " + body + if message: + message += ": " + message += body raise aiohttp.ClientResponseError( resp.request_info, resp.history, @@ -110,11 +84,17 @@ async def raise_for_status_body(resp): ) -async def get_kernel_org_releases(http_client): +def get_current_localversion() -> str: + with open(os.path.join(os.path.dirname(__file__), "config"), "r") as f: + match = re.search(r'^CONFIG_LOCALVERSION="([^"]*)"', f.read(), re.MULTILINE) + return match.group(1) if match else "" + + +async def get_kernel_org_versions(http_client: aiohttp.ClientSession) -> List[str]: async with http_client.get(KERNEL_ORG_JSON, raise_for_status=True) as resp: releases = (await resp.json())["releases"] return [ - "v" + release["version"] + release["version"] for release in releases if release["moniker"] in {"mainline", "stable", "longterm"} # 3.16 seems to be missing "x86/build/64: Force the linker to use @@ -125,28 +105,27 @@ async def get_kernel_org_releases(http_client): ] -async def 
get_available_kernel_releases(http_client, token): +async def get_available_kernel_releases( + http_client: aiohttp.ClientSession, token: str +) -> Set[str]: headers = {"Authorization": "Bearer " + token} params = {"path": "/Public/x86_64"} url = DROPBOX_API_URL + "/2/files/list_folder" available = set() while True: async with http_client.post(url, headers=headers, json=params) as resp: + if resp.status == 409 and (await resp.json())["error_summary"].startswith( + "path/not_found/" + ): + break + await raise_for_status_body(resp) obj = await resp.json() for entry in obj["entries"]: if entry[".tag"] != "file": continue - match = re.fullmatch( - r"vmlinux-(\d+)\.(\d+)\.(\d+)(-rc\d+)?\.zst", entry["name"] - ) - if not match: - continue - version = f"v{match.group(1)}.{match.group(2)}" - if match.group(3) != "0": - version += "." + match.group(3) - if match.group(4): - version += match.group(4) - available.add(version) + match = re.fullmatch(r"vmlinux-(.*)\.zst", entry["name"]) + if match: + available.add(match.group(1)) if not obj["has_more"]: break url = DROPBOX_API_URL + "/2/files/list_folder/continue" @@ -154,7 +133,7 @@ async def get_available_kernel_releases(http_client, token): return available -async def check_call(*args, **kwds): +async def check_call(*args: Any, **kwds: Any) -> None: proc = await asyncio.create_subprocess_exec(*args, **kwds) returncode = await proc.wait() if returncode != 0: @@ -164,10 +143,9 @@ async def check_call(*args, **kwds): ) -async def check_output(*args, **kwds): - proc = await asyncio.create_subprocess_exec( - *args, **kwds, stdout=asyncio.subprocess.PIPE - ) +async def check_output(*args: Any, **kwds: Any) -> bytes: + kwds["stdout"] = asyncio.subprocess.PIPE + proc = await asyncio.create_subprocess_exec(*args, **kwds) stdout = (await proc.communicate())[0] if proc.returncode != 0: command = " ".join(shlex.quote(arg) for arg in args) @@ -177,17 +155,27 @@ async def check_output(*args, **kwds): return stdout -async def 
compress_file(in_path, out_path, *args, **kwds): +async def compress_file(in_path: str, out_path: str, **kwds: Any) -> None: logger.info("compressing %r", in_path) start = time.monotonic() - await check_call("zstd", "-T0", "-19", "-q", in_path, "-o", out_path, *args, **kwds) + await check_call("zstd", "-T0", "-19", "-q", in_path, "-o", out_path, **kwds) elapsed = time.monotonic() - start logger.info("compressed %r in %s", in_path, humanize_duration(elapsed)) -def getpwd(): - # This is how GCC determines the working directory. See - # https://gcc.gnu.org/git/?p=gcc.git;a=blob;f=libiberty/getpwd.c;hb=HEAD +async def post_process_vmlinux(vmlinux: str, **kwds: Any) -> None: + logger.info("removing relocations from %r", vmlinux) + await check_call( + "objcopy", "--remove-relocations=*", vmlinux, vmlinux + ".norel", **kwds + ) + await compress_file(vmlinux + ".norel", vmlinux + ".zst") + + +def getpwd() -> str: + """ + Get the current working directory in the same way that GCC does. See + https://gcc.gnu.org/git/?p=gcc.git;a=blob;f=libiberty/getpwd.c;hb=HEAD. + """ try: pwd = os.environ["PWD"] if pwd.startswith("/"): @@ -200,13 +188,21 @@ def getpwd(): return os.getcwd() -async def build_kernel(commit, build_dir, log_file): +async def build_kernel( + commit: str, build_dir: str, log_file: TextIO +) -> Tuple[str, str]: + """ + Returns built kernel release (i.e., `uname -r`) and image name (e.g., + `arch/x86/boot/bzImage`). 
+ """ await check_call( "git", "checkout", commit, stdout=log_file, stderr=asyncio.subprocess.STDOUT ) - with open(os.path.join(build_dir, ".config"), "w") as config_file: - config_file.write(DEFCONFIG) + shutil.copy( + os.path.join(os.path.dirname(__file__), "config"), + os.path.join(build_dir, ".config"), + ) logger.info("building %s", commit) start = time.monotonic() @@ -218,7 +214,7 @@ async def build_kernel(commit, build_dir, log_file): "KCFLAGS=" + cflags, "O=" + build_dir, "-j", - str(multiprocessing.cpu_count()), + str(nproc()), ] await check_call( "make", @@ -234,20 +230,21 @@ async def build_kernel(commit, build_dir, log_file): vmlinux = os.path.join(build_dir, "vmlinux") release, image_name = ( await asyncio.gather( - compress_file( - vmlinux, - vmlinux + ".zst", - stdout=log_file, - stderr=asyncio.subprocess.STDOUT, + post_process_vmlinux( + vmlinux, stdout=log_file, stderr=asyncio.subprocess.STDOUT ), check_output("make", *kbuild_args, "-s", "kernelrelease", stderr=log_file), check_output("make", *kbuild_args, "-s", "image_name", stderr=log_file), ) )[1:] - return build_dir, release.decode().strip(), image_name.decode().strip() + return release.decode().strip(), image_name.decode().strip() -async def try_build_kernel(commit): +async def try_build_kernel(commit: str) -> Optional[Tuple[str, str, str]]: + """ + Returns build directory, kernel release, and image name on success, None on + error. 
+ """ proc = await asyncio.create_subprocess_exec( "git", "rev-parse", @@ -267,7 +264,8 @@ async def try_build_kernel(commit): os.mkdir(build_dir, 0o755) with open(log_path, "w") as log_file: try: - return await build_kernel(commit, build_dir, log_file) + release, image_name = await build_kernel(commit, build_dir, log_file) + return build_dir, release, image_name except Exception: logger.exception("building %s failed; see %r", commit, log_path) return None @@ -279,12 +277,12 @@ async def try_build_kernel(commit): class Uploader: CHUNK_SIZE = 8 * 1024 * 1024 - def __init__(self, http_client, token): + def __init__(self, http_client: aiohttp.ClientSession, token: str) -> None: self._http_client = http_client self._token = token - self._pending = [] + self._pending: List[Tuple[str, asyncio.Task[bool]]] = [] - async def _upload_file_obj(self, file, commit): + async def _upload_file_obj(self, file: BinaryIO, commit: Dict[str, Any]) -> None: headers = { "Authorization": "Bearer " + self._token, "Content-Type": "application/octet-stream", @@ -320,7 +318,9 @@ async def _upload_file_obj(self, file, commit): if last: break - async def _try_upload_file_obj(self, file, commit): + async def _try_upload_file_obj( + self, file: BinaryIO, commit: Dict[str, Any] + ) -> bool: try: logger.info("uploading %r", commit["path"]) start = time.monotonic() @@ -332,7 +332,7 @@ async def _try_upload_file_obj(self, file, commit): logger.exception("uploading %r failed", commit["path"]) return False - async def _try_upload_file(self, path, commit): + async def _try_upload_file(self, path: str, commit: Dict[str, Any]) -> bool: try: logger.info("uploading %r to %r", path, commit["path"]) start = time.monotonic() @@ -351,25 +351,31 @@ async def _try_upload_file(self, path, commit): return False @staticmethod - def _make_commit(dst_path, *, mode=None, autorename=None): - commit = {"path": dst_path} + def _make_commit( + dst_path: str, *, mode: Optional[str] = None, autorename: Optional[bool] = None 
+ ) -> Dict[str, Any]: + commit: Dict[str, Any] = {"path": dst_path} if mode is not None: commit["mode"] = mode if autorename is not None: commit["autorename"] = autorename return commit - def queue_file_obj(self, file, *args, **kwds): + def queue_file_obj(self, file: BinaryIO, *args: Any, **kwds: Any) -> None: commit = self._make_commit(*args, **kwds) task = asyncio.create_task(self._try_upload_file_obj(file, commit)) self._pending.append((commit["path"], task)) - def queue_file(self, src_path, *args, **kwds): + def queue_file(self, src_path: str, *args: Any, **kwds: Any) -> None: commit = self._make_commit(*args, **kwds) task = asyncio.create_task(self._try_upload_file(src_path, commit)) self._pending.append((commit["path"], task)) - async def wait(self): + async def wait(self) -> Tuple[List[str], List[str]]: + """ + Returns list of successfully uploaded paths and list of paths that + failed to upload. + """ succeeded = [] failed = [] for path, task in self._pending: @@ -381,10 +387,16 @@ async def wait(self): return succeeded, failed -# The Dropbox API doesn't provide a way to get the links for entries inside of -# a shared folder, so we're forced to scrape them from the webpage and XHR -# endpoint. -async def list_shared_folder(http_client, url): +async def list_shared_folder( + http_client: aiohttp.ClientSession, url: str +) -> AsyncGenerator[Tuple[str, bool, str], None]: + """ + List a Dropbox shared folder. The Dropbox API doesn't provide a way to get + the links for entries inside of a shared folder, so we're forced to scrape + them from the webpage and XHR endpoint. + + Generates filename, whether it is a directory, and its shared link. 
+ """ method = "GET" data = None while True: @@ -394,6 +406,7 @@ async def list_shared_folder(http_client, url): match = re.search( r'"\{\\"shared_link_infos\\".*[^\\]\}"', (await resp.text()) ) + assert match obj = json.loads(json.loads(match.group())) else: await raise_for_status_body(resp) @@ -406,16 +419,23 @@ async def list_shared_folder(http_client, url): method = "POST" url = "https://www.dropbox.com/list_shared_link_folder_entries" data = { - "t": http_client.cookie_jar.filter_cookies(url)["t"].value, + "t": http_client.cookie_jar.filter_cookies(URL(url))["t"].value, "link_key": obj["folder_share_token"]["linkKey"], "link_type": obj["folder_share_token"]["linkType"], "secure_hash": obj["folder_share_token"]["secureHash"], "sub_path": obj["folder_share_token"]["subPath"], } + assert data is not None data["voucher"] = obj["next_request_voucher"] -async def walk_shared_folder(http_client, url): +async def walk_shared_folder( + http_client: aiohttp.ClientSession, url: str +) -> AsyncGenerator[Tuple[str, List[Tuple[str, str]], List[Tuple[str, str]]], None]: + """ + Walk a Dropbox shared folder, similar to os.walk(). Generates path, list of + files and their shared links, and list of folders and their shared links. 
+ """ stack = [("", url)] while stack: path, url = stack.pop() @@ -432,7 +452,7 @@ async def walk_shared_folder(http_client, url): stack.extend((path + filename, href) for filename, href in dirs) -def make_download_url(url): +def make_download_url(url: str) -> str: parsed = urllib.parse.urlsplit(url) query = [ (name, value) @@ -443,7 +463,9 @@ def make_download_url(url): return urllib.parse.urlunsplit(parsed._replace(query=urllib.parse.urlencode(query))) -async def update_index(http_client, token, uploader): +async def update_index( + http_client: aiohttp.ClientSession, token: str, uploader: Uploader +) -> bool: try: logger.info("finding shared folder link") headers = {"Authorization": "Bearer " + token} @@ -505,7 +527,7 @@ async def update_index(http_client, token, uploader): return False -async def main(): +async def main() -> None: logging.basicConfig( format="%(asctime)s:%(levelname)s:%(name)s:%(message)s", level=logging.INFO ) @@ -551,7 +573,7 @@ async def main(): ): sys.exit("-b/-k must be run from linux.git") - if args.upload or args.upload_files or args.index: + if args.build_kernel_org or args.upload or args.upload_files or args.index: if os.isatty(sys.stdin.fileno()): dropbox_token = getpass.getpass("Enter Dropbox app API token: ") else: @@ -564,21 +586,37 @@ async def main(): async with aiohttp.ClientSession(trust_env=True) as http_client: # dict rather than set to preserve insertion order. - to_build = {build: True for build in (args.build or ())} + to_build = dict.fromkeys(args.build or ()) if args.build_kernel_org: + localversion = get_current_localversion() + logger.info("current localversion: %s", localversion) try: + # In this context, "version" is a tag name without the "v" + # prefix and "release" is a uname release string. 
logger.info( - "getting list of kernel.org releases and available releases" + "getting list of kernel.org versions and available releases" ) kernel_org, available = await asyncio.gather( - get_kernel_org_releases(http_client), + get_kernel_org_versions(http_client), get_available_kernel_releases(http_client, dropbox_token), ) - logger.info("kernel.org releases: %s", ", ".join(kernel_org)) + logger.info("kernel.org versions: %s", ", ".join(kernel_org)) logger.info("available releases: %s", ", ".join(sorted(available))) - for kernel in kernel_org: - if kernel not in available: - to_build[kernel] = True + for version in kernel_org: + match = re.fullmatch(r"(\d+\.\d+)(\.\d+)?(-rc\d+)?", version) + if not match: + logger.error("couldn't parse kernel.org version %r", version) + sys.exit(1) + release = "".join( + [ + match.group(1), + match.group(2) or ".0", + match.group(3) or "", + localversion, + ] + ) + if release not in available: + to_build["v" + version] = None except Exception: logger.exception( "failed to get kernel.org releases and/or available releases" diff --git a/vmtest/onoatimehack.c b/vmtest/onoatimehack.c new file mode 100644 index 000000000..3d65f51cf --- /dev/null +++ b/vmtest/onoatimehack.c @@ -0,0 +1,105 @@ +// Copyright 2020 - Omar Sandoval +// SPDX-License-Identifier: GPL-3.0+ + +/* + * QEMU's 9pfs server passes through O_NOATIME from the client. If the server + * process doesn't have permission to use O_NOATIME (e.g., because it's being + * run without privileges and it doesn't own the file), then the open will fail. + * Overlayfs uses O_NOATIME, so overlayfs on top of 9pfs doesn't work. We work + * around this with this LD_PRELOAD hack to remove O_NOATIME from open() and + * fcntl() calls. 
+ */
+
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#define ORIG(name) ({ \
+	static typeof(&name) orig; \
+	\
+	if (!orig) { \
+		void *tmp; \
+		\
+		tmp = dlsym(RTLD_NEXT, #name); \
+		if (!tmp) { \
+			fprintf(stderr, "%s\n", dlerror()); \
+			abort(); \
+		} \
+		orig = tmp; \
+	} \
+	orig; \
+})
+
+#ifndef __OPEN_NEEDS_MODE
+/* From glibc fcntl.h. */
+#ifdef __O_TMPFILE
+# define __OPEN_NEEDS_MODE(oflag) \
+	(((oflag) & O_CREAT) != 0 || ((oflag) & __O_TMPFILE) == __O_TMPFILE)
+#else
+# define __OPEN_NEEDS_MODE(oflag) (((oflag) & O_CREAT) != 0)
+#endif
+#endif
+
+#define OPEN_MODE(flags) ({ \
+	mode_t mode = 0; \
+	\
+	if (__OPEN_NEEDS_MODE(flags)) { \
+		va_list ap; \
+		\
+		va_start(ap, flags); \
+		mode = va_arg(ap, mode_t); \
+		va_end(ap); \
+	} \
+	mode; \
+})
+
+int open(const char *pathname, int flags, ...)
+{
+	flags &= ~O_NOATIME;
+	return ORIG(open)(pathname, flags, OPEN_MODE(flags));
+}
+
+int open64(const char *pathname, int flags, ...)
+{
+	flags &= ~O_NOATIME;
+	return ORIG(open64)(pathname, flags, OPEN_MODE(flags));
+}
+
+int openat(int dirfd, const char *pathname, int flags, ...)
+{
+	flags &= ~O_NOATIME;
+	return ORIG(openat)(dirfd, pathname, flags, OPEN_MODE(flags));
+}
+
+int openat64(int dirfd, const char *pathname, int flags, ...)
+{
+	flags &= ~O_NOATIME;
+	return ORIG(openat64)(dirfd, pathname, flags, OPEN_MODE(flags));
+}
+
+#define FCNTL_ARG(cmd) ({ \
+	va_list ap; \
+	void *arg; \
+	\
+	va_start(ap, cmd); \
+	arg = va_arg(ap, void *); \
+	va_end(ap); \
+	if (cmd == F_SETFL) \
+		arg = (void *)((uintptr_t)arg & ~O_NOATIME); \
+	arg; \
+})
+
+int fcntl(int fd, int cmd, ...)
+{
+	return ORIG(fcntl)(fd, cmd, FCNTL_ARG(cmd));
+}
+
+int fcntl64(int fd, int cmd, ...)
+{ + return ORIG(fcntl64)(fd, cmd, FCNTL_ARG(cmd)); +} diff --git a/vmtest/resolver.py b/vmtest/resolver.py new file mode 100644 index 000000000..bf2f8fa84 --- /dev/null +++ b/vmtest/resolver.py @@ -0,0 +1,173 @@ +# Copyright 2020 - Omar Sandoval +# SPDX-License-Identifier: GPL-3.0+ + +import fnmatch +import glob +import os +import os.path +import queue +import re +import shutil +import subprocess +import threading +from typing import Any, Dict, Iterator, NamedTuple, Optional, Sequence, Union +import urllib.request + +from util import KernelVersion + + +# This URL contains a mapping from file names to URLs where those files can be +# downloaded. This is needed because the files under a Dropbox shared folder +# have randomly-generated links. +_INDEX_URL = "https://www.dropbox.com/sh/2mcf2xvg319qdaw/AAC_AbpvQPRrHF-99B2REpXja/x86_64/INDEX?dl=1" + + +class ResolvedKernel(NamedTuple): + release: str + vmlinux: str + vmlinuz: str + + +class KernelResolver: + def __init__(self, kernels: Sequence[str], download_dir: str) -> None: + self._kernels = kernels + self._arch_download_dir = os.path.join(download_dir, "x86_64") + self._cached_index: Optional[Dict[str, str]] = None + self._index_lock = threading.Lock() + self._queue: queue.Queue[Union[ResolvedKernel, Exception, None]] = queue.Queue() + self._thread: Optional[threading.Thread] + # Don't create the thread if we don't have anything to do. 
+ if kernels: + self._thread = threading.Thread(target=self._resolve_all, daemon=True) + self._thread.start() + else: + self._thread = None + self._queue.put(None) + + def __enter__(self) -> "KernelResolver": + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + if self._thread: + self._thread.join() + + def _resolve_build(self, path: str) -> ResolvedKernel: + release = subprocess.check_output( + ["make", "-s", "kernelrelease"], universal_newlines=True, cwd=path, + ).strip() + vmlinuz = subprocess.check_output( + ["make", "-s", "image_name"], universal_newlines=True, cwd=path, + ).strip() + return ResolvedKernel( + release=release, + vmlinux=os.path.join(path, "vmlinux"), + vmlinuz=os.path.join(path, vmlinuz), + ) + + @property + def _index(self) -> Dict[str, str]: + if self._cached_index is None: + with self._index_lock: + if self._cached_index is None: + index = {} + with urllib.request.urlopen(_INDEX_URL) as u: + for line in u: + name, url = line.decode().rstrip("\n").split("\t", 1) + index[name] = url + self._cached_index = index + return self._cached_index + + def _find_kernel(self, pattern: str) -> str: + matches = [] + for name, url in self._index.items(): + match = re.fullmatch(r"vmlinux-(.*)\.zst", name) + if match and fnmatch.fnmatch(match.group(1), pattern): + matches.append(match.group(1)) + if not matches: + raise Exception(f"no kernel release matches {pattern!r}") + return max(matches, key=KernelVersion) + + def _download_file(self, name: str, *, compressed: bool = False) -> str: + path = os.path.join(self._arch_download_dir, name) + if not os.path.exists(path): + dir = os.path.dirname(path) + os.makedirs(dir, exist_ok=True) + with open(os.open(dir, os.O_WRONLY | os.O_TMPFILE), "wb") as f: + if compressed: + name += ".zst" + with urllib.request.urlopen(self._index[name]) as u: + if compressed: + with subprocess.Popen( + ["zstd", "-d", "-", "--stdout"], + stdin=subprocess.PIPE, + stdout=f, + ) as proc: + assert 
proc.stdin is not None + shutil.copyfileobj(u, proc.stdin) + if proc.returncode != 0: + raise subprocess.CalledProcessError( + proc.returncode, proc.args + ) + else: + shutil.copyfileobj(u, f) + # Passing dst_dir_fd forces Python to use linkat() with + # AT_SYMLINK_FOLLOW instead of link(). See + # https://bugs.python.org/msg348086. + dir_fd = os.open(dir, os.O_RDONLY | os.O_DIRECTORY) + try: + os.link( + f"/proc/self/fd/{f.fileno()}", + os.path.basename(path), + dst_dir_fd=dir_fd, + ) + finally: + os.close(dir_fd) + return path + + def _download(self, release: str) -> ResolvedKernel: + # Only do the wildcard lookup if the release is a wildcard + # pattern. + if release != glob.escape(release): + release = self._find_kernel(release) + vmlinux_path = self._download_file(f"vmlinux-{release}", compressed=True) + vmlinuz_path = self._download_file(f"vmlinuz-{release}") + return ResolvedKernel(release, vmlinux_path, vmlinuz_path) + + def _resolve_all(self) -> None: + try: + for kernel in self._kernels: + if kernel.startswith(".") or kernel.startswith("/"): + resolved = self._resolve_build(kernel) + else: + resolved = self._download(kernel) + self._queue.put(resolved) + self._queue.put(None) + except Exception as e: + self._queue.put(e) + + def __iter__(self) -> Iterator[ResolvedKernel]: + while True: + result = self._queue.get() + if isinstance(result, Exception): + raise result + elif result is None: + break + yield result + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser( + description="resolve and download vmtest kernels", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "-d", "--directory", default="build/vmtest", help="directory to download to" + ) + parser.add_argument("kernels", metavar="KERNEL", nargs="*") + args = parser.parse_args() + + with KernelResolver(args.kernels, args.directory) as resolver: + for kernel in resolver: + print(kernel) diff --git a/vmtest/vm.py b/vmtest/vm.py new 
file mode 100644 index 000000000..2713ba62c --- /dev/null +++ b/vmtest/vm.py @@ -0,0 +1,209 @@ +# Copyright 2020 - Omar Sandoval +# SPDX-License-Identifier: GPL-3.0+ + +import os +import os.path +import socket +import subprocess +import tempfile +from typing import Any, Dict, Mapping, Optional, Sequence + +from util import nproc + + +class LostVMError(Exception): + pass + + +class VM: + def __init__( + self, *, init: str, onoatimehack: str, vmlinux: str, vmlinuz: str, + ) -> None: + self._temp_dir = tempfile.TemporaryDirectory("drgn-vmtest-") + self._server_sock = socket.socket(socket.AF_UNIX) + socket_path = os.path.join(self._temp_dir.name, "socket") + self._server_sock.bind(socket_path) + self._server_sock.listen() + init = os.path.abspath(init) + if " " in init: + init = '"' + init + '"' + vmlinux = os.path.abspath(vmlinux) + if " " in vmlinux: + vmlinux = '"' + vmlinux + '"' + # This was added in QEMU 4.2.0. + if ( + "multidevs" + in subprocess.run( + ["qemu-system-x86_64", "-help"], + stdout=subprocess.PIPE, + universal_newlines=True, + ).stdout + ): + multidevs = ",multidevs=remap" + else: + multidevs = "" + self._qemu = subprocess.Popen( + [ + # fmt: off + "qemu-system-x86_64", "-cpu", "kvm64", "-enable-kvm", + + "-smp", str(nproc()), "-m", "2G", + + "-nodefaults", "-display", "none", "-serial", "mon:stdio", + + # This along with -append panic=-1 ensures that we exit on a + # panic instead of hanging. 
+ "-no-reboot", + + "-virtfs", + f"local,id=root,path=/,mount_tag=/dev/root,security_model=none,readonly{multidevs}", + + "-device", "virtio-serial", + "-chardev", f"socket,id=vmtest,path={socket_path}", + "-device", + "virtserialport,chardev=vmtest,name=com.osandov.vmtest.0", + + "-kernel", vmlinuz, + "-append", + f"rootfstype=9p rootflags=trans=virtio,cache=loose ro console=0,115200 panic=-1 init={init} VMLINUX={vmlinux}", + # fmt: on + ], + env={ + **os.environ, + "LD_PRELOAD": f"{onoatimehack}:{os.getenv('LD_PRELOAD', '')}", + }, + ) + self._server_sock.settimeout(5) + try: + self._sock = self._server_sock.accept()[0] + except socket.timeout: + raise LostVMError( + f"QEMU did not connect within {self._server_sock.gettimeout()} seconds" + ) + + def __enter__(self) -> "VM": + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + if hasattr(self, "_sock"): + self._sock.shutdown(socket.SHUT_RDWR) + self._sock.close() + if hasattr(self, "_qemu"): + self._qemu.wait() + if hasattr(self, "_server_sock"): + self._server_sock.close() + if hasattr(self, "_temp_dir"): + self._temp_dir.cleanup() + + def run( + self, + args: Sequence[str], + *, + executable: Optional[str] = None, + cwd: Optional[str] = None, + env: Optional[Mapping[str, str]] = None, + ) -> subprocess.CompletedProcess: # type: ignore[type-arg] + self._sock.sendall(len(args).to_bytes(4, "little")) + for arg in args: + self._sock.sendall(arg.encode()) + self._sock.sendall(b"\0") + + if env is None: + env = {} + self._sock.sendall(len(env).to_bytes(4, "little")) + for key, value in env.items(): + self._sock.sendall(key.encode()) + self._sock.sendall(b"=") + self._sock.sendall(value.encode()) + self._sock.sendall(b"\0") + + if executable is None: + executable = args[0] + self._sock.sendall(executable.encode()) + self._sock.sendall(b"\0") + + self._sock.sendall((cwd or "").encode()) + self._sock.sendall(b"\0") + + wstatus_buf = bytearray() + while len(wstatus_buf) < 2: + 
try: + buf = self._sock.recv(2 - len(wstatus_buf)) + except ConnectionResetError: + buf = b"" + if not buf: + raise LostVMError("lost VM") + wstatus_buf.extend(buf) + wstatus = int.from_bytes(wstatus_buf, "little") + if os.WIFEXITED(wstatus): + returncode = os.WEXITSTATUS(wstatus) + else: + returncode = -os.WTERMSIG(wstatus) + return subprocess.CompletedProcess(args, returncode) + + +if __name__ == "__main__": + import argparse + import sys + + from vmtest.build import build_vmtest + from vmtest.resolver import KernelResolver + + parser = argparse.ArgumentParser( + description="run vmtest virtual machine", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "-d", + "--directory", + default="build/vmtest", + help="directory for build artifacts and downloaded kernels", + ) + parser.add_argument( + "--lost-status", + metavar="STATUS", + type=int, + default=128, + help="exit status if VM is lost", + ) + parser.add_argument( + "-k", + "--kernel", + default=argparse.SUPPRESS, + help="kernel to use (default: latest available kernel)", + ) + parser.add_argument( + "-w", + "--wd", + metavar="PATH", + default=argparse.SUPPRESS, + help="working directory for command (default: /)", + ) + parser.add_argument( + "command", + type=str, + nargs=argparse.REMAINDER, + help="command to run in VM (default: /bin/sh -i)", + ) + args = parser.parse_args() + + with KernelResolver( + [getattr(args, "kernel", "*")], download_dir=args.directory + ) as resolver: + kernel = next(iter(resolver)) + try: + with VM( + **build_vmtest(args.directory), # type: ignore + vmlinux=kernel.vmlinux, + vmlinuz=kernel.vmlinuz, + ) as vm: + proc = vm.run( + args.command or ["/bin/sh", "-i"], cwd=getattr(args, "wd", None) + ) + if proc.returncode < 0: + sys.exit(128 - proc.returncode) + else: + sys.exit(proc.returncode) + except LostVMError as e: + print("error:", e, file=sys.stderr) + sys.exit(args.lost_status)