Migrate VM shares from 9p to virtiofs; update QEMU and doctor

Joshua Bell 2026-01-29 13:58:49 -06:00
parent 221d0ca596
commit 2555a47d62
3 changed files with 34 additions and 68 deletions

@@ -10,6 +10,7 @@ import (
 	"qvm/internal/workspace"
 	"strconv"
 	"strings"
+	"syscall"

 	"github.com/spf13/cobra"
 )
@@ -232,7 +233,7 @@ func checkVirtiofsdSockets() []string {
 			continue
 		}
-		if err := process.Signal(os.Signal(nil)); err != nil {
+		if err := process.Signal(syscall.Signal(0)); err != nil {
 			issues = append(issues, fmt.Sprintf("Orphaned virtiofsd socket: %s (process %d not running)", sock, pid))
 		}
 	}
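
Why this fixes the doctor check: on Unix, Go's os.Process.Signal rejects a nil os.Signal outright ("os: unsupported signal type"), so the old code took the error branch for every socket and flagged even live virtiofsd daemons as orphaned. Sending signal 0 is the Unix convention for an existence probe: the kernel runs its permission and existence checks but delivers nothing. A self-contained sketch of the probe (pidAlive is an illustrative name, not qvm code):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// pidAlive reports whether a process with the given PID currently exists.
// Signal 0 delivers no signal; a nil error means the kill(2) existence
// check passed, i.e. the process is running and visible to us.
func pidAlive(pid int) bool {
	p, err := os.FindProcess(pid) // never fails on Unix; it just wraps the PID
	if err != nil {
		return false
	}
	return p.Signal(syscall.Signal(0)) == nil
}

func main() {
	fmt.Println(pidAlive(os.Getpid())) // true: we are asking about ourselves
	fmt.Println(pidAlive(1 << 22))     // false on typical systems (beyond pid_max)
}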

@@ -130,71 +130,42 @@
   # Josh's timezone
   time.timeZone = "America/Chicago";

-  # Git safe.directory for 9p ownership issues
+  # Git safe.directory for virtiofs ownership issues
   environment.etc."gitconfig".text = ''
     [safe]
       directory = *
   '';

-  # 9p mount points for caches (must match qvm-start mount tags)
+  # virtiofs mount points for caches (must match qvm virtiofsd mount tags)
+  # Using virtiofs instead of 9p for better performance and hot-mount support
   fileSystems."/cache/cargo" = {
     device = "cargo_home";
-    fsType = "9p";
-    options = [
-      "trans=virtio"
-      "version=9p2000.L"
-      "msize=104857600"
-      "_netdev"
-      "nofail"
-    ];
+    fsType = "virtiofs";
+    options = [ "nofail" ];
   };

   fileSystems."/cache/target" = {
     device = "cargo_target";
-    fsType = "9p";
-    options = [
-      "trans=virtio"
-      "version=9p2000.L"
-      "msize=104857600"
-      "_netdev"
-      "nofail"
-    ];
+    fsType = "virtiofs";
+    options = [ "nofail" ];
   };

   fileSystems."/cache/pnpm" = {
     device = "pnpm_store";
-    fsType = "9p";
-    options = [
-      "trans=virtio"
-      "version=9p2000.L"
-      "msize=104857600"
-      "_netdev"
-      "nofail"
-    ];
+    fsType = "virtiofs";
+    options = [ "nofail" ];
   };

   fileSystems."/cache/sccache" = {
     device = "sccache";
-    fsType = "9p";
-    options = [
-      "trans=virtio"
-      "version=9p2000.L"
-      "msize=104857600"
-      "_netdev"
-      "nofail"
-    ];
+    fsType = "virtiofs";
+    options = [ "nofail" ];
   };

   fileSystems."/root/.config/opencode" = {
     device = "opencode_config";
-    fsType = "9p";
-    options = [
-      "trans=virtio"
-      "version=9p2000.L"
-      "msize=104857600"
-      "_netdev"
-      "nofail"
-    ];
+    fsType = "virtiofs";
+    options = [ "nofail" ];
   };

   # Environment variables for cache directories
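
Each device value above is a virtiofs mount tag. The tag itself is bound on the QEMU device, while the share is served by a per-mount virtiofsd process on the host that qvm is responsible for starting. A rough host-side sketch, assuming the Rust virtiofsd and its --socket-path/--shared-dir/--cache flags; the run directory, helper name, and paths are illustrative, not qvm's actual code:

package main

import (
	"fmt"
	"os/exec"
	"path/filepath"
)

// startShare launches one virtiofsd daemon serving hostPath. Note that the
// tag is not passed to virtiofsd at all: tags are assigned on the QEMU side
// (-device vhost-user-fs-pci,tag=...), keyed to this daemon via its socket.
func startShare(runDir, tag, hostPath string) (*exec.Cmd, string, error) {
	sock := filepath.Join(runDir, tag+".sock")
	cmd := exec.Command("virtiofsd",
		"--socket-path", sock,
		"--shared-dir", hostPath,
		"--cache", "auto",
	)
	if err := cmd.Start(); err != nil {
		return nil, "", fmt.Errorf("starting virtiofsd for %s: %w", tag, err)
	}
	return cmd, sock, nil
}

func main() {
	cmd, sock, err := startShare("/run/qvm", "cargo_home", "/home/josh/.cargo")
	if err != nil {
		panic(err)
	}
	fmt.Printf("virtiofsd pid %d listening on %s\n", cmd.Process.Pid, sock)
}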
@@ -215,47 +186,46 @@
     "d /cache/sccache 0755 root root -"
   ];

-  # Systemd mount units for cache directories
-  # The NixOS VM runner doesn't include custom fileSystems entries in the generated fstab,
-  # so we use systemd mount units to automount the 9p virtfs shares at boot.
+  # Systemd mount units for cache directories using virtiofs
+  # virtiofs provides better performance than 9p and supports hot-mounting
   systemd.mounts = [
     {
       what = "cargo_home";
       where = "/cache/cargo";
-      type = "9p";
-      options = "trans=virtio,version=9p2000.L,msize=104857600,nofail";
+      type = "virtiofs";
+      options = "nofail";
       wantedBy = [ "multi-user.target" ];
       after = [ "systemd-modules-load.service" ];
     }
     {
       what = "cargo_target";
       where = "/cache/target";
-      type = "9p";
-      options = "trans=virtio,version=9p2000.L,msize=104857600,nofail";
+      type = "virtiofs";
+      options = "nofail";
       wantedBy = [ "multi-user.target" ];
       after = [ "systemd-modules-load.service" ];
     }
     {
       what = "pnpm_store";
       where = "/cache/pnpm";
-      type = "9p";
-      options = "trans=virtio,version=9p2000.L,msize=104857600,nofail";
+      type = "virtiofs";
+      options = "nofail";
       wantedBy = [ "multi-user.target" ];
       after = [ "systemd-modules-load.service" ];
     }
     {
       what = "sccache";
       where = "/cache/sccache";
-      type = "9p";
-      options = "trans=virtio,version=9p2000.L,msize=104857600,nofail";
+      type = "virtiofs";
+      options = "nofail";
       wantedBy = [ "multi-user.target" ];
       after = [ "systemd-modules-load.service" ];
     }
     {
       what = "opencode_config";
       where = "/root/.config/opencode";
-      type = "9p";
-      options = "trans=virtio,version=9p2000.L,msize=104857600,nofail";
+      type = "virtiofs";
+      options = "nofail";
       wantedBy = [ "multi-user.target" ];
       after = [ "systemd-modules-load.service" ];
     }
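
Note how little each unit carries compared to the 9p version: no trans=, version=, or msize= tuning, because the virtiofs tag alone identifies the vhost-user-fs device. Each unit is the declarative form of a one-line guest mount; the imperative equivalent from Go inside the guest would be (a Linux-only sketch, not qvm code):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// Equivalent of: mount -t virtiofs cargo_home /cache/cargo
	// The "device" argument is the virtiofs tag, not a block device node.
	if err := os.MkdirAll("/cache/cargo", 0o755); err != nil {
		panic(err)
	}
	if err := syscall.Mount("cargo_home", "/cache/cargo", "virtiofs", 0, ""); err != nil {
		panic(fmt.Errorf("mount cargo_home: %w", err))
	}
	fmt.Println("mounted /cache/cargo")
}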
@@ -298,15 +268,9 @@
   # GB disk size
   virtualisation.diskSize = 40 * 1024;

-  # NOTE: Using 9p virtfs for filesystem sharing
-  # The NixOS VM runner doesn't support virtio-fs out of the box.
-  # We use 9p (-virtfs) which is the standard method for QEMU VMs.
-  #
-  # See: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/virtualisation/qemu-vm.nix#L530
-  # The sharedDirectories option hardcodes: -virtfs local,path=...,security_model=...
-  #
-  # 9p mounts are configured via QEMU_OPTS environment variable:
-  # -virtfs local,path=$HOST_PATH,mount_tag=$TAG,security_model=mapped-xattr,msize=104857600
+  # NOTE: Using virtiofs for filesystem sharing (via virtiofsd + vhost-user-fs-pci)
+  # This provides better performance than 9p and supports hot-mounting workspaces
+  # without VM restart. The qvm CLI manages virtiofsd daemons for each mount.

   system.stateVersion = stateVersion;
 };
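
Hot-mounting is the capability 9p's baked-in -virtfs flags could not offer: with vhost-user-fs, a new share can be attached to a running VM over QMP and then mounted from inside the guest. A minimal sketch of that flow, assuming qvm exposes a QMP unix socket; the socket path, chardev ID, and tag are hypothetical, and replies are read naively one line at a time:

package main

import (
	"bufio"
	"fmt"
	"net"
)

// qmp sends one QMP command line and returns the next raw JSON reply line.
func qmp(conn net.Conn, r *bufio.Reader, cmd string) (string, error) {
	if _, err := fmt.Fprintln(conn, cmd); err != nil {
		return "", err
	}
	return r.ReadString('\n')
}

func main() {
	// Hypothetical QMP monitor socket managed by qvm.
	conn, err := net.Dial("unix", "/run/qvm/qmp.sock")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	r := bufio.NewReader(conn)
	r.ReadString('\n') // discard the QMP greeting banner

	// Capabilities handshake is mandatory before any other command.
	qmp(conn, r, `{"execute":"qmp_capabilities"}`)

	// 1. Attach the virtiofsd socket as a chardev.
	qmp(conn, r, `{"execute":"chardev-add","arguments":{"id":"char-ws",`+
		`"backend":{"type":"socket","data":{"addr":{"type":"unix",`+
		`"data":{"path":"/run/qvm/workspace.sock"}},"server":false}}}}`)

	// 2. Plug a vhost-user-fs device wired to that chardev; the guest can
	//    then run: mount -t virtiofs workspace /mnt/workspace
	reply, _ := qmp(conn, r, `{"execute":"device_add","arguments":{`+
		`"driver":"vhost-user-fs-pci","chardev":"char-ws","tag":"workspace"}}`)
	fmt.Println(reply)
}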

@@ -12,13 +12,14 @@ import (
 func buildQEMUCommand(cfg *config.Config, sshPort int, mounts []virtiofsd.Mount) []string {
 	memSize := cfg.VM.Memory

-	// vhost-user-fs requires shared memory backend
+	// vhost-user-fs requires shared memory backend with share=on
+	// We must specify memory size only via the memory backend and attach it to NUMA
+	// The -m flag must match the memory backend size for QEMU to be happy
 	args := []string{
-		"-machine", "q35",
+		"-machine", "q35,memory-backend=mem",
 		"-accel", "kvm",
 		"-cpu", "host",
 		"-object", fmt.Sprintf("memory-backend-memfd,id=mem,size=%s,share=on", memSize),
-		"-numa", "node,memdev=mem",
 		"-smp", strconv.Itoa(cfg.VM.CPUs),
 		"-display", "none",
 		"-daemonize",