Add qvm clean; make rebuild produce VM runner; default qvm run to shell

This commit is contained in:
Joshua Bell 2026-01-26 10:14:23 -06:00
parent e766c8466d
commit 601b4ab15e
7 changed files with 395 additions and 249 deletions

View file

@@ -1,15 +1,13 @@
#!/usr/bin/env bash
#
# qvm-start - Launch the QEMU VM with all required configuration
# qvm-start - Launch the QVM using the NixOS VM runner
#
# This script starts the QVM virtual machine with:
# - KVM acceleration and host CPU passthrough
# - Configurable memory and CPU count
# - Overlay disk backed by base.qcow2 (copy-on-write)
# - SSH port forwarding on auto-selected port
# - 9p mounts for shared caches (cargo, pnpm, sccache)
# - Serial console logging
# - Daemonized execution with PID file
# This script starts the QVM virtual machine by:
# - Building the VM if not already built
# - Configuring QEMU options via environment variables
# - Adding 9p mounts for caches and workspaces
# - Starting the VM in the background
# - Waiting for SSH to become available
#
set -euo pipefail
@@ -21,8 +19,6 @@ source "$QVM_LIB_DIR/common.sh"
#
# find_available_port - Find an available TCP port starting from base
# Args: $1 - starting port number (default: 2222)
# Returns: available port number on stdout
#
find_available_port() {
local port="${1:-2222}"
@@ -34,69 +30,63 @@ find_available_port() {
echo "$port"
return 0
fi
(( port++ ))
(( attempt++ ))
(( port++ )) || true
(( attempt++ )) || true
done
die "Could not find available port after $max_attempts attempts"
}
#
# mount_workspaces - Add virtfs entries for registered workspaces
# Args: $1 - name of array variable to append to
# Usage: mount_workspaces qemu_cmd
# build_qemu_opts - Build QEMU_OPTS environment variable with 9p mounts
#
mount_workspaces() {
local -n cmd_array=$1
build_qemu_opts() {
local ssh_port="$1"
local opts=""
# Check if workspaces registry exists
if [[ ! -f "$QVM_WORKSPACES_FILE" ]]; then
log_info "No workspaces registry found, skipping workspace mounts"
return 0
# 9p mounts for shared caches
opts+="-virtfs local,path=$QVM_CARGO_HOME,mount_tag=cargo_home,security_model=mapped-xattr "
opts+="-virtfs local,path=$QVM_CARGO_TARGET,mount_tag=cargo_target,security_model=mapped-xattr "
opts+="-virtfs local,path=$QVM_PNPM_STORE,mount_tag=pnpm_store,security_model=mapped-xattr "
opts+="-virtfs local,path=$QVM_SCCACHE,mount_tag=sccache,security_model=mapped-xattr "
# Mount host opencode config if it exists
if [[ -d "$QVM_HOST_OPENCODE_CONFIG" ]]; then
log_info "Adding opencode config mount..."
opts+="-virtfs local,path=$QVM_HOST_OPENCODE_CONFIG,mount_tag=opencode_config,security_model=mapped-xattr "
fi
# Check if file is empty or invalid JSON
if [[ ! -s "$QVM_WORKSPACES_FILE" ]]; then
log_info "Workspaces registry is empty, skipping workspace mounts"
return 0
fi
# Parse workspaces and add virtfs entries
local workspace_count
workspace_count=$(jq -r 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0")
if [[ "$workspace_count" -eq 0 ]]; then
log_info "No workspaces registered, skipping workspace mounts"
return 0
fi
log_info "Mounting $workspace_count workspace(s)..."
# Iterate through workspaces and add virtfs entries
local i=0
while (( i < workspace_count )); do
local path mount_tag
path=$(jq -r ".[$i].host_path" "$QVM_WORKSPACES_FILE")
mount_tag=$(jq -r ".[$i].mount_tag" "$QVM_WORKSPACES_FILE")
# Add workspace mounts from registry
if [[ -f "$QVM_WORKSPACES_FILE" && -s "$QVM_WORKSPACES_FILE" ]]; then
local workspace_count
workspace_count=$(jq -r 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0")
if [[ -z "$path" || -z "$mount_tag" || "$path" == "null" || "$mount_tag" == "null" ]]; then
log_warn "Skipping invalid workspace entry at index $i"
(( i++ ))
continue
if [[ "$workspace_count" -gt 0 ]]; then
log_info "Adding $workspace_count workspace mount(s)..."
local i=0
while (( i < workspace_count )); do
local path mount_tag
path=$(jq -r ".[$i].host_path" "$QVM_WORKSPACES_FILE")
mount_tag=$(jq -r ".[$i].mount_tag" "$QVM_WORKSPACES_FILE")
if [[ -n "$path" && -n "$mount_tag" && "$path" != "null" && "$mount_tag" != "null" && -d "$path" ]]; then
log_info " - $path -> $mount_tag"
opts+="-virtfs local,path=$path,mount_tag=$mount_tag,security_model=mapped-xattr "
fi
(( i++ )) || true
done
fi
# Verify path exists
if [[ ! -d "$path" ]]; then
log_warn "Workspace path does not exist: $path (skipping)"
(( i++ ))
continue
fi
log_info " - $path -> $mount_tag"
cmd_array+=(-virtfs "local,path=$path,mount_tag=$mount_tag,security_model=mapped-xattr")
(( i++ )) || true # Prevent set -e from exiting when i was 0
done
fi
# Serial console to log file and daemonize
opts+="-serial file:$QVM_SERIAL_LOG "
opts+="-display none "
opts+="-daemonize "
opts+="-pidfile $QVM_PID_FILE "
echo "$opts"
}
#
@@ -126,25 +116,26 @@ main() {
# First-run initialization
ensure_dirs
if [[ ! -f "$QVM_BASE_IMAGE" ]]; then
log_info "First run detected - building base image..."
# Check if VM runner exists, build if not
if [[ ! -L "$QVM_VM_RUNNER" || ! -f "$(readlink -f "$QVM_VM_RUNNER" 2>/dev/null || echo "")" ]]; then
log_info "First run detected - building VM..."
log_info "This may take several minutes."
# Call qvm-rebuild to build the image
SCRIPT_DIR="$(dirname "$0")"
if ! "$SCRIPT_DIR/qvm-rebuild"; then
die "Failed to build base image. Run 'qvm rebuild' manually to debug."
die "Failed to build VM. Run 'qvm rebuild' manually to debug."
fi
fi
# Create overlay image if it doesn't exist
if [[ ! -f "$QVM_OVERLAY" ]]; then
log_info "Creating overlay disk..."
if ! qemu-img create -f qcow2 -b "$QVM_BASE_IMAGE" -F qcow2 "$QVM_OVERLAY"; then
die "Failed to create overlay disk"
fi
else
log_info "Using existing overlay disk"
# Verify VM runner exists now
if [[ ! -L "$QVM_VM_RUNNER" ]]; then
die "VM runner not found at $QVM_VM_RUNNER. Run 'qvm rebuild' first."
fi
local vm_script
vm_script=$(readlink -f "$QVM_VM_RUNNER")
if [[ ! -f "$vm_script" ]]; then
die "VM runner script not found. Run 'qvm rebuild' to fix."
fi
# Find available SSH port
@@ -153,63 +144,53 @@ main() {
log_info "Using SSH port: $ssh_port"
# Get memory and CPU settings from environment or use defaults
local memory="${QVM_MEMORY:-40G}"
local memory="${QVM_MEMORY:-30G}"
local cpus="${QVM_CPUS:-30}"
log_info "VM resources: ${memory} memory, ${cpus} CPUs"
# Build QEMU command
local qemu_cmd=(
qemu-system-x86_64
-enable-kvm
-cpu host
-m "$memory"
-smp "$cpus"
# Overlay disk (virtio for performance)
-drive "file=$QVM_OVERLAY,if=virtio,format=qcow2"
# User-mode networking with SSH port forward
-netdev "user,id=net0,hostfwd=tcp::${ssh_port}-:22"
-device "virtio-net-pci,netdev=net0"
# 9p mounts for shared caches (security_model=mapped-xattr for proper permissions)
# Note: trans, version, msize are kernel-side mount options (in NixOS flake), not QEMU options
-virtfs "local,path=$QVM_CARGO_HOME,mount_tag=cargo_home,security_model=mapped-xattr"
-virtfs "local,path=$QVM_CARGO_TARGET,mount_tag=cargo_target,security_model=mapped-xattr"
-virtfs "local,path=$QVM_PNPM_STORE,mount_tag=pnpm_store,security_model=mapped-xattr"
-virtfs "local,path=$QVM_SCCACHE,mount_tag=sccache,security_model=mapped-xattr"
)
# Build QEMU options
local qemu_opts
qemu_opts=$(build_qemu_opts "$ssh_port")
# Add workspace mounts from registry
mount_workspaces qemu_cmd
# Launch VM using the NixOS runner script
# The runner script respects these environment variables:
# - QEMU_OPTS: additional QEMU options
# - NIX_DISK_IMAGE: path to disk image (optional, uses tmpdir by default)
log_info "Launching VM..."
# Continue building QEMU command
qemu_cmd+=(
# Serial console to log file
-serial "file:$QVM_SERIAL_LOG"
# No graphics (use -display none for daemonized mode)
-display none
# Daemonize with PID file
-daemonize
-pidfile "$QVM_PID_FILE"
)
# Create persistent disk image location if needed
local disk_image="$QVM_STATE_DIR/qvm-dev.qcow2"
# Launch QEMU
log_info "Launching QEMU..."
if ! "${qemu_cmd[@]}"; then
export QEMU_OPTS="$qemu_opts -m $memory -smp $cpus"
export QEMU_NET_OPTS="hostfwd=tcp::${ssh_port}-:22"
export NIX_DISK_IMAGE="$disk_image"
# Run VM - the script uses exec with qemu's -daemonize flag, so it returns quickly
if ! "$vm_script" &>/dev/null; then
cleanup_on_failure
die "Failed to start QEMU"
die "Failed to start VM"
fi
# Wait a moment for QEMU to create PID file
sleep 2
# If PID file wasn't created by our QEMU_OPTS, get it from the background process
if [[ ! -f "$QVM_PID_FILE" ]]; then
# Try to find the QEMU process
local qemu_pid
qemu_pid=$(pgrep -f "qemu.*qvm-dev" | head -1 || echo "")
if [[ -n "$qemu_pid" ]]; then
echo "$qemu_pid" > "$QVM_PID_FILE"
fi
fi
# Save SSH port to file
echo "$ssh_port" > "$QVM_SSH_PORT_FILE"
# Wait for SSH to become available
if ! wait_for_ssh "$ssh_port" 60; then
if ! wait_for_ssh "$ssh_port" 120; then
cleanup_on_failure
die "VM started but SSH did not become available"
die "VM started but SSH did not become available. Check: $QVM_SERIAL_LOG"
fi
# Success!