Add qvm clean; make rebuild produce VM runner; default qvm run to shell

Joshua Bell committed 2026-01-26 10:14:23 -06:00
parent e766c8466d
commit 601b4ab15e
7 changed files with 395 additions and 249 deletions

bin/qvm

@@ -29,11 +29,12 @@ USAGE:
 COMMANDS:
   start      Start the VM (create if needed)
   stop       Stop the running VM
-  run        Execute a command in the VM
+  run        Execute a command in the VM (or start shell if no command)
   ssh        Open SSH session or run command in VM
   status     Show VM status and information
   rebuild    Rebuild the base VM image from flake
   reset      Delete overlay and start fresh (keeps base image)
+  clean      Remove ALL QVM data (images, state, caches)

 OPTIONS:
   -h, --help    Show this help message

@@ -72,7 +73,7 @@ main() {
     shift

     case "$subcommand" in
-        start|stop|run|ssh|status|rebuild|reset)
+        start|stop|run|ssh|status|rebuild|reset|clean)
            # Route to the appropriate qvm-* script
            # Use exec to replace this process with the subcommand
            exec "${SCRIPT_DIR}/qvm-${subcommand}" "$@"

bin/qvm-clean (new executable file, 115 lines)
#!/usr/bin/env bash
#
# qvm-clean - Completely remove all QVM state, images, and caches
#
# This script performs a full cleanup of all QVM-related data:
#   - Base image (base.qcow2)
#   - VM overlay and state (overlay.qcow2, pid, ssh port, logs, workspaces)
#   - Build caches (cargo, pnpm, sccache)
#   - Optionally: user configuration (flake)
#
# WARNING: This is destructive and cannot be undone!
#
# Usage: qvm clean [-f|--force]
#   -f, --force    Skip confirmation prompt
#
set -euo pipefail

# Source common library
readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../lib" && pwd)}"
source "${QVM_LIB_DIR}/common.sh"

# Get path to qvm-stop script
readonly QVM_BIN_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly QVM_STOP="${QVM_BIN_DIR}/qvm-stop"

#
# confirm_clean - Prompt user for confirmation
# Args: $1 - whether to delete config (true/false)
# Returns: 0 if user confirms, exits script if user cancels
#
confirm_clean() {
    echo
    log_warn "This will delete ALL QVM data:"
    echo "  - Base image:    $QVM_DATA_DIR"
    echo "  - State/overlay: $QVM_STATE_DIR"
    echo "  - Build caches:  $QVM_CACHE_DIR"
    echo "  - Config/flake:  $QVM_CONFIG_DIR"
    echo
    log_warn "This operation CANNOT be undone!"
    echo "You will need to rebuild the base image from scratch next time."
    echo

    read -p "Are you absolutely sure? [y/N] " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        log_info "Clean cancelled"
        exit 0
    fi
}

#
# main - Main cleanup orchestration
#
main() {
    local force=false

    # Parse arguments
    while [[ $# -gt 0 ]]; do
        case "$1" in
            -f|--force)
                force=true
                shift
                ;;
            *)
                die "Unknown option: $1"
                ;;
        esac
    done

    # Confirm unless --force is used
    if [[ "$force" != "true" ]]; then
        confirm_clean
    fi

    # Stop VM if running
    if is_vm_running; then
        log_info "Stopping running VM..."
        "$QVM_STOP"
    fi

    # Delete directories
    log_info "Removing QVM data directories..."
    if [[ -d "$QVM_DATA_DIR" ]]; then
        log_info "  - Deleting: $QVM_DATA_DIR"
        rm -rf "$QVM_DATA_DIR"
    fi
    if [[ -d "$QVM_STATE_DIR" ]]; then
        log_info "  - Deleting: $QVM_STATE_DIR"
        rm -rf "$QVM_STATE_DIR"
    fi
    if [[ -d "$QVM_CACHE_DIR" ]]; then
        log_info "  - Deleting: $QVM_CACHE_DIR"
        rm -rf "$QVM_CACHE_DIR"
    fi
    if [[ -d "$QVM_CONFIG_DIR" ]]; then
        log_info "  - Deleting: $QVM_CONFIG_DIR"
        rm -rf "$QVM_CONFIG_DIR"
    fi

    # Print success message
    echo
    log_info "QVM cleaned successfully!"
    echo
    echo "All QVM data has been removed from your system."
    echo "Next run of 'qvm start' will initialize everything from scratch."
    echo
}

main "$@"

bin/qvm-rebuild

@@ -59,43 +59,45 @@ ensure_user_flake() {
 }

 #
-# build_base_image - Build the base image using nix
+# build_vm - Build the VM runner using nix
 #
 build_base_image() {
-    log_info "Building base image from flake..."
+    log_info "Building VM from flake..."

-    # Build the qcow2 output from user's flake
-    local build_result="$QVM_STATE_DIR/result"
-    if ! nix build "$QVM_USER_FLAKE#qcow2" --out-link "$build_result"; then
-        die "Failed to build base image. Check your flake configuration at: $QVM_USER_FLAKE/flake.nix"
+    # Build the VM output from user's flake
+    local build_result="$QVM_STATE_DIR/vm-result"
+    if ! nix build "$QVM_USER_FLAKE#vm" --out-link "$build_result"; then
+        die "Failed to build VM. Check your flake configuration at: $QVM_USER_FLAKE/flake.nix"
     fi

-    # Verify the result contains nixos.qcow2
-    local qcow2_path="$build_result/nixos.qcow2"
-    if [[ ! -f "$qcow2_path" ]]; then
-        die "Build succeeded but nixos.qcow2 not found at: $qcow2_path"
+    # Verify the result contains the VM runner script
+    local vm_runner="$build_result/bin/run-qvm-dev-vm"
+    if [[ ! -f "$vm_runner" ]]; then
+        # Try alternate name pattern
+        vm_runner=$(find "$build_result/bin" -name "run-*-vm" -type f 2>/dev/null | head -1)
+        if [[ -z "$vm_runner" || ! -f "$vm_runner" ]]; then
+            die "Build succeeded but VM runner script not found in: $build_result/bin/"
+        fi
     fi

-    # Copy the qcow2 to base image location
-    log_info "Copying image to: $QVM_BASE_IMAGE"
-    # Remove existing image first (may be read-only from Nix store copy)
-    rm -f "$QVM_BASE_IMAGE"
-    cp -L "$qcow2_path" "$QVM_BASE_IMAGE"
-    # Ensure the new image is writable for future rebuilds
-    chmod 644 "$QVM_BASE_IMAGE"
-
-    # Remove the result symlink
-    rm -f "$build_result"
-
-    # Get image size for informational output
-    local image_size
-    image_size=$(du -h "$QVM_BASE_IMAGE" | cut -f1)
-
-    log_info "Base image built successfully"
+    # Move the result symlink to data dir (keeps nix store reference)
+    rm -f "$QVM_DATA_DIR/vm-result"
+    mv "$build_result" "$QVM_DATA_DIR/vm-result"
+
+    # Get the basename of the runner script and construct path in new location
+    local runner_name
+    runner_name=$(basename "$vm_runner")
+    vm_runner="$QVM_DATA_DIR/vm-result/bin/$runner_name"
+
+    # Create a symlink to the VM runner at our standard location
+    log_info "Installing VM runner to: $QVM_VM_RUNNER"
+    rm -f "$QVM_VM_RUNNER"
+    ln -sf "$vm_runner" "$QVM_VM_RUNNER"
+
+    log_info "VM built successfully"
     echo ""
-    echo "Base image: $QVM_BASE_IMAGE"
-    echo "Image size: $image_size"
+    echo "VM runner: $QVM_VM_RUNNER"
 }

 #
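A quick post-rebuild sanity check, sketched on the assumption that QVM_DATA_DIR resolves to a conventional XDG default (the variable is defined in lib/common.sh; its actual default is not shown in this diff):

    qvm rebuild
    readlink -f "${QVM_DATA_DIR:-$HOME/.local/share/qvm}/run-vm"
    # expected target: .../vm-result/bin/run-qvm-dev-vm, per the ln -sf above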

bin/qvm-run

@@ -153,17 +153,18 @@ is_workspace_mounted() {
 #
 main() {
-    # Show usage if no arguments
-    if [[ $# -eq 0 ]]; then
-        show_usage
-        exit 1
-    fi
-
-    # Handle help flags
-    if [[ "$1" == "-h" || "$1" == "--help" ]]; then
+    # Handle help flags first
+    if [[ $# -gt 0 && ( "$1" == "-h" || "$1" == "--help" ) ]]; then
         show_usage
         exit 0
     fi

+    # If no command given, default to interactive zsh shell
+    local run_shell=false
+    if [[ $# -eq 0 ]]; then
+        run_shell=true
+    fi
+
     # Get current workspace (absolute path)
     local workspace_path
     workspace_path="$(pwd)"

@@ -268,25 +269,30 @@ main() {
     # Add connection target
     ssh_cmd+=(root@localhost)

-    # Build remote command: cd to workspace and execute user's command
-    # Quote each argument properly to handle spaces and special chars
-    local remote_cmd="cd '$guest_path' && "
-
-    # Append user's command with proper quoting
-    local first_arg=1
-    for arg in "$@"; do
-        if [[ $first_arg -eq 1 ]]; then
-            remote_cmd+="$arg"
-            first_arg=0
-        else
-            # Quote arguments that contain spaces or special characters
-            if [[ "$arg" =~ [[:space:]] ]]; then
-                remote_cmd+=" '$arg'"
-            else
-                remote_cmd+=" $arg"
-            fi
-        fi
-    done
+    # Build remote command: cd to workspace and execute user's command (or shell)
+    local remote_cmd="cd '$guest_path'"
+
+    if [[ "$run_shell" == "true" ]]; then
+        # No command - start interactive zsh shell
+        remote_cmd+=" && exec zsh"
+    else
+        # Append user's command with proper quoting
+        remote_cmd+=" && "
+        local first_arg=1
+        for arg in "$@"; do
+            if [[ $first_arg -eq 1 ]]; then
+                remote_cmd+="$arg"
+                first_arg=0
+            else
+                # Quote arguments that contain spaces or special characters
+                if [[ "$arg" =~ [[:space:]] ]]; then
+                    remote_cmd+=" '$arg'"
+                else
+                    remote_cmd+=" $arg"
+                fi
+            fi
+        done
+    fi

     # Add the remote command as final SSH argument
     ssh_cmd+=("$remote_cmd")

bin/qvm-start

@@ -1,15 +1,13 @@
 #!/usr/bin/env bash
 #
-# qvm-start - Launch the QEMU VM with all required configuration
+# qvm-start - Launch the QVM using the NixOS VM runner
 #
-# This script starts the QVM virtual machine with:
-# - KVM acceleration and host CPU passthrough
-# - Configurable memory and CPU count
-# - Overlay disk backed by base.qcow2 (copy-on-write)
-# - SSH port forwarding on auto-selected port
-# - 9p mounts for shared caches (cargo, pnpm, sccache)
-# - Serial console logging
-# - Daemonized execution with PID file
+# This script starts the QVM virtual machine by:
+# - Building the VM if not already built
+# - Configuring QEMU options via environment variables
+# - Adding 9p mounts for caches and workspaces
+# - Starting the VM in the background
+# - Waiting for SSH to become available
 #
 set -euo pipefail
@@ -21,8 +19,6 @@ source "$QVM_LIB_DIR/common.sh"
 #
 # find_available_port - Find an available TCP port starting from base
-# Args: $1 - starting port number (default: 2222)
-# Returns: available port number on stdout
 #
 find_available_port() {
     local port="${1:-2222}"
@@ -34,69 +30,63 @@ find_available_port() {
             echo "$port"
             return 0
         fi
-        (( port++ ))
-        (( attempt++ ))
+        (( port++ )) || true
+        (( attempt++ )) || true
     done

     die "Could not find available port after $max_attempts attempts"
 }

 #
-# mount_workspaces - Add virtfs entries for registered workspaces
-# Args: $1 - name of array variable to append to
-# Usage: mount_workspaces qemu_cmd
+# build_qemu_opts - Build QEMU_OPTS environment variable with 9p mounts
 #
-mount_workspaces() {
-    local -n cmd_array=$1
-
-    # Check if workspaces registry exists
-    if [[ ! -f "$QVM_WORKSPACES_FILE" ]]; then
-        log_info "No workspaces registry found, skipping workspace mounts"
-        return 0
-    fi
-
-    # Check if file is empty or invalid JSON
-    if [[ ! -s "$QVM_WORKSPACES_FILE" ]]; then
-        log_info "Workspaces registry is empty, skipping workspace mounts"
-        return 0
-    fi
-
-    # Parse workspaces and add virtfs entries
-    local workspace_count
-    workspace_count=$(jq -r 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0")
-
-    if [[ "$workspace_count" -eq 0 ]]; then
-        log_info "No workspaces registered, skipping workspace mounts"
-        return 0
-    fi
-
-    log_info "Mounting $workspace_count workspace(s)..."
-
-    # Iterate through workspaces and add virtfs entries
-    local i=0
-    while (( i < workspace_count )); do
-        local path mount_tag
-        path=$(jq -r ".[$i].host_path" "$QVM_WORKSPACES_FILE")
-        mount_tag=$(jq -r ".[$i].mount_tag" "$QVM_WORKSPACES_FILE")
-
-        if [[ -z "$path" || -z "$mount_tag" || "$path" == "null" || "$mount_tag" == "null" ]]; then
-            log_warn "Skipping invalid workspace entry at index $i"
-            (( i++ ))
-            continue
-        fi
-
-        # Verify path exists
-        if [[ ! -d "$path" ]]; then
-            log_warn "Workspace path does not exist: $path (skipping)"
-            (( i++ ))
-            continue
-        fi
-
-        log_info " - $path -> $mount_tag"
-        cmd_array+=(-virtfs "local,path=$path,mount_tag=$mount_tag,security_model=mapped-xattr")
-
-        (( i++ )) || true # Prevent set -e from exiting when i was 0
-    done
+build_qemu_opts() {
+    local ssh_port="$1"
+    local opts=""
+
+    # 9p mounts for shared caches
+    opts+="-virtfs local,path=$QVM_CARGO_HOME,mount_tag=cargo_home,security_model=mapped-xattr "
+    opts+="-virtfs local,path=$QVM_CARGO_TARGET,mount_tag=cargo_target,security_model=mapped-xattr "
+    opts+="-virtfs local,path=$QVM_PNPM_STORE,mount_tag=pnpm_store,security_model=mapped-xattr "
+    opts+="-virtfs local,path=$QVM_SCCACHE,mount_tag=sccache,security_model=mapped-xattr "
+
+    # Mount host opencode config if it exists
+    if [[ -d "$QVM_HOST_OPENCODE_CONFIG" ]]; then
+        log_info "Adding opencode config mount..."
+        opts+="-virtfs local,path=$QVM_HOST_OPENCODE_CONFIG,mount_tag=opencode_config,security_model=mapped-xattr "
+    fi
+
+    # Add workspace mounts from registry
+    if [[ -f "$QVM_WORKSPACES_FILE" && -s "$QVM_WORKSPACES_FILE" ]]; then
+        local workspace_count
+        workspace_count=$(jq -r 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0")
+
+        if [[ "$workspace_count" -gt 0 ]]; then
+            log_info "Adding $workspace_count workspace mount(s)..."
+
+            local i=0
+            while (( i < workspace_count )); do
+                local path mount_tag
+                path=$(jq -r ".[$i].host_path" "$QVM_WORKSPACES_FILE")
+                mount_tag=$(jq -r ".[$i].mount_tag" "$QVM_WORKSPACES_FILE")
+
+                if [[ -n "$path" && -n "$mount_tag" && "$path" != "null" && "$mount_tag" != "null" && -d "$path" ]]; then
+                    log_info " - $path -> $mount_tag"
+                    opts+="-virtfs local,path=$path,mount_tag=$mount_tag,security_model=mapped-xattr "
+                fi
+
+                (( i++ )) || true
+            done
+        fi
+    fi
+
+    # Serial console to log file and daemonize
+    opts+="-serial file:$QVM_SERIAL_LOG "
+    opts+="-display none "
+    opts+="-daemonize "
+    opts+="-pidfile $QVM_PID_FILE "
+
+    echo "$opts"
 }
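The jq calls above assume the registry is a JSON array of objects with host_path and mount_tag keys. An illustrative entry (the ws_ tag prefix is a guess; real tags come from whatever qvm run derives via workspace_hash):

    [
      { "host_path": "/home/user/projects/app", "mount_tag": "ws_1a2b3c4d" }
    ]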
 #

@@ -126,25 +116,26 @@ main() {
     # First-run initialization
     ensure_dirs

-    if [[ ! -f "$QVM_BASE_IMAGE" ]]; then
-        log_info "First run detected - building base image..."
+    # Check if VM runner exists, build if not
+    if [[ ! -L "$QVM_VM_RUNNER" || ! -f "$(readlink -f "$QVM_VM_RUNNER" 2>/dev/null || echo "")" ]]; then
+        log_info "First run detected - building VM..."
         log_info "This may take several minutes."

-        # Call qvm-rebuild to build the image
         SCRIPT_DIR="$(dirname "$0")"
         if ! "$SCRIPT_DIR/qvm-rebuild"; then
-            die "Failed to build base image. Run 'qvm rebuild' manually to debug."
+            die "Failed to build VM. Run 'qvm rebuild' manually to debug."
         fi
     fi

-    # Create overlay image if it doesn't exist
-    if [[ ! -f "$QVM_OVERLAY" ]]; then
-        log_info "Creating overlay disk..."
-        if ! qemu-img create -f qcow2 -b "$QVM_BASE_IMAGE" -F qcow2 "$QVM_OVERLAY"; then
-            die "Failed to create overlay disk"
-        fi
-    else
-        log_info "Using existing overlay disk"
+    # Verify VM runner exists now
+    if [[ ! -L "$QVM_VM_RUNNER" ]]; then
+        die "VM runner not found at $QVM_VM_RUNNER. Run 'qvm rebuild' first."
+    fi
+
+    local vm_script
+    vm_script=$(readlink -f "$QVM_VM_RUNNER")
+    if [[ ! -f "$vm_script" ]]; then
+        die "VM runner script not found. Run 'qvm rebuild' to fix."
     fi

     # Find available SSH port
@@ -153,63 +144,53 @@ main() {
     log_info "Using SSH port: $ssh_port"

     # Get memory and CPU settings from environment or use defaults
-    local memory="${QVM_MEMORY:-40G}"
+    local memory="${QVM_MEMORY:-30G}"
     local cpus="${QVM_CPUS:-30}"
     log_info "VM resources: ${memory} memory, ${cpus} CPUs"

-    # Build QEMU command
-    local qemu_cmd=(
-        qemu-system-x86_64
-        -enable-kvm
-        -cpu host
-        -m "$memory"
-        -smp "$cpus"
-
-        # Overlay disk (virtio for performance)
-        -drive "file=$QVM_OVERLAY,if=virtio,format=qcow2"
-
-        # User-mode networking with SSH port forward
-        -netdev "user,id=net0,hostfwd=tcp::${ssh_port}-:22"
-        -device "virtio-net-pci,netdev=net0"
-
-        # 9p mounts for shared caches (security_model=mapped-xattr for proper permissions)
-        # Note: trans, version, msize are kernel-side mount options (in NixOS flake), not QEMU options
-        -virtfs "local,path=$QVM_CARGO_HOME,mount_tag=cargo_home,security_model=mapped-xattr"
-        -virtfs "local,path=$QVM_CARGO_TARGET,mount_tag=cargo_target,security_model=mapped-xattr"
-        -virtfs "local,path=$QVM_PNPM_STORE,mount_tag=pnpm_store,security_model=mapped-xattr"
-        -virtfs "local,path=$QVM_SCCACHE,mount_tag=sccache,security_model=mapped-xattr"
-    )
-
-    # Add workspace mounts from registry
-    mount_workspaces qemu_cmd
-
-    # Continue building QEMU command
-    qemu_cmd+=(
-        # Serial console to log file
-        -serial "file:$QVM_SERIAL_LOG"
-
-        # No graphics (use -display none for daemonized mode)
-        -display none
-
-        # Daemonize with PID file
-        -daemonize
-        -pidfile "$QVM_PID_FILE"
-    )
-
-    # Launch QEMU
-    log_info "Launching QEMU..."
-    if ! "${qemu_cmd[@]}"; then
+    # Build QEMU options
+    local qemu_opts
+    qemu_opts=$(build_qemu_opts "$ssh_port")
+
+    # Launch VM using the NixOS runner script
+    # The runner script respects these environment variables:
+    # - QEMU_OPTS: additional QEMU options
+    # - NIX_DISK_IMAGE: path to disk image (optional, uses tmpdir by default)
+    log_info "Launching VM..."
+
+    # Create persistent disk image location if needed
+    local disk_image="$QVM_STATE_DIR/qvm-dev.qcow2"
+
+    export QEMU_OPTS="$qemu_opts -m $memory -smp $cpus"
+    export QEMU_NET_OPTS="hostfwd=tcp::${ssh_port}-:22"
+    export NIX_DISK_IMAGE="$disk_image"
+
+    # Run VM - the script uses exec with qemu's -daemonize flag, so it returns quickly
+    if ! "$vm_script" &>/dev/null; then
         cleanup_on_failure
-        die "Failed to start QEMU"
+        die "Failed to start VM"
     fi

+    # Wait a moment for QEMU to create PID file
+    sleep 2
+
+    # If PID file wasn't created by our QEMU_OPTS, get it from the background process
+    if [[ ! -f "$QVM_PID_FILE" ]]; then
+        # Try to find the QEMU process
+        local qemu_pid
+        qemu_pid=$(pgrep -f "qemu.*qvm-dev" | head -1 || echo "")
+        if [[ -n "$qemu_pid" ]]; then
+            echo "$qemu_pid" > "$QVM_PID_FILE"
+        fi
+    fi
+
     # Save SSH port to file
     echo "$ssh_port" > "$QVM_SSH_PORT_FILE"

     # Wait for SSH to become available
-    if ! wait_for_ssh "$ssh_port" 60; then
+    if ! wait_for_ssh "$ssh_port" 120; then
         cleanup_on_failure
-        die "VM started but SSH did not become available"
+        die "VM started but SSH did not become available. Check: $QVM_SERIAL_LOG"
     fi

     # Success!
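Because memory and CPU count now flow into QEMU_OPTS, they stay overridable per invocation. An illustrative run on a smaller host (values are examples, not recommendations):

    QVM_MEMORY=8G QVM_CPUS=4 qvm start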

flake.nix (user flake template)

@@ -40,7 +40,7 @@
     imports = [
       inputs.home-manager.nixosModules.home-manager
-      inputs.ros_neovim.nixosModules.default
+      # inputs.ros_neovim.nixosModules.default
      inputs.common.nixosModules.essentials
      inputs.common.nixosModules.git
      inputs.common.nixosModules.zsh
@@ -76,20 +76,21 @@
       useGlobalPkgs = true;
       backupFileExtension = "bak";

+      sharedModules = [
+        inputs.common.homeManagerModules.atuin
+        inputs.common.homeManagerModules.git
+        inputs.common.homeManagerModules.postgres_cli_options
+        inputs.common.homeManagerModules.starship
+        inputs.common.homeManagerModules.zoxide
+        inputs.common.homeManagerModules.zsh
+        inputs.common.homeManagerModules.tmux
+        inputs.common.homeManagerModules.direnv
+        ({ programs.direnv.config.whitelist.prefix = [ "/" ]; })
+      ];
+
       users.root = {
         home.stateVersion = stateVersion;
         programs.home-manager.enable = true;
-
-        sharedModules = [
-          inputs.common.homeManagerModules.atuin
-          inputs.common.homeManagerModules.git
-          inputs.common.homeManagerModules.postgres_cli_options
-          inputs.common.homeManagerModules.starship
-          inputs.common.homeManagerModules.zoxide
-          inputs.common.homeManagerModules.zsh
-          inputs.common.homeManagerModules.tmux
-          inputs.common.homeManagerModules.direnv
-        ];
       };
     };
@@ -164,6 +165,18 @@
     ];
   };

+  fileSystems."/root/.config/opencode" = {
+    device = "opencode_config";
+    fsType = "9p";
+    options = [
+      "trans=virtio"
+      "version=9p2000.L"
+      "msize=104857600"
+      "_netdev"
+      "nofail"
+    ];
+  };
+
   # Environment variables for cache directories
   environment.variables = {
     CARGO_HOME = "/cache/cargo";
@@ -209,7 +222,7 @@
   '';

-  # 35GB disk size
+  # GB disk size
   virtualisation.diskSize = 40 * 1024;

   system.stateVersion = stateVersion;
@@ -217,20 +230,33 @@
     in
     let
-      qcow2Image = nixos-generators.nixosGenerate {
+      pkgs = nixpkgs.legacyPackages.${system};
+
+      # Use standard NixOS VM builder instead of nixos-generators
+      # nixos-generators qcow format has a 100MB RAM build VM that OOMs with large closures
+      baseVm = nixpkgs.lib.nixosSystem {
         inherit system;
-        format = "qcow";
         modules = [ vmModule ];
       };
     in
     {
-      # Export the qcow2 image
+      nixosConfigurations.base = baseVm;
+
+      # Runnable VM script (./result/bin/run-qvm-dev-vm)
       packages.${system} = {
-        qcow2 = qcow2Image;
-        default = qcow2Image;
+        vm = baseVm.config.system.build.vm;
+        default = baseVm.config.system.build.vm;
       };

+      apps.${system}.default = {
+        type = "app";
+        program = "${baseVm.config.system.build.vm}/bin/run-qvm-dev-vm";
+      };
+
+      devShells.${system}.default = pkgs.mkShellNoCC {
+        QEMU_NET_OPTS = "hostfwd=tcp::2222-:22";
+      };
+
-      # Export the module for reuse
       nixosModules.default = vmModule;
     };
 }
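A sketch of exercising the new flake outputs directly, assuming the user flake sits at the QVM_USER_FLAKE location qvm-rebuild reads ($QVM_CONFIG_DIR/flake; the ~/.config/qvm path below is an assumed XDG default):

    nix build ~/.config/qvm/flake#vm --out-link /tmp/qvm-vm
    QEMU_NET_OPTS="hostfwd=tcp::2222-:22" /tmp/qvm-vm/bin/run-qvm-dev-vm

    # or via the apps output:
    nix run ~/.config/qvm/flake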

lib/common.sh

@@ -23,6 +23,7 @@ readonly QVM_SSH_PORT_FILE="$QVM_STATE_DIR/ssh.port"
 readonly QVM_SERIAL_LOG="$QVM_STATE_DIR/serial.log"
 readonly QVM_WORKSPACES_FILE="$QVM_STATE_DIR/workspaces.json"
 readonly QVM_USER_FLAKE="$QVM_CONFIG_DIR/flake"
+readonly QVM_VM_RUNNER="$QVM_DATA_DIR/run-vm"

 # Cache directories for 9p mounts (shared between host and VM)
 readonly QVM_CARGO_HOME="$QVM_CACHE_DIR/cargo-home"
@@ -30,17 +31,20 @@ readonly QVM_CARGO_TARGET="$QVM_CACHE_DIR/cargo-target"
 readonly QVM_PNPM_STORE="$QVM_CACHE_DIR/pnpm-store"
 readonly QVM_SCCACHE="$QVM_CACHE_DIR/sccache"

+# Host config directories to mount in VM (read-write for tools that need it)
+readonly QVM_HOST_OPENCODE_CONFIG="${XDG_CONFIG_HOME:-$HOME/.config}/opencode"
+
 # Color codes (only used if stdout is a TTY)
 if [[ -t 1 ]]; then
     readonly COLOR_INFO='\033[0;36m'   # Cyan
     readonly COLOR_WARN='\033[0;33m'   # Yellow
     readonly COLOR_ERROR='\033[0;31m'  # Red
     readonly COLOR_RESET='\033[0m'     # Reset
 else
     readonly COLOR_INFO=''
     readonly COLOR_WARN=''
     readonly COLOR_ERROR=''
     readonly COLOR_RESET=''
 fi

 #
@@ -48,7 +52,7 @@ fi
 # Usage: log_info "message"
 #
 log_info() {
     echo -e "${COLOR_INFO}[INFO]${COLOR_RESET} $*" >&2
 }

 #
@@ -56,7 +60,7 @@ log_info() {
 # Usage: log_warn "message"
 #
 log_warn() {
     echo -e "${COLOR_WARN}[WARN]${COLOR_RESET} $*" >&2
 }

 #
@@ -64,7 +68,7 @@ log_warn() {
 # Usage: log_error "message"
 #
 log_error() {
     echo -e "${COLOR_ERROR}[ERROR]${COLOR_RESET} $*" >&2
 }

 #
@@ -72,8 +76,8 @@ log_error() {
 # Usage: die "error message"
 #
 die() {
     log_error "$@"
     exit 1
 }

 #
@@ -81,14 +85,14 @@ die() {
 # Usage: ensure_dirs
 #
 ensure_dirs() {
     mkdir -p "$QVM_DATA_DIR" \
         "$QVM_STATE_DIR" \
         "$QVM_CACHE_DIR" \
         "$QVM_CONFIG_DIR" \
         "$QVM_CARGO_HOME" \
         "$QVM_CARGO_TARGET" \
         "$QVM_PNPM_STORE" \
         "$QVM_SCCACHE"
 }

 #
@@ -97,21 +101,21 @@ ensure_dirs() {
 # Usage: if is_vm_running; then ... fi
 #
 is_vm_running() {
     if [[ ! -f "$QVM_PID_FILE" ]]; then
         return 1
     fi

     local pid
     pid=$(cat "$QVM_PID_FILE")

     # Check if process exists and is a QEMU process
     if kill -0 "$pid" 2>/dev/null; then
         return 0
     else
         # Stale PID file, remove it
         rm -f "$QVM_PID_FILE"
         return 1
     fi
 }

 #
@@ -120,10 +124,10 @@ is_vm_running() {
 # Usage: port=$(get_ssh_port)
 #
 get_ssh_port() {
     if [[ ! -f "$QVM_SSH_PORT_FILE" ]]; then
         die "SSH port file not found. Is the VM running?"
     fi
     cat "$QVM_SSH_PORT_FILE"
 }

 #
@@ -133,8 +137,8 @@ get_ssh_port() {
 # Usage: hash=$(workspace_hash "/path/to/workspace")
 #
 workspace_hash() {
     local path="$1"
     echo -n "$path" | sha256sum | cut -c1-8
 }

 #
@@ -145,25 +149,36 @@ workspace_hash() {
 # Usage: wait_for_ssh "$port" 30
 #
 wait_for_ssh() {
     local port="${1:-}"
     local timeout="${2:-60}"
     local elapsed=0

     if [[ -z "$port" ]]; then
         die "wait_for_ssh requires port argument"
     fi

     log_info "Waiting for SSH on port $port (timeout: ${timeout}s)..."

-    while (( elapsed < timeout )); do
-        if nc -z -w 1 localhost "$port" 2>/dev/null; then
-            log_info "SSH is ready"
-            return 0
-        fi
-        sleep 1
-        (( elapsed++ ))
-    done
+    while ((elapsed < timeout)); do
+        # Actually attempt SSH connection to verify sshd is responding
+        # nc -z only checks if port is open (QEMU opens it immediately)
+        # We need to verify sshd is actually ready to accept connections
+        if timeout 2 sshpass -p root ssh \
+            -o StrictHostKeyChecking=no \
+            -o UserKnownHostsFile=/dev/null \
+            -o LogLevel=ERROR \
+            -o PubkeyAuthentication=no \
+            -o PasswordAuthentication=yes \
+            -o ConnectTimeout=1 \
+            -p "$port" \
+            root@localhost "true" 2>/dev/null; then
+            log_info "SSH is ready"
+            return 0
+        fi
+        sleep 1
+        ((elapsed++))
+    done

     log_error "SSH did not become available within ${timeout}s"
     return 1
 }
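Note that the rewritten probe makes sshpass (and the timeout utility from coreutils) a host-side runtime dependency, and it assumes the guest accepts password login as root with password "root", per the sshpass -p root call above. Call sites are unchanged; a sketch of typical usage:

    wait_for_ssh "$(get_ssh_port)" 120 || die "VM did not come up"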