Add qvm clean; make rebuild produce VM runner; default qvm run to shell

Joshua Bell 2026-01-26 10:14:23 -06:00
parent e766c8466d
commit 601b4ab15e
7 changed files with 395 additions and 249 deletions
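
At a glance, the new behavior (a usage sketch based on the scripts below; flags exactly as they are defined there):

    qvm rebuild         # build the flake's #vm output and link the VM runner script
    qvm run             # no command: open an interactive zsh in the current workspace
    qvm run cargo test  # a command still runs non-interactively in the VM
    qvm clean -f        # remove all QVM data, skipping the confirmation prompt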

bin/qvm

@@ -29,11 +29,12 @@ USAGE:
COMMANDS:
start Start the VM (create if needed)
stop Stop the running VM
run Execute a command in the VM
run Execute a command in the VM (or start shell if no command)
ssh Open SSH session or run command in VM
status Show VM status and information
rebuild Rebuild the base VM image from flake
reset Delete overlay and start fresh (keeps base image)
clean Remove ALL QVM data (images, state, caches)
OPTIONS:
-h, --help Show this help message
@@ -72,7 +73,7 @@ main() {
shift
case "$subcommand" in
start|stop|run|ssh|status|rebuild|reset)
start|stop|run|ssh|status|rebuild|reset|clean)
# Route to the appropriate qvm-* script
# Use exec to replace this process with the subcommand
exec "${SCRIPT_DIR}/qvm-${subcommand}" "$@"

bin/qvm-clean (new executable file, 115 lines)

@@ -0,0 +1,115 @@
#!/usr/bin/env bash
#
# qvm-clean - Completely remove all QVM state, images, and caches
#
# This script performs a full cleanup of all QVM-related data:
# - Base image (base.qcow2)
# - VM overlay and state (overlay.qcow2, pid, ssh port, logs, workspaces)
# - Build caches (cargo, pnpm, sccache)
# - Optionally: user configuration (flake)
#
# WARNING: This is destructive and cannot be undone!
#
# Usage: qvm clean [-f|--force]
# -f, --force Skip confirmation prompt
#
set -euo pipefail
# Source common library
readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../lib" && pwd)}"
source "${QVM_LIB_DIR}/common.sh"
# Get path to qvm-stop script
readonly QVM_BIN_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly QVM_STOP="${QVM_BIN_DIR}/qvm-stop"
#
# confirm_clean - Prompt user for confirmation
# Args: none
# Returns: 0 if user confirms, exits script if user cancels
#
confirm_clean() {
echo
log_warn "This will delete ALL QVM data:"
echo " - Base image: $QVM_DATA_DIR"
echo " - State/overlay: $QVM_STATE_DIR"
echo " - Build caches: $QVM_CACHE_DIR"
echo " - Config/flake: $QVM_CONFIG_DIR"
echo
log_warn "This operation CANNOT be undone!"
echo "You will need to rebuild the base image from scratch next time."
echo
read -p "Are you absolutely sure? [y/N] " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
log_info "Clean cancelled"
exit 0
fi
}
#
# main - Main cleanup orchestration
#
main() {
local force=false
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
-f|--force)
force=true
shift
;;
*)
die "Unknown option: $1"
;;
esac
done
# Confirm unless --force is used
if [[ "$force" != "true" ]]; then
confirm_clean
fi
# Stop VM if running
if is_vm_running; then
log_info "Stopping running VM..."
"$QVM_STOP"
fi
# Delete directories
log_info "Removing QVM data directories..."
if [[ -d "$QVM_DATA_DIR" ]]; then
log_info " - Deleting: $QVM_DATA_DIR"
rm -rf "$QVM_DATA_DIR"
fi
if [[ -d "$QVM_STATE_DIR" ]]; then
log_info " - Deleting: $QVM_STATE_DIR"
rm -rf "$QVM_STATE_DIR"
fi
if [[ -d "$QVM_CACHE_DIR" ]]; then
log_info " - Deleting: $QVM_CACHE_DIR"
rm -rf "$QVM_CACHE_DIR"
fi
if [[ -d "$QVM_CONFIG_DIR" ]]; then
log_info " - Deleting: $QVM_CONFIG_DIR"
rm -rf "$QVM_CONFIG_DIR"
fi
# Print success message
echo
log_info "QVM cleaned successfully!"
echo
echo "All QVM data has been removed from your system."
echo "Next run of 'qvm start' will initialize everything from scratch."
echo
}
main "$@"

bin/qvm-rebuild

@@ -59,43 +59,45 @@ ensure_user_flake() {
}
#
# build_base_image - Build the base image using nix
# build_vm - Build the VM runner using nix
#
build_base_image() {
log_info "Building base image from flake..."
log_info "Building VM from flake..."
# Build the qcow2 output from user's flake
local build_result="$QVM_STATE_DIR/result"
# Build the VM output from user's flake
local build_result="$QVM_STATE_DIR/vm-result"
if ! nix build "$QVM_USER_FLAKE#qcow2" --out-link "$build_result"; then
die "Failed to build base image. Check your flake configuration at: $QVM_USER_FLAKE/flake.nix"
if ! nix build "$QVM_USER_FLAKE#vm" --out-link "$build_result"; then
die "Failed to build VM. Check your flake configuration at: $QVM_USER_FLAKE/flake.nix"
fi
# Verify the result contains nixos.qcow2
local qcow2_path="$build_result/nixos.qcow2"
if [[ ! -f "$qcow2_path" ]]; then
die "Build succeeded but nixos.qcow2 not found at: $qcow2_path"
# Verify the result contains the VM runner script
local vm_runner="$build_result/bin/run-qvm-dev-vm"
if [[ ! -f "$vm_runner" ]]; then
# Try alternate name pattern
vm_runner=$(find "$build_result/bin" -name "run-*-vm" -type f 2>/dev/null | head -1)
if [[ -z "$vm_runner" || ! -f "$vm_runner" ]]; then
die "Build succeeded but VM runner script not found in: $build_result/bin/"
fi
fi
# Copy the qcow2 to base image location
log_info "Copying image to: $QVM_BASE_IMAGE"
# Remove existing image first (may be read-only from Nix store copy)
rm -f "$QVM_BASE_IMAGE"
cp -L "$qcow2_path" "$QVM_BASE_IMAGE"
# Ensure the new image is writable for future rebuilds
chmod 644 "$QVM_BASE_IMAGE"
# Move the result symlink to data dir (keeps nix store reference)
rm -f "$QVM_DATA_DIR/vm-result"
mv "$build_result" "$QVM_DATA_DIR/vm-result"
# Remove the result symlink
rm -f "$build_result"
# Get the basename of the runner script and construct path in new location
local runner_name
runner_name=$(basename "$vm_runner")
vm_runner="$QVM_DATA_DIR/vm-result/bin/$runner_name"
# Get image size for informational output
local image_size
image_size=$(du -h "$QVM_BASE_IMAGE" | cut -f1)
# Create a symlink to the VM runner at our standard location
log_info "Installing VM runner to: $QVM_VM_RUNNER"
rm -f "$QVM_VM_RUNNER"
ln -sf "$vm_runner" "$QVM_VM_RUNNER"
log_info "Base image built successfully"
log_info "VM built successfully"
echo ""
echo "Base image: $QVM_BASE_IMAGE"
echo "Image size: $image_size"
echo "VM runner: $QVM_VM_RUNNER"
}
#
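
The same flow can be reproduced by hand when debugging a rebuild (a sketch; the runner's exact name depends on the VM's hostName, hence the run-*-vm fallback above):

    nix build "$QVM_USER_FLAKE#vm" --out-link /tmp/qvm-vm
    ls /tmp/qvm-vm/bin/               # expect a run-*-vm wrapper, e.g. run-qvm-dev-vm
    /tmp/qvm-vm/bin/run-qvm-dev-vm    # boots the VM in the foreground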

bin/qvm-run

@@ -153,17 +153,18 @@ is_workspace_mounted() {
#
main() {
# Show usage if no arguments
if [[ $# -eq 0 ]]; then
show_usage
exit 1
fi
# Handle help flags
if [[ "$1" == "-h" || "$1" == "--help" ]]; then
# Handle help flags first
if [[ $# -gt 0 && ( "$1" == "-h" || "$1" == "--help" ) ]]; then
show_usage
exit 0
fi
# If no command given, default to interactive zsh shell
local run_shell=false
if [[ $# -eq 0 ]]; then
run_shell=true
fi
# Get current workspace (absolute path)
local workspace_path
workspace_path="$(pwd)"
@@ -268,25 +269,30 @@ main() {
# Add connection target
ssh_cmd+=(root@localhost)
# Build remote command: cd to workspace and execute user's command
# Quote each argument properly to handle spaces and special chars
local remote_cmd="cd '$guest_path' && "
# Build remote command: cd to workspace and execute user's command (or shell)
local remote_cmd="cd '$guest_path'"
# Append user's command with proper quoting
local first_arg=1
for arg in "$@"; do
if [[ $first_arg -eq 1 ]]; then
remote_cmd+="$arg"
first_arg=0
else
# Quote arguments that contain spaces or special characters
if [[ "$arg" =~ [[:space:]] ]]; then
remote_cmd+=" '$arg'"
if [[ "$run_shell" == "true" ]]; then
# No command - start interactive zsh shell
remote_cmd+=" && exec zsh"
else
# Append user's command with proper quoting
remote_cmd+=" && "
local first_arg=1
for arg in "$@"; do
if [[ $first_arg -eq 1 ]]; then
remote_cmd+="$arg"
first_arg=0
else
remote_cmd+=" $arg"
# Quote arguments that contain spaces or special characters
if [[ "$arg" =~ [[:space:]] ]]; then
remote_cmd+=" '$arg'"
else
remote_cmd+=" $arg"
fi
fi
fi
done
done
fi
# Add the remote command as final SSH argument
ssh_cmd+=("$remote_cmd")
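
The remote command that ends up on the SSH command line then looks roughly like this (illustrative; assuming the workspace maps to /workspaces/foo inside the guest):

    # qvm run                      ->  cd '/workspaces/foo' && exec zsh
    # qvm run cargo test           ->  cd '/workspaces/foo' && cargo test
    # qvm run echo "hello world"   ->  cd '/workspaces/foo' && echo 'hello world'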

bin/qvm-start

@@ -1,15 +1,13 @@
#!/usr/bin/env bash
#
# qvm-start - Launch the QEMU VM with all required configuration
# qvm-start - Launch the QVM using the NixOS VM runner
#
# This script starts the QVM virtual machine with:
# - KVM acceleration and host CPU passthrough
# - Configurable memory and CPU count
# - Overlay disk backed by base.qcow2 (copy-on-write)
# - SSH port forwarding on auto-selected port
# - 9p mounts for shared caches (cargo, pnpm, sccache)
# - Serial console logging
# - Daemonized execution with PID file
# This script starts the QVM virtual machine by:
# - Building the VM if not already built
# - Configuring QEMU options via environment variables
# - Adding 9p mounts for caches and workspaces
# - Starting the VM in the background
# - Waiting for SSH to become available
#
set -euo pipefail
@@ -21,8 +19,6 @@ source "$QVM_LIB_DIR/common.sh"
#
# find_available_port - Find an available TCP port starting from base
# Args: $1 - starting port number (default: 2222)
# Returns: available port number on stdout
#
find_available_port() {
local port="${1:-2222}"
@@ -34,69 +30,63 @@ find_available_port() {
echo "$port"
return 0
fi
(( port++ ))
(( attempt++ ))
(( port++ )) || true
(( attempt++ )) || true
done
die "Could not find available port after $max_attempts attempts"
}
#
# mount_workspaces - Add virtfs entries for registered workspaces
# Args: $1 - name of array variable to append to
# Usage: mount_workspaces qemu_cmd
# build_qemu_opts - Build QEMU_OPTS environment variable with 9p mounts
#
mount_workspaces() {
local -n cmd_array=$1
build_qemu_opts() {
local ssh_port="$1"
local opts=""
# Check if workspaces registry exists
if [[ ! -f "$QVM_WORKSPACES_FILE" ]]; then
log_info "No workspaces registry found, skipping workspace mounts"
return 0
# 9p mounts for shared caches
opts+="-virtfs local,path=$QVM_CARGO_HOME,mount_tag=cargo_home,security_model=mapped-xattr "
opts+="-virtfs local,path=$QVM_CARGO_TARGET,mount_tag=cargo_target,security_model=mapped-xattr "
opts+="-virtfs local,path=$QVM_PNPM_STORE,mount_tag=pnpm_store,security_model=mapped-xattr "
opts+="-virtfs local,path=$QVM_SCCACHE,mount_tag=sccache,security_model=mapped-xattr "
# Mount host opencode config if it exists
if [[ -d "$QVM_HOST_OPENCODE_CONFIG" ]]; then
log_info "Adding opencode config mount..."
opts+="-virtfs local,path=$QVM_HOST_OPENCODE_CONFIG,mount_tag=opencode_config,security_model=mapped-xattr "
fi
# Check if file is empty or invalid JSON
if [[ ! -s "$QVM_WORKSPACES_FILE" ]]; then
log_info "Workspaces registry is empty, skipping workspace mounts"
return 0
fi
# Parse workspaces and add virtfs entries
local workspace_count
workspace_count=$(jq -r 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0")
if [[ "$workspace_count" -eq 0 ]]; then
log_info "No workspaces registered, skipping workspace mounts"
return 0
fi
log_info "Mounting $workspace_count workspace(s)..."
# Iterate through workspaces and add virtfs entries
local i=0
while (( i < workspace_count )); do
local path mount_tag
path=$(jq -r ".[$i].host_path" "$QVM_WORKSPACES_FILE")
mount_tag=$(jq -r ".[$i].mount_tag" "$QVM_WORKSPACES_FILE")
# Add workspace mounts from registry
if [[ -f "$QVM_WORKSPACES_FILE" && -s "$QVM_WORKSPACES_FILE" ]]; then
local workspace_count
workspace_count=$(jq -r 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0")
if [[ -z "$path" || -z "$mount_tag" || "$path" == "null" || "$mount_tag" == "null" ]]; then
log_warn "Skipping invalid workspace entry at index $i"
(( i++ ))
continue
if [[ "$workspace_count" -gt 0 ]]; then
log_info "Adding $workspace_count workspace mount(s)..."
local i=0
while (( i < workspace_count )); do
local path mount_tag
path=$(jq -r ".[$i].host_path" "$QVM_WORKSPACES_FILE")
mount_tag=$(jq -r ".[$i].mount_tag" "$QVM_WORKSPACES_FILE")
if [[ -n "$path" && -n "$mount_tag" && "$path" != "null" && "$mount_tag" != "null" && -d "$path" ]]; then
log_info " - $path -> $mount_tag"
opts+="-virtfs local,path=$path,mount_tag=$mount_tag,security_model=mapped-xattr "
fi
(( i++ )) || true
done
fi
# Verify path exists
if [[ ! -d "$path" ]]; then
log_warn "Workspace path does not exist: $path (skipping)"
(( i++ ))
continue
fi
log_info " - $path -> $mount_tag"
cmd_array+=(-virtfs "local,path=$path,mount_tag=$mount_tag,security_model=mapped-xattr")
(( i++ )) || true # Prevent set -e from exiting when i was 0
done
fi
# Serial console to log file and daemonize
opts+="-serial file:$QVM_SERIAL_LOG "
opts+="-display none "
opts+="-daemonize "
opts+="-pidfile $QVM_PID_FILE "
echo "$opts"
}
#
@@ -126,25 +116,26 @@ main() {
# First-run initialization
ensure_dirs
if [[ ! -f "$QVM_BASE_IMAGE" ]]; then
log_info "First run detected - building base image..."
# Check if VM runner exists, build if not
if [[ ! -L "$QVM_VM_RUNNER" || ! -f "$(readlink -f "$QVM_VM_RUNNER" 2>/dev/null || echo "")" ]]; then
log_info "First run detected - building VM..."
log_info "This may take several minutes."
# Call qvm-rebuild to build the image
SCRIPT_DIR="$(dirname "$0")"
if ! "$SCRIPT_DIR/qvm-rebuild"; then
die "Failed to build base image. Run 'qvm rebuild' manually to debug."
die "Failed to build VM. Run 'qvm rebuild' manually to debug."
fi
fi
# Create overlay image if it doesn't exist
if [[ ! -f "$QVM_OVERLAY" ]]; then
log_info "Creating overlay disk..."
if ! qemu-img create -f qcow2 -b "$QVM_BASE_IMAGE" -F qcow2 "$QVM_OVERLAY"; then
die "Failed to create overlay disk"
fi
else
log_info "Using existing overlay disk"
# Verify VM runner exists now
if [[ ! -L "$QVM_VM_RUNNER" ]]; then
die "VM runner not found at $QVM_VM_RUNNER. Run 'qvm rebuild' first."
fi
local vm_script
vm_script=$(readlink -f "$QVM_VM_RUNNER")
if [[ ! -f "$vm_script" ]]; then
die "VM runner script not found. Run 'qvm rebuild' to fix."
fi
# Find available SSH port
@@ -153,63 +144,53 @@ main() {
log_info "Using SSH port: $ssh_port"
# Get memory and CPU settings from environment or use defaults
local memory="${QVM_MEMORY:-40G}"
local memory="${QVM_MEMORY:-30G}"
local cpus="${QVM_CPUS:-30}"
log_info "VM resources: ${memory} memory, ${cpus} CPUs"
# Build QEMU command
local qemu_cmd=(
qemu-system-x86_64
-enable-kvm
-cpu host
-m "$memory"
-smp "$cpus"
# Overlay disk (virtio for performance)
-drive "file=$QVM_OVERLAY,if=virtio,format=qcow2"
# User-mode networking with SSH port forward
-netdev "user,id=net0,hostfwd=tcp::${ssh_port}-:22"
-device "virtio-net-pci,netdev=net0"
# 9p mounts for shared caches (security_model=mapped-xattr for proper permissions)
# Note: trans, version, msize are kernel-side mount options (in NixOS flake), not QEMU options
-virtfs "local,path=$QVM_CARGO_HOME,mount_tag=cargo_home,security_model=mapped-xattr"
-virtfs "local,path=$QVM_CARGO_TARGET,mount_tag=cargo_target,security_model=mapped-xattr"
-virtfs "local,path=$QVM_PNPM_STORE,mount_tag=pnpm_store,security_model=mapped-xattr"
-virtfs "local,path=$QVM_SCCACHE,mount_tag=sccache,security_model=mapped-xattr"
)
# Build QEMU options
local qemu_opts
qemu_opts=$(build_qemu_opts "$ssh_port")
# Add workspace mounts from registry
mount_workspaces qemu_cmd
# Launch VM using the NixOS runner script
# The runner script respects these environment variables:
# - QEMU_OPTS: additional QEMU options
# - NIX_DISK_IMAGE: path to disk image (optional, uses tmpdir by default)
log_info "Launching VM..."
# Continue building QEMU command
qemu_cmd+=(
# Serial console to log file
-serial "file:$QVM_SERIAL_LOG"
# No graphics (use -display none for daemonized mode)
-display none
# Daemonize with PID file
-daemonize
-pidfile "$QVM_PID_FILE"
)
# Create persistent disk image location if needed
local disk_image="$QVM_STATE_DIR/qvm-dev.qcow2"
# Launch QEMU
log_info "Launching QEMU..."
if ! "${qemu_cmd[@]}"; then
export QEMU_OPTS="$qemu_opts -m $memory -smp $cpus"
export QEMU_NET_OPTS="hostfwd=tcp::${ssh_port}-:22"
export NIX_DISK_IMAGE="$disk_image"
# Run VM - the runner execs QEMU with -daemonize (set via QEMU_OPTS), so it returns quickly
if ! "$vm_script" &>/dev/null; then
cleanup_on_failure
die "Failed to start QEMU"
die "Failed to start VM"
fi
# Wait a moment for QEMU to create PID file
sleep 2
# If PID file wasn't created by our QEMU_OPTS, get it from the background process
if [[ ! -f "$QVM_PID_FILE" ]]; then
# Try to find the QEMU process
local qemu_pid
qemu_pid=$(pgrep -f "qemu.*qvm-dev" | head -1 || echo "")
if [[ -n "$qemu_pid" ]]; then
echo "$qemu_pid" > "$QVM_PID_FILE"
fi
fi
# Save SSH port to file
echo "$ssh_port" > "$QVM_SSH_PORT_FILE"
# Wait for SSH to become available
if ! wait_for_ssh "$ssh_port" 60; then
if ! wait_for_ssh "$ssh_port" 120; then
cleanup_on_failure
die "VM started but SSH did not become available"
die "VM started but SSH did not become available. Check: $QVM_SERIAL_LOG"
fi
# Success!
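
Stripped of the surrounding plumbing, the launch reduces to setting the environment variables the generated runner understands (QEMU_OPTS, QEMU_NET_OPTS, and NIX_DISK_IMAGE are standard knobs of the NixOS qemu-vm runner) and invoking it; a sketch with illustrative values:

    export QEMU_OPTS="<9p mounts, serial log, -daemonize, -m 30G -smp 30>"
    export QEMU_NET_OPTS="hostfwd=tcp::2222-:22"
    export NIX_DISK_IMAGE="$QVM_STATE_DIR/qvm-dev.qcow2"
    "$QVM_VM_RUNNER"    # returns quickly because QEMU daemonizes itself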

flake.nix

@@ -40,7 +40,7 @@
imports = [
inputs.home-manager.nixosModules.home-manager
inputs.ros_neovim.nixosModules.default
# inputs.ros_neovim.nixosModules.default
inputs.common.nixosModules.essentials
inputs.common.nixosModules.git
inputs.common.nixosModules.zsh
@@ -76,20 +76,21 @@
useGlobalPkgs = true;
backupFileExtension = "bak";
sharedModules = [
inputs.common.homeManagerModules.atuin
inputs.common.homeManagerModules.git
inputs.common.homeManagerModules.postgres_cli_options
inputs.common.homeManagerModules.starship
inputs.common.homeManagerModules.zoxide
inputs.common.homeManagerModules.zsh
inputs.common.homeManagerModules.tmux
inputs.common.homeManagerModules.direnv
({ programs.direnv.config.whitelist.prefix = [ "/" ]; })
];
users.root = {
home.stateVersion = stateVersion;
programs.home-manager.enable = true;
sharedModules = [
inputs.common.homeManagerModules.atuin
inputs.common.homeManagerModules.git
inputs.common.homeManagerModules.postgres_cli_options
inputs.common.homeManagerModules.starship
inputs.common.homeManagerModules.zoxide
inputs.common.homeManagerModules.zsh
inputs.common.homeManagerModules.tmux
inputs.common.homeManagerModules.direnv
];
};
};
@@ -164,6 +165,18 @@
];
};
fileSystems."/root/.config/opencode" = {
device = "opencode_config";
fsType = "9p";
options = [
"trans=virtio"
"version=9p2000.L"
"msize=104857600"
"_netdev"
"nofail"
];
};
# Environment variables for cache directories
environment.variables = {
CARGO_HOME = "/cache/cargo";
@@ -209,7 +222,7 @@
'';
# 35GB disk size
# 40GB disk size
virtualisation.diskSize = 40 * 1024;
system.stateVersion = stateVersion;
@@ -217,20 +230,33 @@
in
let
qcow2Image = nixos-generators.nixosGenerate {
pkgs = nixpkgs.legacyPackages.${system};
# Use standard NixOS VM builder instead of nixos-generators
# The nixos-generators qcow format builds the image inside a VM with only 100MB RAM, which OOMs on large closures
baseVm = nixpkgs.lib.nixosSystem {
inherit system;
format = "qcow";
modules = [ vmModule ];
};
in
{
# Export the qcow2 image
nixosConfigurations.base = baseVm;
# Runnable VM script (./result/bin/run-qvm-dev-vm)
packages.${system} = {
qcow2 = qcow2Image;
default = qcow2Image;
vm = baseVm.config.system.build.vm;
default = baseVm.config.system.build.vm;
};
apps.${system}.default = {
type = "app";
program = "${baseVm.config.system.build.vm}/bin/run-qvm-dev-vm";
};
devShells.${system}.default = pkgs.mkShellNoCC {
QEMU_NET_OPTS = "hostfwd=tcp::2222-:22";
};
# Export the module for reuse
nixosModules.default = vmModule;
};
}
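
With these outputs the flake can also be exercised directly, without the qvm wrapper (a sketch, run from the flake directory):

    nix build .#vm && ./result/bin/run-qvm-dev-vm    # build and boot the dev VM
    nix run .                                        # same, via the default app output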

lib/common.sh

@@ -23,6 +23,7 @@ readonly QVM_SSH_PORT_FILE="$QVM_STATE_DIR/ssh.port"
readonly QVM_SERIAL_LOG="$QVM_STATE_DIR/serial.log"
readonly QVM_WORKSPACES_FILE="$QVM_STATE_DIR/workspaces.json"
readonly QVM_USER_FLAKE="$QVM_CONFIG_DIR/flake"
readonly QVM_VM_RUNNER="$QVM_DATA_DIR/run-vm"
# Cache directories for 9p mounts (shared between host and VM)
readonly QVM_CARGO_HOME="$QVM_CACHE_DIR/cargo-home"
@@ -30,17 +31,20 @@ readonly QVM_CARGO_TARGET="$QVM_CACHE_DIR/cargo-target"
readonly QVM_PNPM_STORE="$QVM_CACHE_DIR/pnpm-store"
readonly QVM_SCCACHE="$QVM_CACHE_DIR/sccache"
# Host config directories to mount in VM (read-write for tools that need it)
readonly QVM_HOST_OPENCODE_CONFIG="${XDG_CONFIG_HOME:-$HOME/.config}/opencode"
# Color codes (only used if stdout is a TTY)
if [[ -t 1 ]]; then
readonly COLOR_INFO='\033[0;36m' # Cyan
readonly COLOR_WARN='\033[0;33m' # Yellow
readonly COLOR_ERROR='\033[0;31m' # Red
readonly COLOR_RESET='\033[0m' # Reset
readonly COLOR_INFO='\033[0;36m' # Cyan
readonly COLOR_WARN='\033[0;33m' # Yellow
readonly COLOR_ERROR='\033[0;31m' # Red
readonly COLOR_RESET='\033[0m' # Reset
else
readonly COLOR_INFO=''
readonly COLOR_WARN=''
readonly COLOR_ERROR=''
readonly COLOR_RESET=''
readonly COLOR_INFO=''
readonly COLOR_WARN=''
readonly COLOR_ERROR=''
readonly COLOR_RESET=''
fi
#
@@ -48,7 +52,7 @@ fi
# Usage: log_info "message"
#
log_info() {
echo -e "${COLOR_INFO}[INFO]${COLOR_RESET} $*" >&2
echo -e "${COLOR_INFO}[INFO]${COLOR_RESET} $*" >&2
}
#
@@ -56,7 +60,7 @@ log_info() {
# Usage: log_warn "message"
#
log_warn() {
echo -e "${COLOR_WARN}[WARN]${COLOR_RESET} $*" >&2
echo -e "${COLOR_WARN}[WARN]${COLOR_RESET} $*" >&2
}
#
@@ -64,7 +68,7 @@ log_warn() {
# Usage: log_error "message"
#
log_error() {
echo -e "${COLOR_ERROR}[ERROR]${COLOR_RESET} $*" >&2
echo -e "${COLOR_ERROR}[ERROR]${COLOR_RESET} $*" >&2
}
#
@@ -72,8 +76,8 @@ log_error() {
# Usage: die "error message"
#
die() {
log_error "$@"
exit 1
log_error "$@"
exit 1
}
#
@@ -81,14 +85,14 @@ die() {
# Usage: ensure_dirs
#
ensure_dirs() {
mkdir -p "$QVM_DATA_DIR" \
"$QVM_STATE_DIR" \
"$QVM_CACHE_DIR" \
"$QVM_CONFIG_DIR" \
"$QVM_CARGO_HOME" \
"$QVM_CARGO_TARGET" \
"$QVM_PNPM_STORE" \
"$QVM_SCCACHE"
mkdir -p "$QVM_DATA_DIR" \
"$QVM_STATE_DIR" \
"$QVM_CACHE_DIR" \
"$QVM_CONFIG_DIR" \
"$QVM_CARGO_HOME" \
"$QVM_CARGO_TARGET" \
"$QVM_PNPM_STORE" \
"$QVM_SCCACHE"
}
#
@@ -97,21 +101,21 @@ ensure_dirs() {
# Usage: if is_vm_running; then ... fi
#
is_vm_running() {
if [[ ! -f "$QVM_PID_FILE" ]]; then
return 1
fi
local pid
pid=$(cat "$QVM_PID_FILE")
# Check if process exists and is a QEMU process
if kill -0 "$pid" 2>/dev/null; then
return 0
else
# Stale PID file, remove it
rm -f "$QVM_PID_FILE"
return 1
fi
if [[ ! -f "$QVM_PID_FILE" ]]; then
return 1
fi
local pid
pid=$(cat "$QVM_PID_FILE")
# Check if process exists and is a QEMU process
if kill -0 "$pid" 2>/dev/null; then
return 0
else
# Stale PID file, remove it
rm -f "$QVM_PID_FILE"
return 1
fi
}
#
@@ -120,10 +124,10 @@ is_vm_running() {
# Usage: port=$(get_ssh_port)
#
get_ssh_port() {
if [[ ! -f "$QVM_SSH_PORT_FILE" ]]; then
die "SSH port file not found. Is the VM running?"
fi
cat "$QVM_SSH_PORT_FILE"
if [[ ! -f "$QVM_SSH_PORT_FILE" ]]; then
die "SSH port file not found. Is the VM running?"
fi
cat "$QVM_SSH_PORT_FILE"
}
#
@@ -133,8 +137,8 @@ get_ssh_port() {
# Usage: hash=$(workspace_hash "/path/to/workspace")
#
workspace_hash() {
local path="$1"
echo -n "$path" | sha256sum | cut -c1-8
local path="$1"
echo -n "$path" | sha256sum | cut -c1-8
}
#
@@ -145,25 +149,36 @@ workspace_hash() {
# Usage: wait_for_ssh "$port" 30
#
wait_for_ssh() {
local port="${1:-}"
local timeout="${2:-60}"
local elapsed=0
if [[ -z "$port" ]]; then
die "wait_for_ssh requires port argument"
fi
log_info "Waiting for SSH on port $port (timeout: ${timeout}s)..."
while (( elapsed < timeout )); do
if nc -z -w 1 localhost "$port" 2>/dev/null; then
log_info "SSH is ready"
return 0
fi
sleep 1
(( elapsed++ ))
done
log_error "SSH did not become available within ${timeout}s"
return 1
local port="${1:-}"
local timeout="${2:-60}"
local elapsed=0
if [[ -z "$port" ]]; then
die "wait_for_ssh requires port argument"
fi
log_info "Waiting for SSH on port $port (timeout: ${timeout}s)..."
while ((elapsed < timeout)); do
# Actually attempt SSH connection to verify sshd is responding
# nc -z only checks if port is open (QEMU opens it immediately)
# We need to verify sshd is actually ready to accept connections
if timeout 2 sshpass -p root ssh \
-o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
-o LogLevel=ERROR \
-o PubkeyAuthentication=no \
-o PasswordAuthentication=yes \
-o ConnectTimeout=1 \
-p "$port" \
root@localhost "true" 2>/dev/null; then
log_info "SSH is ready"
return 0
fi
sleep 1
((elapsed++))
done
log_error "SSH did not become available within ${timeout}s"
return 1
}
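
A standalone caller would pair it with get_ssh_port from the same file (hypothetical snippet; qvm-start itself now passes a 120s timeout):

    port="$(get_ssh_port)"
    wait_for_ssh "$port" 120 || die "VM is up but sshd never answered on port $port"

Note that the probe now shells out to sshpass, which therefore becomes a host-side dependency for starting the VM.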