Add initial QVM CLI, Nix flake, scripts and README

This commit is contained in:
Joshua Bell 2026-01-26 00:16:18 -06:00
parent 25b1cca0e6
commit 8534f7efb9
14 changed files with 2359 additions and 0 deletions

94
bin/qvm Executable file
View file

@@ -0,0 +1,94 @@
#!/usr/bin/env bash
#
# qvm - Main dispatcher for QVM (QEMU Development VM) commands
#
# This script routes subcommands to their respective qvm-* implementations.
# It sources common.sh for shared configuration and utility functions.
#
set -euo pipefail
# Source common library (use QVM_LIB_DIR from wrapper or relative path for dev)
source "${QVM_LIB_DIR:-$(dirname "$0")/../lib}/common.sh"
readonly VERSION="0.1.0"
#
# show_help - Print the top-level usage text for the qvm dispatcher.
# The text is static, so a quoted heredoc delimiter is used to prevent
# any accidental parameter or command expansion.
#
show_help() {
  cat <<'EOF'
qvm - QEMU Development VM Manager
USAGE:
qvm <command> [args...]
COMMANDS:
start Start the VM (create if needed)
stop Stop the running VM
run Execute a command in the VM
ssh Open SSH session or run command in VM
status Show VM status and information
rebuild Rebuild the base VM image from flake
reset Delete overlay and start fresh (keeps base image)
OPTIONS:
-h, --help Show this help message
-v, --version Show version information
EXAMPLES:
qvm start Start the VM
qvm ssh Open interactive SSH session
qvm run 'ls -la' Run command in VM
qvm status Check if VM is running
qvm stop Stop the VM
For more information on a specific command, run:
qvm <command> --help
EOF
}
#
# show_version - Print the CLI version string (reads the global VERSION
# constant defined at the top of this script).
#
show_version() {
  printf 'qvm version %s\n' "${VERSION}"
}
#
# main - Parse the first CLI argument and dispatch accordingly.
# Known subcommands are exec'd as their qvm-* implementation (process
# replacement preserves exit codes); help/version are handled inline;
# anything else prints an error plus usage on stderr and fails.
#
main() {
  # No arguments at all: show usage and succeed.
  if [[ $# -eq 0 ]]; then
    show_help
    exit 0
  fi

  local cmd="$1"
  shift

  case "$cmd" in
    start|stop|run|ssh|status|rebuild|reset)
      # Hand control to the dedicated implementation; exec avoids
      # leaving this dispatcher process hanging around.
      exec "qvm-${cmd}" "$@"
      ;;
    help|--help|-h)
      show_help
      exit 0
      ;;
    --version|-v)
      show_version
      exit 0
      ;;
    *)
      log_error "Unknown command: ${cmd}"
      echo "" >&2
      show_help >&2
      exit 1
      ;;
  esac
}
main "$@"

137
bin/qvm-rebuild Executable file
View file

@@ -0,0 +1,137 @@
#!/usr/bin/env bash
#
# qvm-rebuild - Build the base qcow2 image from user's flake
#
# This script builds the QVM base image by:
# - Ensuring a user flake exists (copying default if needed)
# - Running nix build on the user's flake configuration
# - Copying the resulting qcow2 to the base image location
# - Optionally warning if VM is running
#
set -euo pipefail
# Source common library
QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}"
# shellcheck source=lib/common.sh
source "$QVM_LIB_DIR/common.sh"
#
# ensure_user_flake - Guarantee that $QVM_USER_FLAKE/flake.nix exists.
# When the user has no flake yet, seed the directory from the bundled
# default template (flake/default-vm, resolved relative to the lib dir,
# which works for both the installed and the development layout).
# Dies when the template itself is missing.
#
ensure_user_flake() {
  if [[ -f "$QVM_USER_FLAKE/flake.nix" ]]; then
    log_info "Using existing user flake: $QVM_USER_FLAKE/flake.nix"
    return 0
  fi

  log_info "User flake not found, copying default template..."

  # Template ships next to lib/ in both layouts.
  local template_dir="$QVM_LIB_DIR/../flake/default-vm"
  [[ -d "$template_dir" ]] || die "Default flake template not found at: $template_dir"
  [[ -f "$template_dir/flake.nix" ]] || die "Default flake.nix not found at: $template_dir/flake.nix"

  # Seed the user's flake directory from the template.
  mkdir -p "$QVM_USER_FLAKE"
  cp -r "$template_dir"/* "$QVM_USER_FLAKE/"
  log_info "Default flake copied to: $QVM_USER_FLAKE"
  echo ""
  echo "You can customize your VM by editing: $QVM_USER_FLAKE/flake.nix"
  echo ""
}
#
# build_base_image - Build the base qcow2 image with `nix build` and
# install it at $QVM_BASE_IMAGE.
#
# Globals (read):    QVM_STATE_DIR  - scratch location for the nix
#                                     result symlink
#                    QVM_USER_FLAKE - flake directory to build
# Globals (written): QVM_BASE_IMAGE - destination of the built image
# Outputs:  progress via log_info; final image path/size on stdout
# Exits:    via die when the build fails or the expected nixos.qcow2
#           artifact is absent from the build result
#
build_base_image() {
log_info "Building base image from flake..."
# Build the qcow2 output from user's flake; --out-link pins the store
# result at a predictable path so we can copy out of it below.
local build_result="$QVM_STATE_DIR/result"
if ! nix build "$QVM_USER_FLAKE#qcow2" --out-link "$build_result"; then
die "Failed to build base image. Check your flake configuration at: $QVM_USER_FLAKE/flake.nix"
fi
# Verify the result contains nixos.qcow2 (sanity check of the flake's output layout)
local qcow2_path="$build_result/nixos.qcow2"
if [[ ! -f "$qcow2_path" ]]; then
die "Build succeeded but nixos.qcow2 not found at: $qcow2_path"
fi
# Copy the qcow2 to base image location; -L dereferences the nix store
# symlink so the copy is a real, writable file.
log_info "Copying image to: $QVM_BASE_IMAGE"
cp -L "$qcow2_path" "$QVM_BASE_IMAGE"
# Remove the result symlink so it no longer pins the store path.
rm -f "$build_result"
# Get image size for informational output
local image_size
image_size=$(du -h "$QVM_BASE_IMAGE" | cut -f1)
log_info "Base image built successfully"
echo ""
echo "Base image: $QVM_BASE_IMAGE"
echo "Image size: $image_size"
}
#
# warn_if_running - If the VM is currently up, tell the user that the
# rebuilt base image only takes effect after a stop/start cycle.
# No-op (returns 0 silently) when the VM is stopped.
#
warn_if_running() {
  is_vm_running || return 0
  log_warn "VM is currently running"
  echo ""
  echo "The new base image will only take effect after restarting the VM:"
  echo " qvm stop"
  echo " qvm start"
  echo ""
  echo "Note: Changes to your VM overlay will be preserved."
  echo " Use 'qvm reset' to start fresh with the new base image."
  echo ""
}
#
# main - Rebuild flow: prepare state directories, make sure a user flake
# exists, build the base image, then print restart guidance (the wording
# differs depending on whether the VM is currently running).
#
main() {
  log_info "Rebuilding QVM base image..."
  ensure_dirs        # state/cache directories
  ensure_user_flake  # seed default flake when absent
  build_base_image   # nix build + copy the qcow2
  warn_if_running    # restart notice when the VM is live

  echo "Next steps:"
  if is_vm_running; then
    echo " 1. Stop the VM: qvm stop"
    echo " 2. Start the VM: qvm start"
  else
    echo " - Start the VM: qvm start"
  fi
  echo " - Customize the VM: edit $QVM_USER_FLAKE/flake.nix"
  echo " - Reset to fresh state: qvm reset"
}
# Run main function
main "$@"

116
bin/qvm-reset Executable file
View file

@@ -0,0 +1,116 @@
#!/usr/bin/env bash
#
# qvm-reset - Wipe VM overlay and workspace registry
#
# This script resets the VM to a clean state by deleting the overlay.qcow2
# and workspaces.json files. This is useful when you want to start fresh
# or if the VM state has become corrupted.
#
# IMPORTANT: This does NOT delete the base image (base.qcow2), so you won't
# need to re-download or rebuild the NixOS image.
#
# Usage: qvm reset [-f|--force]
# -f, --force Skip confirmation prompt
#
set -euo pipefail
# Source common library
readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../lib" && pwd)}"
source "${QVM_LIB_DIR}/common.sh"
# Get path to qvm-stop script
readonly QVM_BIN_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly QVM_STOP="${QVM_BIN_DIR}/qvm-stop"
#
# confirm_reset - Interactively confirm the reset. Lists exactly what
# would be removed; exits 0 early when there is nothing to delete or the
# user declines at the prompt.
#
confirm_reset() {
  echo
  log_warn "This will delete:"
  local have_targets=false
  if [[ -f "$QVM_OVERLAY" ]]; then
    echo " - $QVM_OVERLAY"
    have_targets=true
  fi
  if [[ -f "$QVM_WORKSPACES_FILE" ]]; then
    echo " - $QVM_WORKSPACES_FILE"
    have_targets=true
  fi
  # Nothing on disk: report and bail out successfully.
  if [[ "$have_targets" == false ]]; then
    log_info "No files to delete (already clean)"
    exit 0
  fi
  echo
  echo "The base image (base.qcow2) and cache directories will NOT be deleted."
  echo
  # Single-keystroke confirmation; anything but y/Y cancels.
  read -p "Continue with reset? [y/N] " -n 1 -r
  echo
  if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    log_info "Reset cancelled"
    exit 0
  fi
}
#
# main - Reset entry point: parse -f/--force, confirm unless forced,
# stop a running VM, remove the overlay and workspace registry, then
# print next steps. The base image and cache dirs are never touched.
#
main() {
  local skip_confirm=false

  # Argument parsing: -f/--force is the only accepted option.
  while [[ $# -gt 0 ]]; do
    case "$1" in
      -f|--force)
        skip_confirm=true
        shift
        ;;
      *)
        die "Unknown option: $1"
        ;;
    esac
  done

  # Interactive confirmation unless forced.
  [[ "$skip_confirm" == true ]] || confirm_reset

  # A running VM holds the overlay open; stop it before deleting.
  if is_vm_running; then
    log_info "VM is running, stopping it first..."
    "$QVM_STOP"
  fi

  if [[ -f "$QVM_OVERLAY" ]]; then
    log_info "Deleting overlay: $QVM_OVERLAY"
    rm -f "$QVM_OVERLAY"
  else
    log_info "Overlay does not exist (nothing to delete)"
  fi

  if [[ -f "$QVM_WORKSPACES_FILE" ]]; then
    log_info "Deleting workspaces registry: $QVM_WORKSPACES_FILE"
    rm -f "$QVM_WORKSPACES_FILE"
  else
    log_info "Workspaces registry does not exist (nothing to delete)"
  fi

  echo
  log_info "Reset complete!"
  echo
  echo "Next steps:"
  echo " - Run 'qvm start' to boot the VM with a fresh overlay"
  echo " - Your base image (base.qcow2) is still intact"
  echo " - Cache directories (cargo-home, pnpm-store, etc.) are preserved"
  echo
}
main "$@"

235
bin/qvm-run Executable file
View file

@@ -0,0 +1,235 @@
#!/usr/bin/env bash
#
# qvm-run - Execute a command in the VM workspace
#
# This script:
# 1. Ensures VM is running (auto-starts if needed)
# 2. Registers current $PWD as a workspace in workspaces.json
# 3. SSHes into VM and executes command in workspace mount point
# 4. Streams output and preserves exit code
#
# Usage: qvm-run <command> [args...]
#
# Notes:
# - Workspaces are pre-mounted at VM start time (no dynamic 9p hotplug)
# - If workspace not already registered, warns to restart VM
# - Uses workspace hash for mount tag and path
#
set -euo pipefail
# Source common library
QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}"
# shellcheck source=lib/common.sh
source "$QVM_LIB_DIR/common.sh"
#
# show_usage - Print the help text for `qvm run`. A quoted heredoc
# delimiter keeps the text literal, so $PWD can be written directly
# without escaping.
#
show_usage() {
  cat <<'EOF'
Usage: qvm run <command> [args...]
Execute a command in the VM at the current workspace directory.
The current directory ($PWD) is automatically registered as a workspace
and mounted into the VM. Commands run in the mounted workspace directory.
Examples:
qvm run cargo build
qvm run npm install
qvm run ls -la
qvm run bash -c "pwd && ls"
Notes:
- Workspaces are mounted at VM start time
- If this workspace is new, you'll need to restart the VM
- Command output streams to your terminal
- Exit code matches the command's exit code
EOF
}
#
# register_workspace - Ensure a workspace entry exists in workspaces.json.
# Args: $1 - absolute host path of the workspace
#       $2 - workspace hash (drives the mount tag and guest path)
# Returns: 0 when the path was already registered, 1 after appending a
#          new entry (a new entry only becomes a mount after a VM
#          restart, which the caller reports to the user).
#
register_workspace() {
  local host_path="$1"
  local ws_hash="$2"
  local tag="ws_${ws_hash}"
  local guest="/workspace/${ws_hash}"

  # First use: start from an empty JSON array.
  [[ -f "$QVM_WORKSPACES_FILE" ]] || echo '[]' > "$QVM_WORKSPACES_FILE"

  # Already present? Then there is nothing to write.
  if jq -e --arg path "$host_path" '.[] | select(.host_path == $path)' "$QVM_WORKSPACES_FILE" >/dev/null 2>&1; then
    log_info "Workspace already registered: $host_path"
    return 0
  fi

  log_info "Registering new workspace: $host_path"

  # Append via a temp file, then rename into place so a crash mid-write
  # never leaves a truncated registry.
  local scratch
  scratch=$(mktemp)
  jq --arg path "$host_path" \
     --arg hash "$ws_hash" \
     --arg tag "$tag" \
     --arg guest "$guest" \
     '. += [{
       host_path: $path,
       hash: $hash,
       mount_tag: $tag,
       guest_path: $guest
     }]' "$QVM_WORKSPACES_FILE" > "$scratch"
  mv "$scratch" "$QVM_WORKSPACES_FILE"

  log_info "Workspace registered as $tag -> $guest"
  return 1 # signal: newly added, VM restart required for the mount
}
#
# is_workspace_mounted - Probe the VM over SSH to check whether a guest
# directory exists (i.e. the workspace share is actually mounted).
# Args: $1 - SSH port
#       $2 - guest path to test
# Returns: 0 when the directory exists inside the guest, 1 otherwise.
#
is_workspace_mounted() {
  local port="$1"
  local dir="$2"
  # Ephemeral VM: skip host-key checks and keep SSH quiet.
  if ssh -o StrictHostKeyChecking=no \
         -o UserKnownHostsFile=/dev/null \
         -o LogLevel=ERROR \
         -p "$port" \
         root@localhost \
         "test -d '$dir'" 2>/dev/null; then
    return 0
  fi
  return 1
}
#
# main - Run a command inside the VM at the current directory's mount.
#
# Flow: register $PWD as a workspace, auto-start the VM when needed,
# verify the workspace share is mounted (mounts only happen at VM start,
# so a freshly registered workspace requires a restart), then exec ssh
# with the user's command executed from the guest-side workspace path.
#
# Exit code: the remote command's exit code (exec + ssh preserve it),
# or 1 when the workspace is not yet mounted.
#
main() {
  # No command given: print usage and fail.
  if [[ $# -eq 0 ]]; then
    show_usage
    exit 1
  fi
  # Help flags.
  if [[ "$1" == "-h" || "$1" == "--help" ]]; then
    show_usage
    exit 0
  fi

  # Resolve the workspace (absolute path) and its stable hash.
  local workspace_path
  workspace_path="$(pwd)"
  local hash
  hash=$(workspace_hash "$workspace_path")
  local guest_path="/workspace/${hash}"
  log_info "Workspace: $workspace_path"
  log_info "Guest path: $guest_path"

  # Record the workspace. A return of 1 just means "newly added"; the
  # mounted-check below tells the user whether a restart is required,
  # so the status is deliberately ignored here.
  register_workspace "$workspace_path" "$hash" || true

  # Auto-start the VM when it is not running.
  if ! is_vm_running; then
    log_info "VM not running, starting..."
    local qvm_start="${QVM_LIB_DIR}/../bin/qvm-start"
    if ! "$qvm_start"; then
      die "Failed to start VM"
    fi
  fi

  local ssh_port
  ssh_port=$(get_ssh_port)

  # Workspaces are only wired up at VM start time (no 9p hotplug).
  if ! is_workspace_mounted "$ssh_port" "$guest_path"; then
    log_error "Workspace not mounted in VM"
    echo ""
    echo "This workspace was just registered but is not mounted in the VM."
    echo "Workspaces must be mounted at VM start time."
    echo ""
    echo "Please restart the VM to mount this workspace:"
    echo " qvm stop"
    echo " qvm start"
    echo ""
    echo "Then try your command again."
    exit 1
  fi

  # Base SSH invocation. Host keys are ignored because the VM overlay
  # is ephemeral and keys change across rebuilds.
  local ssh_cmd=(
    ssh
    -o StrictHostKeyChecking=no
    -o UserKnownHostsFile=/dev/null
    -o LogLevel=ERROR
    -p "$ssh_port"
  )
  # Allocate a TTY only when we actually have one (interactive commands).
  if [[ -t 0 ]]; then
    ssh_cmd+=(-t)
  fi
  ssh_cmd+=(root@localhost)

  # Build the remote command. BUGFIX: printf %q shell-quotes every
  # argument, so spaces, quotes, and other metacharacters survive the
  # SSH transport intact. The previous ad-hoc quoting never quoted the
  # first argument and broke on arguments containing single quotes.
  local remote_cmd quoted_args
  printf -v remote_cmd 'cd %q &&' "$guest_path"
  printf -v quoted_args ' %q' "$@"
  remote_cmd+="$quoted_args"
  ssh_cmd+=("$remote_cmd")

  # Replace this process with the SSH session to preserve the exit code.
  exec "${ssh_cmd[@]}"
}
# Run main function
main "$@"

118
bin/qvm-ssh Executable file
View file

@@ -0,0 +1,118 @@
#!/usr/bin/env bash
#
# qvm-ssh - Direct SSH access to the VM
#
# This script provides SSH access to the running VM:
# - Auto-starts VM if not running
# - Interactive shell by default (detects TTY)
# - Single command execution with -c flag
# - Passes through additional SSH arguments
# - Uses StrictHostKeyChecking=no for host key management
#
set -euo pipefail
# Source common library
QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}"
# shellcheck source=lib/common.sh
source "$QVM_LIB_DIR/common.sh"
#
# show_usage - Print the help text for `qvm ssh`. The text is static,
# so a quoted heredoc delimiter is used to keep it fully literal.
#
show_usage() {
  cat <<'EOF'
Usage: qvm ssh [OPTIONS] [SSH_ARGS...]
Open an SSH session to the VM or run a single command.
OPTIONS:
-c COMMAND Run a single command instead of interactive shell
-h, --help Show this help message
EXAMPLES:
qvm ssh # Open interactive shell
qvm ssh -c "ls -la" # Run single command
qvm ssh -c "pwd" -v # Run command with SSH verbose flag
NOTES:
- VM will auto-start if not running
- SSH connects as root@localhost
- Host key checking is disabled (VM overlay is ephemeral)
- Additional SSH arguments are passed through
EOF
}
#
# main - Parse options, make sure the VM is up, then exec ssh into it.
# -c COMMAND switches to single-command mode; every other argument is
# forwarded to ssh verbatim (e.g. -v for verbose).
#
main() {
  local run_single=false
  local single_cmd=""
  local passthrough=()

  # Argument parsing: -h/--help, -c COMMAND, everything else to ssh.
  while [[ $# -gt 0 ]]; do
    case "$1" in
      -h|--help)
        show_usage
        exit 0
        ;;
      -c)
        # -c consumes the following argument as the remote command.
        if [[ $# -lt 2 ]]; then
          die "Option -c requires a command argument"
        fi
        run_single=true
        single_cmd="$2"
        shift 2
        ;;
      *)
        passthrough+=("$1")
        shift
        ;;
    esac
  done

  # Boot the VM on demand.
  if ! is_vm_running; then
    log_info "VM is not running, starting it..."
    "$QVM_LIB_DIR/../bin/qvm-start"
  fi

  local port
  port=$(get_ssh_port)

  # Host keys are ignored: the overlay is ephemeral and keys change.
  local ssh_cmd=(
    ssh
    -o StrictHostKeyChecking=no
    -o UserKnownHostsFile=/dev/null
    -o LogLevel=ERROR # Suppress host key warnings
    -p "$port"
    root@localhost
  )

  # Interactive sessions get a TTY; command mode does not.
  if [[ "$run_single" = false ]] && [[ -t 0 ]]; then
    ssh_cmd+=(-t)
  fi
  # Forward any collected pass-through SSH arguments.
  if [[ ${#passthrough[@]} -gt 0 ]]; then
    ssh_cmd+=("${passthrough[@]}")
  fi
  # Append the remote command when in command mode.
  if [[ "$run_single" = true ]]; then
    ssh_cmd+=("$single_cmd")
  fi

  # Replace the shell with ssh so exit codes propagate unchanged.
  exec "${ssh_cmd[@]}"
}
# Run main function
main "$@"

224
bin/qvm-start Executable file
View file

@@ -0,0 +1,224 @@
#!/usr/bin/env bash
#
# qvm-start - Launch the QEMU VM with all required configuration
#
# This script starts the QVM virtual machine with:
# - KVM acceleration and host CPU passthrough
# - Configurable memory and CPU count
# - Overlay disk backed by base.qcow2 (copy-on-write)
# - SSH port forwarding on auto-selected port
# - 9p mounts for shared caches (cargo, pnpm, sccache)
# - Serial console logging
# - Daemonized execution with PID file
#
set -euo pipefail
# Source common library
QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}"
# shellcheck source=lib/common.sh
source "$QVM_LIB_DIR/common.sh"
#
# find_available_port - Find an available TCP port starting from a base.
# Args: $1 - starting port number (default: 2222)
# Outputs: the first free port on stdout
# Exits: via die after scanning 100 consecutive busy ports
#
# BUGFIX: the increments use plain arithmetic assignment instead of
# (( var++ )). A post-increment whose old value is 0 makes the (( ))
# command return status 1, which aborted the $(...) subshell under
# `set -e` the moment the first candidate port was busy.
#
find_available_port() {
  local port="${1:-2222}"
  local max_attempts=100
  local attempt=0
  while (( attempt < max_attempts )); do
    # nc -z succeeds when something is listening, so failure == free.
    if ! nc -z localhost "$port" 2>/dev/null; then
      echo "$port"
      return 0
    fi
    port=$((port + 1))
    attempt=$((attempt + 1))
  done
  die "Could not find available port after $max_attempts attempts"
}
#
# mount_workspaces - Append -virtfs entries for every registered workspace.
# Args: $1 - name of the array variable to append QEMU args to (nameref)
# Usage: mount_workspaces qemu_cmd
#
# Reads $QVM_WORKSPACES_FILE, a JSON array of objects with host_path /
# mount_tag fields as written by qvm-run's register_workspace. Entries
# with missing fields or nonexistent host paths are skipped with a
# warning; a missing/empty registry is a silent no-op.
#
mount_workspaces() {
  local -n cmd_array=$1

  # No registry file, or an empty file: nothing to mount.
  if [[ ! -f "$QVM_WORKSPACES_FILE" ]]; then
    log_info "No workspaces registry found, skipping workspace mounts"
    return 0
  fi
  if [[ ! -s "$QVM_WORKSPACES_FILE" ]]; then
    log_info "Workspaces registry is empty, skipping workspace mounts"
    return 0
  fi

  # Invalid JSON degrades to a count of 0 and is skipped.
  local workspace_count
  workspace_count=$(jq -r 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0")
  if [[ "$workspace_count" -eq 0 ]]; then
    log_info "No workspaces registered, skipping workspace mounts"
    return 0
  fi

  log_info "Mounting $workspace_count workspace(s)..."
  local i=0
  while (( i < workspace_count )); do
    local path mount_tag
    # BUGFIX: the registry stores the host directory under "host_path"
    # (see qvm-run's register_workspace), not "path" — the old query
    # always returned null, so no workspace was ever mounted.
    path=$(jq -r ".[$i].host_path" "$QVM_WORKSPACES_FILE")
    mount_tag=$(jq -r ".[$i].mount_tag" "$QVM_WORKSPACES_FILE")
    if [[ -z "$path" || -z "$mount_tag" || "$path" == "null" || "$mount_tag" == "null" ]]; then
      log_warn "Skipping invalid workspace entry at index $i"
      i=$((i + 1))
      continue
    fi
    if [[ ! -d "$path" ]]; then
      log_warn "Workspace path does not exist: $path (skipping)"
      i=$((i + 1))
      continue
    fi
    log_info " - $path -> $mount_tag"
    cmd_array+=(-virtfs "local,path=$path,mount_tag=$mount_tag,security_model=mapped-xattr,trans=virtio,version=9p2000.L,msize=104857600")
    # BUGFIX: plain assignment — (( i++ )) returns status 1 when i was 0
    # and would abort the whole script under `set -e` on iteration one.
    i=$((i + 1))
  done
}
#
# cleanup_on_failure - Remove stale PID/port state files after a failed
# VM start so the next attempt begins from a clean slate.
#
cleanup_on_failure() {
  log_warn "Cleaning up after failed start..."
  rm -f -- "$QVM_PID_FILE" "$QVM_SSH_PORT_FILE"
}
#
# main - Start the VM end-to-end.
#
# Flow: bail out early when already running; build the base image on
# first run; create the copy-on-write overlay; pick a free SSH port;
# assemble the QEMU command line (disk, network, 9p cache + workspace
# shares); daemonize QEMU; finally wait for SSH before reporting success.
#
# Globals (read): QVM_BASE_IMAGE, QVM_OVERLAY, QVM_MEMORY, QVM_CPUS,
#   QVM_CARGO_HOME, QVM_CARGO_TARGET, QVM_PNPM_STORE, QVM_SCCACHE,
#   QVM_SERIAL_LOG, QVM_PID_FILE, QVM_SSH_PORT_FILE
#
main() {
log_info "Starting QVM..."
# Check if VM is already running; starting twice would clobber the PID
# file and fight over the overlay, so this is a friendly no-op.
if is_vm_running; then
log_info "VM is already running"
local port
port=$(get_ssh_port)
echo "SSH available on port: $port"
echo "Use 'qvm ssh' to connect or 'qvm status' for details"
exit 0
fi
# First-run initialization
ensure_dirs
if [[ ! -f "$QVM_BASE_IMAGE" ]]; then
log_info "First run detected - building base image..."
log_info "This may take several minutes."
# Call qvm-rebuild to build the image
# NOTE(review): SCRIPT_DIR is assigned without `local`, so it leaks as
# a global — presumably harmless since the script exits after main,
# but confirm.
SCRIPT_DIR="$(dirname "$0")"
if ! "$SCRIPT_DIR/qvm-rebuild"; then
die "Failed to build base image. Run 'qvm rebuild' manually to debug."
fi
fi
# Create overlay image if it doesn't exist; the overlay is a qcow2
# copy-on-write layer backed by the (read-only) base image.
if [[ ! -f "$QVM_OVERLAY" ]]; then
log_info "Creating overlay disk..."
if ! qemu-img create -f qcow2 -b "$QVM_BASE_IMAGE" -F qcow2 "$QVM_OVERLAY"; then
die "Failed to create overlay disk"
fi
else
log_info "Using existing overlay disk"
fi
# Find available SSH port (starting at 2222)
local ssh_port
ssh_port=$(find_available_port 2222)
log_info "Using SSH port: $ssh_port"
# Get memory and CPU settings from environment or use defaults
local memory="${QVM_MEMORY:-8G}"
local cpus="${QVM_CPUS:-4}"
log_info "VM resources: ${memory} memory, ${cpus} CPUs"
# Build QEMU command
local qemu_cmd=(
qemu-system-x86_64
-enable-kvm
-cpu host
-m "$memory"
-smp "$cpus"
# Overlay disk (virtio for performance)
-drive "file=$QVM_OVERLAY,if=virtio,format=qcow2"
# User-mode networking with SSH port forward
-netdev "user,id=net0,hostfwd=tcp::${ssh_port}-:22"
-device "virtio-net-pci,netdev=net0"
# 9p mounts for shared caches (security_model=mapped-xattr for proper permissions)
-virtfs "local,path=$QVM_CARGO_HOME,mount_tag=cargo_home,security_model=mapped-xattr,trans=virtio,version=9p2000.L,msize=104857600"
-virtfs "local,path=$QVM_CARGO_TARGET,mount_tag=cargo_target,security_model=mapped-xattr,trans=virtio,version=9p2000.L,msize=104857600"
-virtfs "local,path=$QVM_PNPM_STORE,mount_tag=pnpm_store,security_model=mapped-xattr,trans=virtio,version=9p2000.L,msize=104857600"
-virtfs "local,path=$QVM_SCCACHE,mount_tag=sccache,security_model=mapped-xattr,trans=virtio,version=9p2000.L,msize=104857600"
)
# Add workspace mounts from registry (appends -virtfs entries via nameref)
mount_workspaces qemu_cmd
# Continue building QEMU command
qemu_cmd+=(
# Serial console to log file
-serial "file:$QVM_SERIAL_LOG"
# No graphics
-nographic
# Daemonize with PID file
-daemonize
-pidfile "$QVM_PID_FILE"
)
# Launch QEMU
log_info "Launching QEMU..."
if ! "${qemu_cmd[@]}"; then
cleanup_on_failure
die "Failed to start QEMU"
fi
# Save SSH port to file so other qvm commands can find the forward
echo "$ssh_port" > "$QVM_SSH_PORT_FILE"
# Wait for SSH to become available (up to 60s); a VM that boots but
# never answers SSH is treated as a failed start.
if ! wait_for_ssh "$ssh_port" 60; then
cleanup_on_failure
die "VM started but SSH did not become available"
fi
# Success!
log_info "VM started successfully"
echo ""
echo "SSH available on port: $ssh_port"
echo "Connect with: qvm ssh"
echo "Check status: qvm status"
echo "Serial log: $QVM_SERIAL_LOG"
}
# Run main function
main "$@"

228
bin/qvm-status Executable file
View file

@@ -0,0 +1,228 @@
#!/usr/bin/env bash
#
# qvm-status - Display VM state, configuration, and connection information
#
# Shows current VM status including:
# - Running state (PID, uptime, SSH port)
# - Mounted workspaces from workspaces.json
# - Cache directory status
# - Base image and overlay details
# - Connection hints for SSH and run commands
#
# Exit codes:
# 0 - VM is running
# 1 - VM is stopped
set -euo pipefail
# Source common library for shared functions and constants
readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$0")/../lib}"
source "${QVM_LIB_DIR}/common.sh"
# Additional color codes for status display
if [[ -t 1 ]]; then
readonly COLOR_SUCCESS='\033[0;32m' # Green
readonly COLOR_HEADER='\033[1;37m' # Bold White
readonly COLOR_DIM='\033[0;90m' # Dim Gray
else
readonly COLOR_SUCCESS=''
readonly COLOR_HEADER=''
readonly COLOR_DIM=''
fi
#
# format_bytes - Convert a byte count to a short human-readable string.
# Args: $1 - size in bytes (non-negative integer)
# Outputs: e.g. "1.5G", "256.0M", "4.0K", "512B" on stdout
#
# Implemented with pure integer arithmetic, truncating to one decimal
# place exactly as `bc` with scale=1 did. This removes the external
# dependency on bc and avoids `printf %.1f`, whose decimal separator
# is locale-dependent (a comma in many locales).
#
format_bytes() {
  local bytes="$1"
  local unit divisor
  if (( bytes >= 1073741824 )); then
    unit=G; divisor=1073741824
  elif (( bytes >= 1048576 )); then
    unit=M; divisor=1048576
  elif (( bytes >= 1024 )); then
    unit=K; divisor=1024
  else
    printf '%dB' "$bytes"
    return 0
  fi
  # Scale by 10 to keep one truncated decimal digit in integer math.
  local tenths=$(( bytes * 10 / divisor ))
  printf '%d.%d%s' $(( tenths / 10 )) $(( tenths % 10 )) "$unit"
}
#
# get_uptime - Human-readable elapsed time for a running process.
# Args: $1 - process PID
# Outputs: "Xh Ym", "Xm Ys", "Xs", or "unknown" when the start time
#          cannot be determined.
# NOTE(review): relies on GNU `date -d` parsing `ps -o lstart` output;
# presumably Linux-only — BSD/macOS date would need -j -f instead.
#
get_uptime() {
  local pid="$1"
  # Process start time as seconds since the epoch.
  local started
  started=$(ps -p "$pid" -o lstart= 2>/dev/null | xargs -I{} date -d "{}" +%s)
  [[ -n "$started" ]] || { echo "unknown"; return; }

  local now elapsed
  now=$(date +%s)
  elapsed=$((now - started))

  # Break into h/m/s and print the two most significant fields.
  local h=$((elapsed / 3600))
  local m=$(( (elapsed % 3600) / 60 ))
  local s=$((elapsed % 60))
  if (( h > 0 )); then
    printf '%dh %dm' "$h" "$m"
  elif (( m > 0 )); then
    printf '%dm %ds' "$m" "$s"
  else
    printf '%ds' "$s"
  fi
}
#
# show_file_info - Print one status line for a file: size plus mtime
# when present, or a "missing" marker otherwise.
# Args: $1 - file path
#       $2 - display label (e.g. "Base Image")
# NOTE(review): `stat -c` is GNU coreutils syntax; BSD stat uses -f.
#
show_file_info() {
  local file="$1"
  local label="$2"
  if [[ ! -f "$file" ]]; then
    echo -e " ${COLOR_WARN}✗${COLOR_RESET} $label: ${COLOR_WARN}missing${COLOR_RESET}"
    return
  fi
  local raw_size human_size mtime
  raw_size=$(stat -c %s "$file" 2>/dev/null || echo "0")
  human_size=$(format_bytes "$raw_size")
  # Trim fractional seconds from the timestamp.
  mtime=$(stat -c %y "$file" 2>/dev/null | cut -d. -f1)
  echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} $label: ${COLOR_INFO}$human_size${COLOR_RESET} ${COLOR_DIM}(modified: $mtime)${COLOR_RESET}"
}
#
# show_dir_info - Print one status line for a cache directory: its path
# when present, or a "not created" marker otherwise.
# Args: $1 - directory path
#       $2 - display label (e.g. "Cargo Home")
#
show_dir_info() {
  local dir="$1"
  local label="$2"
  if [[ ! -d "$dir" ]]; then
    echo -e " ${COLOR_WARN}✗${COLOR_RESET} $label: ${COLOR_WARN}not created${COLOR_RESET}"
  else
    echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} $label: ${COLOR_DIM}$dir${COLOR_RESET}"
  fi
}
#
# show_workspaces - List registered workspaces from workspaces.json,
# or a dimmed placeholder when the registry is absent/empty/invalid.
#
# Each registry entry (written by qvm-run's register_workspace) has
# host_path, hash, mount_tag, and guest_path fields.
#
# BUGFIX: print each entry's "hash" field. The previous to_entries
# query printed the array index (`.key`) in the hash column, which
# was misleading and unstable across re-registration order.
#
show_workspaces() {
  if [[ ! -f "$QVM_WORKSPACES_FILE" ]]; then
    echo -e " ${COLOR_DIM}No workspaces mounted${COLOR_RESET}"
    return
  fi
  # Invalid JSON degrades to a count of 0 and the placeholder line.
  local workspace_count
  workspace_count=$(jq 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0")
  if (( workspace_count == 0 )); then
    echo -e " ${COLOR_DIM}No workspaces mounted${COLOR_RESET}"
    return
  fi
  # One line per workspace: hash, host path, guest path.
  jq -r '.[] | "\(.hash)|\(.host_path)|\(.guest_path)"' "$QVM_WORKSPACES_FILE" 2>/dev/null | while IFS='|' read -r hash host_path guest_path; do
    echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} $hash: ${COLOR_INFO}$host_path${COLOR_RESET} → ${COLOR_DIM}$guest_path${COLOR_RESET}"
  done
}
#
# main - Render the full status report: VM state (PID/port/uptime),
# mounted workspaces, cache directories, image files, and contextual
# connection or quick-start hints.
#
# Exit codes: 0 when the VM is running, 1 when stopped — so the command
# doubles as a scriptable "is the VM up?" check.
#
main() {
# Header
echo -e "${COLOR_HEADER}QVM Status${COLOR_RESET}"
echo ""
# VM State
echo -e "${COLOR_HEADER}VM State:${COLOR_RESET}"
if is_vm_running; then
local pid
pid=$(cat "$QVM_PID_FILE")
# SSH port file may be missing if start was interrupted mid-way.
local ssh_port
if [[ -f "$QVM_SSH_PORT_FILE" ]]; then
ssh_port=$(cat "$QVM_SSH_PORT_FILE")
else
ssh_port="unknown"
fi
local uptime
uptime=$(get_uptime "$pid")
echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} Running"
echo -e " ${COLOR_DIM}PID:${COLOR_RESET} $pid"
echo -e " ${COLOR_DIM}SSH:${COLOR_RESET} localhost:$ssh_port"
echo -e " ${COLOR_DIM}Uptime:${COLOR_RESET} $uptime"
else
echo -e " ${COLOR_WARN}✗${COLOR_RESET} Stopped"
fi
echo ""
# Workspaces
echo -e "${COLOR_HEADER}Mounted Workspaces:${COLOR_RESET}"
show_workspaces
echo ""
# Cache Directories
echo -e "${COLOR_HEADER}Cache Directories:${COLOR_RESET}"
show_dir_info "$QVM_CARGO_HOME" "Cargo Home"
show_dir_info "$QVM_CARGO_TARGET" "Cargo Target"
show_dir_info "$QVM_PNPM_STORE" "PNPM Store"
show_dir_info "$QVM_SCCACHE" "SCCache"
echo ""
# VM Images
echo -e "${COLOR_HEADER}VM Images:${COLOR_RESET}"
show_file_info "$QVM_BASE_IMAGE" "Base Image"
show_file_info "$QVM_OVERLAY" "Overlay"
echo ""
# Connection Hints (only if VM is running)
if is_vm_running; then
local ssh_port
ssh_port=$(cat "$QVM_SSH_PORT_FILE" 2>/dev/null || echo "unknown")
echo -e "${COLOR_HEADER}Connection:${COLOR_RESET}"
echo -e " ${COLOR_INFO}SSH:${COLOR_RESET} qvm ssh"
echo -e " ${COLOR_INFO}Run cmd:${COLOR_RESET} qvm run <command>"
echo -e " ${COLOR_INFO}Direct:${COLOR_RESET} ssh -p $ssh_port root@localhost"
echo ""
# Exit success if running
exit 0
else
echo -e "${COLOR_HEADER}Quick Start:${COLOR_RESET}"
echo -e " ${COLOR_INFO}Start VM:${COLOR_RESET} qvm start"
echo ""
# Exit failure if stopped
exit 1
fi
}
# Execute main
main "$@"

94
bin/qvm-stop Executable file
View file

@@ -0,0 +1,94 @@
#!/usr/bin/env bash
#
# qvm-stop - Gracefully shut down the QEMU VM
#
# This script stops the running VM by sending SIGTERM first for graceful
# shutdown, waiting up to 30 seconds, then sending SIGKILL if necessary.
# It cleans up state files (vm.pid, ssh.port) after shutdown completes.
#
# Usage: qvm stop
#
set -euo pipefail
# Source common library
readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../lib" && pwd)}"
source "${QVM_LIB_DIR}/common.sh"
# Timeout for graceful shutdown in seconds
readonly SHUTDOWN_TIMEOUT=30
#
# wait_for_process_exit - Poll once per second until a process is gone
# or the timeout elapses.
# Args: $1 - PID to wait for
#       $2 - timeout in seconds
# Returns: 0 if the process exited, 1 on timeout
#
# FIX: uses plain arithmetic assignment instead of (( elapsed++ )).
# The post-increment returns status 1 when the old value is 0; that is
# currently masked because this script only calls the function inside
# an `if` condition (where errexit is suspended), but it would abort
# any future caller running under `set -e` outside a condition.
#
wait_for_process_exit() {
  local pid="$1"
  local timeout="$2"
  local elapsed=0
  while (( elapsed < timeout )); do
    # kill -0 only probes for existence; no signal is delivered.
    if ! kill -0 "$pid" 2>/dev/null; then
      return 0
    fi
    sleep 1
    elapsed=$((elapsed + 1))
  done
  return 1
}
#
# main - Stop the VM: SIGTERM first for a clean guest shutdown, SIGKILL
# after SHUTDOWN_TIMEOUT seconds, then remove the pid/port state files.
#
# Exits 0 when the VM is not running, making `qvm stop` idempotent.
#
main() {
# Check if VM is running
if ! is_vm_running; then
log_info "VM is not running"
exit 0
fi
# Get VM process PID
local vm_pid
vm_pid=$(cat "$QVM_PID_FILE")
log_info "Shutting down VM (PID: $vm_pid)..."
# Send SIGTERM for graceful shutdown; a failure here means the process
# is already gone (e.g. crashed), which is handled below.
if kill -TERM "$vm_pid" 2>/dev/null; then
log_info "Sent SIGTERM, waiting up to ${SHUTDOWN_TIMEOUT}s for graceful shutdown..."
if wait_for_process_exit "$vm_pid" "$SHUTDOWN_TIMEOUT"; then
log_info "VM shut down gracefully"
else
log_warn "Graceful shutdown timeout, forcefully terminating..."
# Send SIGKILL to force termination
if kill -KILL "$vm_pid" 2>/dev/null; then
# Wait briefly to ensure process is dead
sleep 1
# Verify process is actually dead (kill -0 probes without signaling)
if kill -0 "$vm_pid" 2>/dev/null; then
die "Failed to kill VM process $vm_pid"
fi
log_info "VM forcefully terminated"
else
log_warn "Process $vm_pid already terminated"
fi
fi
else
log_warn "Process $vm_pid already terminated (could not send SIGTERM)"
fi
# Clean up state files so qvm-start begins from a clean slate
log_info "Cleaning up state files..."
rm -f "$QVM_PID_FILE" "$QVM_SSH_PORT_FILE"
log_info "VM stopped successfully"
}
main "$@"