qvm/bin/qvm-start

215 lines
6.8 KiB
Bash
Executable file

#!/usr/bin/env bash
#
# qvm-start - Launch the QVM using the NixOS VM runner
#
# This script starts the QVM virtual machine by:
# - Building the VM if not already built
# - Configuring QEMU options via environment variables
# - Adding 9p mounts for caches and workspaces
# - Starting the VM in the background
# - Waiting for SSH to become available
#
set -euo pipefail
# Source common library
QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}"
# shellcheck source=lib/common.sh
source "$QVM_LIB_DIR/common.sh"
#
# find_available_port - Find an available TCP port starting from base
#
find_available_port() {
  # Probe ports upward from the requested base until one is free.
  # Arguments: $1 - first port to try (default 2222)
  # Outputs:   prints the first available port to stdout
  # Returns:   0 on success; dies after 100 consecutive busy ports
  local candidate="${1:-2222}"
  local -r limit=100
  local tries
  for (( tries = 0; tries < limit; tries++ )); do
    # nc -z probes without sending data; a refused connect means the port is free
    if ! nc -z localhost "$candidate" 2>/dev/null; then
      echo "$candidate"
      return 0
    fi
    candidate=$(( candidate + 1 ))
  done
  die "Could not find available port after $limit attempts"
}
#
# build_qemu_opts - Build QEMU_OPTS environment variable with 9p mounts
#
build_qemu_opts() {
  # Assemble the QEMU option string (9p mounts, serial log, daemonize flags).
  #
  # Globals (read): QVM_CARGO_HOME, QVM_CARGO_TARGET, QVM_PNPM_STORE,
  #                 QVM_SCCACHE, QVM_HOST_OPENCODE_CONFIG, QVM_WORKSPACES_FILE,
  #                 QVM_SERIAL_LOG, QVM_PID_FILE
  # Arguments: $1 - SSH port (unused here; kept for interface stability —
  #            port forwarding is configured via QEMU_NET_OPTS by the caller)
  # Outputs:   prints the assembled option string to stdout
  #
  # NOTE(review): this function's stdout is captured by the caller via
  # $(build_qemu_opts ...), so the log_info calls below assume log_info
  # writes to stderr — confirm in lib/common.sh.
  local ssh_port="$1"  # intentionally unused; see header note
  local opts=""
  # 9p mounts for shared caches
  opts+="-virtfs local,path=$QVM_CARGO_HOME,mount_tag=cargo_home,security_model=mapped-xattr "
  opts+="-virtfs local,path=$QVM_CARGO_TARGET,mount_tag=cargo_target,security_model=mapped-xattr "
  opts+="-virtfs local,path=$QVM_PNPM_STORE,mount_tag=pnpm_store,security_model=mapped-xattr "
  opts+="-virtfs local,path=$QVM_SCCACHE,mount_tag=sccache,security_model=mapped-xattr "
  # Mount host opencode config if it exists
  if [[ -d "$QVM_HOST_OPENCODE_CONFIG" ]]; then
    log_info "Adding opencode config mount..."
    opts+="-virtfs local,path=$QVM_HOST_OPENCODE_CONFIG,mount_tag=opencode_config,security_model=mapped-xattr "
  fi
  # Add workspace mounts from the JSON registry
  if [[ -f "$QVM_WORKSPACES_FILE" && -s "$QVM_WORKSPACES_FILE" ]]; then
    local workspace_count
    workspace_count=$(jq -r 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0")
    if [[ "$workspace_count" -gt 0 ]]; then
      log_info "Adding $workspace_count workspace mount(s)..."
      # Single jq pass over the registry instead of two jq forks per entry
      # (the previous per-index jq calls cost O(n) process spawns). Entries
      # with null/missing fields are filtered inside jq. Paths containing a
      # literal tab would break the TSV split — not expected for mount paths.
      # Process substitution (not a pipe) so opts+= runs in this shell.
      local w_path w_tag
      while IFS=$'\t' read -r w_path w_tag; do
        if [[ -n "$w_path" && -n "$w_tag" && -d "$w_path" ]]; then
          log_info " - $w_path -> $w_tag"
          opts+="-virtfs local,path=$w_path,mount_tag=$w_tag,security_model=mapped-xattr "
        fi
      done < <(jq -r '.[] | select(.host_path != null and .mount_tag != null) | "\(.host_path)\t\(.mount_tag)"' "$QVM_WORKSPACES_FILE" 2>/dev/null)
    fi
  fi
  # Serial console to log file and daemonize
  opts+="-serial file:$QVM_SERIAL_LOG "
  opts+="-display none "
  opts+="-daemonize "
  opts+="-pidfile $QVM_PID_FILE "
  echo "$opts"
}
#
# cleanup_on_failure - Clean up state files if VM start fails
#
cleanup_on_failure() {
  # Remove stale state files so a failed start doesn't look like a running VM.
  log_warn "Cleaning up after failed start..."
  local state_file
  for state_file in "$QVM_PID_FILE" "$QVM_SSH_PORT_FILE"; do
    rm -f "$state_file"
  done
}
#
# main - Main execution flow
#
main() {
# Start the QVM: build the runner if missing, assemble QEMU options, launch
# in the background (-daemonize), and wait for SSH to come up.
# Globals (read): QVM_VM_RUNNER, QVM_CONFIG_FILE, QVM_STATE_DIR,
#                 QVM_PID_FILE, QVM_SSH_PORT_FILE, QVM_SERIAL_LOG,
#                 QVM_MEMORY, QVM_CPUS (the last two optional, via config/env)
log_info "Starting QVM..."
# Fast path: if a VM is already up, just report the existing SSH port.
if is_vm_running; then
log_info "VM is already running"
local port
port=$(get_ssh_port)
echo "SSH available on port: $port"
echo "Use 'qvm ssh' to connect or 'qvm status' for details"
exit 0
fi
# First-run initialization
ensure_dirs
# Source config file if it exists (sets QVM_MEMORY, QVM_CPUS, etc.)
# Check system-wide config first, then user config (user overrides system)
if [[ -f "/etc/xdg/qvm/qvm.conf" ]]; then
source "/etc/xdg/qvm/qvm.conf"
fi
if [[ -f "$QVM_CONFIG_FILE" ]]; then
source "$QVM_CONFIG_FILE"
fi
# Check if VM runner exists, build if not. The runner is expected to be a
# symlink (a nix-style result link); both the link and its resolved target
# must exist.
if [[ ! -L "$QVM_VM_RUNNER" || ! -f "$(readlink -f "$QVM_VM_RUNNER" 2>/dev/null || echo "")" ]]; then
log_info "First run detected - building VM..."
log_info "This may take several minutes."
# NOTE(review): SCRIPT_DIR is not declared 'local' (leaks into global scope)
# and uses dirname "$0" rather than the readlink -f resolution used for
# QVM_LIB_DIR at the top of the file — behaves differently if this script is
# invoked through a symlink. Consider aligning the two.
SCRIPT_DIR="$(dirname "$0")"
if ! "$SCRIPT_DIR/qvm-rebuild"; then
die "Failed to build VM. Run 'qvm rebuild' manually to debug."
fi
fi
# Verify VM runner exists now
if [[ ! -L "$QVM_VM_RUNNER" ]]; then
die "VM runner not found at $QVM_VM_RUNNER. Run 'qvm rebuild' first."
fi
local vm_script
vm_script=$(readlink -f "$QVM_VM_RUNNER")
if [[ ! -f "$vm_script" ]]; then
die "VM runner script not found. Run 'qvm rebuild' to fix."
fi
# Find available SSH port for the host-side hostfwd
local ssh_port
ssh_port=$(find_available_port 2222)
log_info "Using SSH port: $ssh_port"
# Get memory and CPU settings from environment or use defaults
local memory="${QVM_MEMORY:-30G}"
local cpus="${QVM_CPUS:-30}"
log_info "VM resources: ${memory} memory, ${cpus} CPUs"
# Build QEMU options.
# NOTE(review): build_qemu_opts also emits log_info lines while its stdout is
# captured here — assumes log_info writes to stderr, otherwise log text would
# end up inside qemu_opts. Confirm in lib/common.sh.
local qemu_opts
qemu_opts=$(build_qemu_opts "$ssh_port")
# Launch VM using the NixOS runner script
# The runner script respects these environment variables:
# - QEMU_OPTS: additional QEMU options
# - QEMU_NET_OPTS: user-mode network options (used here for the SSH hostfwd)
# - NIX_DISK_IMAGE: path to disk image (optional, uses tmpdir by default)
log_info "Launching VM..."
# Create persistent disk image location if needed
local disk_image="$QVM_STATE_DIR/qvm-dev.qcow2"
export QEMU_OPTS="$qemu_opts -m $memory -smp $cpus"
export QEMU_NET_OPTS="hostfwd=tcp::${ssh_port}-:22"
export NIX_DISK_IMAGE="$disk_image"
# Run VM - the script uses exec with qemu's -daemonize flag, so it returns quickly
if ! "$vm_script" &>/dev/null; then
cleanup_on_failure
die "Failed to start VM"
fi
# Wait a moment for QEMU to create PID file.
# NOTE(review): a fixed sleep is a race — on a slow host the PID file may not
# exist yet, and the pgrep fallback below could match the wrong process.
sleep 2
# If PID file wasn't created by our QEMU_OPTS, get it from the background process
if [[ ! -f "$QVM_PID_FILE" ]]; then
# Try to find the QEMU process (best-effort; pattern assumes the disk image
# name "qvm-dev" appears on the qemu command line)
local qemu_pid
qemu_pid=$(pgrep -f "qemu.*qvm-dev" | head -1 || echo "")
if [[ -n "$qemu_pid" ]]; then
echo "$qemu_pid" > "$QVM_PID_FILE"
fi
fi
# Save SSH port to file so other qvm subcommands can find it
echo "$ssh_port" > "$QVM_SSH_PORT_FILE"
# Wait for SSH to become available (120s timeout)
if ! wait_for_ssh "$ssh_port" 120; then
cleanup_on_failure
die "VM started but SSH did not become available. Check: $QVM_SERIAL_LOG"
fi
# Success!
log_info "VM started successfully"
echo ""
echo "SSH available on port: $ssh_port"
echo "Connect with: qvm ssh"
echo "Check status: qvm status"
echo "Serial log: $QVM_SERIAL_LOG"
}
# Run main function
main "$@"