Replace Bash qvm scripts with Go CLI implementation

This commit is contained in:
Joshua Bell 2026-01-26 20:48:32 -06:00
parent ffb456707f
commit 2a6a333721
27 changed files with 2551 additions and 1702 deletions

2
.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
result
qvm

View file

@ -522,12 +522,13 @@ $(pwd) ──9p──→ /workspace/{hash}/
 ## Contributing
-Contributions welcome! This is a simple Bash-based tool designed to be readable and hackable.
+Contributions welcome! This is a Go CLI tool designed to be readable and maintainable.
 **Key files:**
-- `bin/qvm` - Main dispatcher
-- `bin/qvm-*` - Subcommand implementations
-- `lib/common.sh` - Shared utilities and paths
+- `cmd/qvm/` - CLI command implementations (Cobra)
+- `internal/vm/` - VM lifecycle and QEMU management
+- `internal/workspace/` - Workspace registration and mounting
+- `internal/config/` - Configuration and XDG paths
 - `flake/default-vm/flake.nix` - Default VM template
 **Development:**
@ -536,7 +537,12 @@ Contributions welcome! This is a simple Bash-based tool designed to be readable
 git clone https://github.com/yourusername/qvm
 cd qvm
 nix develop
-./bin/qvm start
+# Build the Go binary
+go build ./cmd/qvm
+# Run locally
+./qvm start
 ```
 ## License
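The `cmd/qvm/` layout described above is the standard Cobra pattern. A rough sketch of how a subcommand might be wired up (the command names come from the existing CLI; the variable names and file layout here are illustrative, not taken from the actual implementation):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// rootCmd is the top-level qvm command; subcommands attach to it.
var rootCmd = &cobra.Command{
	Use:   "qvm",
	Short: "QEMU Development VM Manager",
}

// statusCmd mirrors the old `qvm status` subcommand; the real implementation
// presumably lives in cmd/qvm/ and calls into internal/vm.
var statusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show VM status and information",
	RunE: func(cmd *cobra.Command, args []string) error {
		fmt.Println("qvm status: not implemented in this sketch")
		return nil
	},
}

func main() {
	rootCmd.AddCommand(statusCmd)
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```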

98
bin/qvm
View file

@ -1,98 +0,0 @@
#!/usr/bin/env bash
#
# qvm - Main dispatcher for QVM (QEMU Development VM) commands
#
# This script routes subcommands to their respective qvm-* implementations.
# It sources common.sh for shared configuration and utility functions.
#
set -euo pipefail
# Determine script directory for locating sibling scripts
readonly SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# Source common library (use QVM_LIB_DIR from wrapper or relative path for dev)
source "${QVM_LIB_DIR:-${SCRIPT_DIR}/../lib}/common.sh"
readonly VERSION="0.1.0"
#
# show_help - Display usage information
#
show_help() {
cat <<EOF
qvm - QEMU Development VM Manager
USAGE:
qvm <command> [args...]
COMMANDS:
start Start the VM (create if needed)
stop Stop the running VM
run Execute a command in the VM (or start shell if no command)
ssh Open SSH session or run command in VM
status Show VM status and information
rebuild Rebuild the base VM image from flake
reset Delete overlay and start fresh (keeps base image)
clean Remove ALL QVM data (images, state, caches)
OPTIONS:
-h, --help Show this help message
-v, --version Show version information
EXAMPLES:
qvm start Start the VM
qvm ssh Open interactive SSH session
qvm run 'ls -la' Run command in VM
qvm status Check if VM is running
qvm stop Stop the VM
For more information on a specific command, run:
qvm <command> --help
EOF
}
#
# show_version - Display version information
#
show_version() {
echo "qvm version ${VERSION}"
}
#
# main - Parse arguments and route to subcommand
#
main() {
# Handle no arguments
if [[ $# -eq 0 ]]; then
show_help
exit 0
fi
local subcommand="$1"
shift
case "$subcommand" in
start|stop|run|ssh|status|rebuild|reset|clean)
# Route to the appropriate qvm-* script
# Use exec to replace this process with the subcommand
exec "${SCRIPT_DIR}/qvm-${subcommand}" "$@"
;;
help|--help|-h)
show_help
exit 0
;;
--version|-v)
show_version
exit 0
;;
*)
log_error "Unknown command: ${subcommand}"
echo "" >&2
show_help >&2
exit 1
;;
esac
}
main "$@"

View file

@ -1,114 +0,0 @@
#!/usr/bin/env bash
#
# qvm-clean - Completely remove all QVM state, images, and caches
#
# This script performs a full cleanup of all QVM-related data:
# - Base image (base.qcow2)
# - VM overlay and state (overlay.qcow2, pid, ssh port, logs, workspaces)
# - Build caches (cargo, pnpm, sccache)
# - Optionally: user configuration (flake)
#
# WARNING: This is destructive and cannot be undone!
#
# Usage: qvm clean [-f|--force]
# -f, --force Skip confirmation prompt
#
set -euo pipefail
# Source common library
readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../lib" && pwd)}"
source "${QVM_LIB_DIR}/common.sh"
# Get path to qvm-stop script
readonly QVM_STOP="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-stop"
#
# confirm_clean - Prompt user for confirmation
# Args: $1 - whether to delete config (true/false)
# Returns: 0 if user confirms, exits script if user cancels
#
confirm_clean() {
echo
log_warn "This will delete ALL QVM data:"
echo " - Base image: $QVM_DATA_DIR"
echo " - State/overlay: $QVM_STATE_DIR"
echo " - Build caches: $QVM_CACHE_DIR"
echo " - Config/flake: $QVM_CONFIG_DIR"
echo
log_warn "This operation CANNOT be undone!"
echo "You will need to rebuild the base image from scratch next time."
echo
read -p "Are you absolutely sure? [y/N] " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
log_info "Clean cancelled"
exit 0
fi
}
#
# main - Main cleanup orchestration
#
main() {
local force=false
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
-f|--force)
force=true
shift
;;
*)
die "Unknown option: $1"
;;
esac
done
# Confirm unless --force is used
if [[ "$force" != "true" ]]; then
confirm_clean
fi
# Stop VM if running
if is_vm_running; then
log_info "Stopping running VM..."
"$QVM_STOP"
fi
# Delete directories
log_info "Removing QVM data directories..."
if [[ -d "$QVM_DATA_DIR" ]]; then
log_info " - Deleting: $QVM_DATA_DIR"
rm -rf "$QVM_DATA_DIR"
fi
if [[ -d "$QVM_STATE_DIR" ]]; then
log_info " - Deleting: $QVM_STATE_DIR"
rm -rf "$QVM_STATE_DIR"
fi
if [[ -d "$QVM_CACHE_DIR" ]]; then
log_info " - Deleting: $QVM_CACHE_DIR"
rm -rf "$QVM_CACHE_DIR"
fi
if [[ -d "$QVM_CONFIG_DIR" ]]; then
log_info " - Deleting: $QVM_CONFIG_DIR"
rm -rf "$QVM_CONFIG_DIR"
fi
# Print success message
echo
log_info "QVM cleaned successfully!"
echo
echo "All QVM data has been removed from your system."
echo "Next run of 'qvm start' will initialize everything from scratch."
echo
}
main "$@"

View file

@ -1,151 +0,0 @@
#!/usr/bin/env bash
#
# qvm-rebuild - Build the base qcow2 image from user's flake
#
# This script builds the QVM base image by:
# - Ensuring a user flake exists (copying default if needed)
# - Running nix build on the user's flake configuration
# - Copying the resulting qcow2 to the base image location
# - Optionally warning if VM is running
#
set -euo pipefail
# Source common library
QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}"
# shellcheck source=lib/common.sh
source "$QVM_LIB_DIR/common.sh"
#
# ensure_user_flake - Ensure user's flake exists, copy default if missing
#
ensure_user_flake() {
if [[ -f "$QVM_USER_FLAKE/flake.nix" ]]; then
log_info "Using existing user flake: $QVM_USER_FLAKE/flake.nix"
return 0
fi
log_info "User flake not found, copying default template..."
# Determine default flake location
# In installed version: $QVM_LIB_DIR/../share/qvm/default-vm/
# In development: $(dirname "$0")/../flake/default-vm/
local default_flake_dir
# Try installed location first ($QVM_LIB_DIR is $out/lib/qvm)
if [[ -d "$QVM_LIB_DIR/../../share/qvm/default-vm" ]]; then
default_flake_dir="$QVM_LIB_DIR/../../share/qvm/default-vm"
else
# Fall back to development location
default_flake_dir="$(dirname "$(readlink -f "$0")")/../flake/default-vm"
fi
if [[ ! -d "$default_flake_dir" ]]; then
die "Default flake template not found at: $default_flake_dir"
fi
if [[ ! -f "$default_flake_dir/flake.nix" ]]; then
die "Default flake.nix not found at: $default_flake_dir/flake.nix"
fi
# Create user flake directory and copy template
mkdir -p "$QVM_USER_FLAKE"
cp -r "$default_flake_dir"/* "$QVM_USER_FLAKE/"
log_info "Default flake copied to: $QVM_USER_FLAKE"
echo ""
echo "You can customize your VM by editing: $QVM_USER_FLAKE/flake.nix"
echo ""
}
#
# build_vm - Build the VM runner using nix
#
build_base_image() {
log_info "Building VM from flake..."
# Build the VM output from user's flake
local build_result="$QVM_STATE_DIR/vm-result"
if ! nix build "$QVM_USER_FLAKE#vm" --out-link "$build_result"; then
die "Failed to build VM. Check your flake configuration at: $QVM_USER_FLAKE/flake.nix"
fi
# Verify the result contains the VM runner script
local vm_runner="$build_result/bin/run-qvm-dev-vm"
if [[ ! -f "$vm_runner" ]]; then
# Try alternate name pattern
vm_runner=$(find "$build_result/bin" -name "run-*-vm" -type f 2>/dev/null | head -1)
if [[ -z "$vm_runner" || ! -f "$vm_runner" ]]; then
die "Build succeeded but VM runner script not found in: $build_result/bin/"
fi
fi
# Move the result symlink to data dir (keeps nix store reference)
rm -f "$QVM_DATA_DIR/vm-result"
mv "$build_result" "$QVM_DATA_DIR/vm-result"
# Get the basename of the runner script and construct path in new location
local runner_name
runner_name=$(basename "$vm_runner")
vm_runner="$QVM_DATA_DIR/vm-result/bin/$runner_name"
# Create a symlink to the VM runner at our standard location
log_info "Installing VM runner to: $QVM_VM_RUNNER"
rm -f "$QVM_VM_RUNNER"
ln -sf "$vm_runner" "$QVM_VM_RUNNER"
log_info "VM built successfully"
echo ""
echo "VM runner: $QVM_VM_RUNNER"
}
#
# warn_if_running - Warn user if VM is currently running
#
warn_if_running() {
if is_vm_running; then
log_warn "VM is currently running"
echo ""
echo "The new base image will only take effect after restarting the VM:"
echo " qvm stop"
echo " qvm start"
echo ""
echo "Note: Changes to your VM overlay will be preserved."
echo " Use 'qvm reset' to start fresh with the new base image."
echo ""
fi
}
#
# main - Main execution flow
#
main() {
log_info "Rebuilding QVM base image..."
# Ensure required directories exist
ensure_dirs
# Ensure user has a flake configuration
ensure_user_flake
# Build the base image
build_base_image
# Warn if VM is running
warn_if_running
# Print next steps
echo "Next steps:"
if is_vm_running; then
echo " 1. Stop the VM: qvm stop"
echo " 2. Start the VM: qvm start"
else
echo " - Start the VM: qvm start"
fi
echo " - Customize the VM: edit $QVM_USER_FLAKE/flake.nix"
echo " - Reset to fresh state: qvm reset"
}
# Run main function
main "$@"

View file

@ -1,115 +0,0 @@
#!/usr/bin/env bash
#
# qvm-reset - Wipe VM overlay and workspace registry
#
# This script resets the VM to a clean state by deleting the overlay.qcow2
# and workspaces.json files. This is useful when you want to start fresh
# or if the VM state has become corrupted.
#
# IMPORTANT: This does NOT delete the base image (base.qcow2), so you won't
# need to re-download or rebuild the NixOS image.
#
# Usage: qvm reset [-f|--force]
# -f, --force Skip confirmation prompt
#
set -euo pipefail
# Source common library
readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../lib" && pwd)}"
source "${QVM_LIB_DIR}/common.sh"
# Get path to qvm-stop script
readonly QVM_STOP="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-stop"
#
# confirm_reset - Prompt user for confirmation
# Returns: 0 if user confirms, exits script if user cancels
#
confirm_reset() {
echo
log_warn "This will delete:"
if [[ -f "$QVM_OVERLAY" ]]; then
echo " - $QVM_OVERLAY"
fi
if [[ -f "$QVM_WORKSPACES_FILE" ]]; then
echo " - $QVM_WORKSPACES_FILE"
fi
if [[ ! -f "$QVM_OVERLAY" ]] && [[ ! -f "$QVM_WORKSPACES_FILE" ]]; then
log_info "No files to delete (already clean)"
exit 0
fi
echo
echo "The base image (base.qcow2) and cache directories will NOT be deleted."
echo
read -p "Continue with reset? [y/N] " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
log_info "Reset cancelled"
exit 0
fi
}
#
# main - Main reset orchestration
#
main() {
local force=false
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
-f|--force)
force=true
shift
;;
*)
die "Unknown option: $1"
;;
esac
done
# Confirm unless --force is used
if [[ "$force" != true ]]; then
confirm_reset
fi
# Stop VM if running
if is_vm_running; then
log_info "VM is running, stopping it first..."
"$QVM_STOP"
fi
# Delete overlay if it exists
if [[ -f "$QVM_OVERLAY" ]]; then
log_info "Deleting overlay: $QVM_OVERLAY"
rm -f "$QVM_OVERLAY"
else
log_info "Overlay does not exist (nothing to delete)"
fi
# Delete workspaces registry if it exists
if [[ -f "$QVM_WORKSPACES_FILE" ]]; then
log_info "Deleting workspaces registry: $QVM_WORKSPACES_FILE"
rm -f "$QVM_WORKSPACES_FILE"
else
log_info "Workspaces registry does not exist (nothing to delete)"
fi
# Print success message with next steps
echo
log_info "Reset complete!"
echo
echo "Next steps:"
echo " - Run 'qvm start' to boot the VM with a fresh overlay"
echo " - Your base image (base.qcow2) is still intact"
echo " - Cache directories (cargo-home, pnpm-store, etc.) are preserved"
echo
}
main "$@"

View file

@ -1,309 +0,0 @@
#!/usr/bin/env bash
#
# qvm-run - Execute a command in the VM workspace
#
# This script:
# 1. Ensures VM is running (auto-starts if needed)
# 2. Registers current $PWD as a workspace in workspaces.json
# 3. SSHes into VM and executes command in workspace mount point
# 4. Streams output and preserves exit code
#
# Usage: qvm-run <command> [args...]
#
# Notes:
# - Workspaces are pre-mounted at VM start time (no dynamic 9p hotplug)
# - If workspace not already registered, warns to restart VM
# - Uses workspace hash for mount tag and path
#
set -euo pipefail
# Source common library
QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}"
# shellcheck source=lib/common.sh
source "$QVM_LIB_DIR/common.sh"
#
# show_usage - Display usage information
#
show_usage() {
cat <<EOF
Usage: qvm run <command> [args...]
Execute a command in the VM at the current workspace directory.
The current directory (\$PWD) is automatically registered as a workspace
and mounted into the VM. Commands run in the mounted workspace directory.
Examples:
qvm run cargo build
qvm run npm install
qvm run ls -la
qvm run bash -c "pwd && ls"
Notes:
- Workspaces are mounted at VM start time
- If this workspace is new, you'll need to restart the VM
- Command output streams to your terminal
- Exit code matches the command's exit code
EOF
}
#
# register_workspace - Add workspace to registry if not already present
# Args: $1 - absolute path to workspace
# $2 - workspace hash
# Returns: 0 if already registered, 1 if newly added (requires VM restart)
#
register_workspace() {
local workspace_path="$1"
local hash="$2"
local dir_name
dir_name=$(basename "$workspace_path")
local mount_tag="ws_${hash}"
local guest_path="/workspace/${hash}_${dir_name}"
# Create workspaces.json if it doesn't exist
if [[ ! -f "$QVM_WORKSPACES_FILE" ]]; then
echo '[]' > "$QVM_WORKSPACES_FILE"
fi
# Check if workspace already registered
if jq -e --arg path "$workspace_path" '.[] | select(.host_path == $path)' "$QVM_WORKSPACES_FILE" >/dev/null 2>&1; then
log_info "Workspace already registered: $workspace_path"
return 0
fi
# Add new workspace to registry
log_info "Registering new workspace: $workspace_path"
local temp_file
temp_file=$(mktemp)
jq --arg path "$workspace_path" \
--arg hash "$hash" \
--arg tag "$mount_tag" \
--arg guest "$guest_path" \
'. += [{
host_path: $path,
hash: $hash,
mount_tag: $tag,
guest_path: $guest
}]' "$QVM_WORKSPACES_FILE" > "$temp_file"
mv "$temp_file" "$QVM_WORKSPACES_FILE"
log_info "Workspace registered as $mount_tag -> $guest_path"
return 1 # Indicate new workspace added
}
#
# ensure_workspace_mounted - Mount workspace in VM if not already mounted
# Args: $1 - SSH port
# $2 - mount tag (e.g., ws_abc123)
# $3 - guest path (e.g., /workspace/abc123)
# Returns: 0 on success
#
ensure_workspace_mounted() {
local ssh_port="$1"
local mount_tag="$2"
local guest_path="$3"
# SSH into VM and mount the workspace
# - mkdir creates the mount point if missing
# - mount attempts to mount the 9p virtfs
# - || true ensures we don't fail if already mounted
sshpass -p root ssh -o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
-o LogLevel=ERROR \
-o PubkeyAuthentication=no \
-o PasswordAuthentication=yes \
-p "$ssh_port" \
root@localhost \
"mkdir -p '$guest_path' && mount -t 9p -o trans=virtio,version=9p2000.L,msize=104857600 '$mount_tag' '$guest_path' 2>/dev/null || true" >/dev/null 2>&1
return 0
}
#
# is_workspace_mounted - Check if workspace is actually mounted in VM
# Args: $1 - SSH port
# $2 - guest path
# Returns: 0 if mounted, 1 if not
#
is_workspace_mounted() {
local ssh_port="$1"
local guest_path="$2"
# SSH into VM and check if guest path exists and is a directory
if sshpass -p root ssh -o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
-o LogLevel=ERROR \
-o PubkeyAuthentication=no \
-o PasswordAuthentication=yes \
-p "$ssh_port" \
root@localhost \
"test -d '$guest_path'" 2>/dev/null; then
return 0
else
return 1
fi
}
#
# main - Main execution flow
#
main() {
# Show usage if no arguments
# Handle help flags first
if [[ $# -gt 0 && ( "$1" == "-h" || "$1" == "--help" ) ]]; then
show_usage
exit 0
fi
# Ensure directories exist before accessing workspaces.json
ensure_dirs
# If no command given, default to interactive zsh shell
local run_shell=false
if [[ $# -eq 0 ]]; then
run_shell=true
fi
# Get current workspace (absolute path)
local workspace_path
workspace_path="$(pwd)"
# Generate workspace hash and guest path
local hash
hash=$(workspace_hash "$workspace_path")
local dir_name
dir_name=$(basename "$workspace_path")
local guest_path="/workspace/${hash}_${dir_name}"
log_info "Workspace: $workspace_path"
log_info "Guest path: $guest_path"
# Register workspace in registry
local newly_added=0
if ! register_workspace "$workspace_path" "$hash"; then
newly_added=1
fi
# If this is a newly registered workspace, restart VM to mount it
if [[ "$newly_added" -eq 1 ]] && is_vm_running; then
log_info "New workspace registered. Restarting VM to mount it..."
local qvm_stop="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-stop"
local qvm_start="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-start"
# Stop the VM
if ! "$qvm_stop"; then
die "Failed to stop VM"
fi
# Start the VM with new workspace mount
if ! "$qvm_start"; then
die "Failed to start VM"
fi
log_info "VM restarted with new workspace mounted"
fi
# Ensure VM is running (if it wasn't running before)
if ! is_vm_running; then
log_info "VM not running, starting..."
local qvm_start="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-start"
if ! "$qvm_start"; then
die "Failed to start VM"
fi
fi
# Get SSH port
local ssh_port
ssh_port=$(get_ssh_port)
# Get mount tag from workspaces.json
local mount_tag
mount_tag=$(jq -r --arg path "$workspace_path" '.[] | select(.host_path == $path) | .mount_tag' "$QVM_WORKSPACES_FILE")
# Ensure workspace is mounted (auto-mount if not)
log_info "Ensuring workspace is mounted..."
ensure_workspace_mounted "$ssh_port" "$mount_tag" "$guest_path"
# Verify workspace is actually mounted
if ! is_workspace_mounted "$ssh_port" "$guest_path"; then
log_error "Failed to mount workspace in VM"
echo ""
echo "The workspace could not be mounted automatically."
echo "This may indicate the VM was started before this workspace was registered."
echo ""
echo "Please restart the VM to properly configure the workspace:"
echo " qvm stop"
echo " qvm start"
echo ""
echo "Then try your command again."
exit 1
fi
# Build SSH command
# - Use sshpass for automated password auth (password: root)
# - Use -t if stdin is a TTY (for interactive commands)
# - Suppress SSH warnings (ephemeral VM, host keys change)
# - cd to guest path and execute command
local ssh_cmd=(
sshpass -p root
ssh
-o StrictHostKeyChecking=no
-o UserKnownHostsFile=/dev/null
-o LogLevel=ERROR
-o PubkeyAuthentication=no
-o PasswordAuthentication=yes
-p "$ssh_port"
)
# Add -t flag if stdin is a TTY
if [[ -t 0 ]]; then
ssh_cmd+=(-t)
fi
# Add connection target
ssh_cmd+=(root@localhost)
# Build remote command: cd to workspace and execute user's command (or shell)
local remote_cmd="cd '$guest_path'"
if [[ "$run_shell" == "true" ]]; then
# No command - start interactive zsh shell
remote_cmd+=" && exec zsh"
else
# Append user's command with proper quoting
remote_cmd+=" && "
local first_arg=1
for arg in "$@"; do
if [[ $first_arg -eq 1 ]]; then
remote_cmd+="$arg"
first_arg=0
else
# Quote arguments that contain spaces or special characters
if [[ "$arg" =~ [[:space:]] ]]; then
remote_cmd+=" '$arg'"
else
remote_cmd+=" $arg"
fi
fi
done
fi
# Add the remote command as final SSH argument
ssh_cmd+=("$remote_cmd")
# Execute SSH command (replaces current process)
exec "${ssh_cmd[@]}"
}
# Run main function
main "$@"

View file

@ -1,121 +0,0 @@
#!/usr/bin/env bash
#
# qvm-ssh - Direct SSH access to the VM
#
# This script provides SSH access to the running VM:
# - Auto-starts VM if not running
# - Interactive shell by default (detects TTY)
# - Single command execution with -c flag
# - Passes through additional SSH arguments
# - Uses StrictHostKeyChecking=no for host key management
#
set -euo pipefail
# Source common library
QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}"
# shellcheck source=lib/common.sh
source "$QVM_LIB_DIR/common.sh"
#
# show_usage - Display help text
#
show_usage() {
cat <<EOF
Usage: qvm ssh [OPTIONS] [SSH_ARGS...]
Open an SSH session to the VM or run a single command.
OPTIONS:
-c COMMAND Run a single command instead of interactive shell
-h, --help Show this help message
EXAMPLES:
qvm ssh # Open interactive shell
qvm ssh -c "ls -la" # Run single command
qvm ssh -c "pwd" -v # Run command with SSH verbose flag
NOTES:
- VM will auto-start if not running
- SSH connects as root@localhost
- Host key checking is disabled (VM overlay is ephemeral)
- Additional SSH arguments are passed through
EOF
}
#
# main - Main execution flow
#
main() {
local command_mode=false
local command=""
local ssh_args=()
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
-h|--help)
show_usage
exit 0
;;
-c)
if [[ $# -lt 2 ]]; then
die "Option -c requires a command argument"
fi
command_mode=true
command="$2"
shift 2
;;
*)
# Collect remaining arguments for SSH
ssh_args+=("$1")
shift
;;
esac
done
# Ensure VM is running (auto-start if needed)
if ! is_vm_running; then
log_info "VM is not running, starting it..."
" "${QVM_BIN_DIR:-$(dirname "$0")}/qvm-start""
fi
# Get SSH port
local port
port=$(get_ssh_port)
# Build SSH command with sshpass for automated password auth
local ssh_cmd=(
sshpass -p root
ssh
-o StrictHostKeyChecking=no
-o UserKnownHostsFile=/dev/null
-o LogLevel=ERROR # Suppress host key warnings
-o PubkeyAuthentication=no # Use password auth (password: root)
-o PasswordAuthentication=yes
-p "$port"
root@localhost
)
# Add TTY flag for interactive sessions (not in command mode)
if [[ "$command_mode" = false ]] && [[ -t 0 ]]; then
ssh_cmd+=(-t)
fi
# Add any pass-through SSH arguments
if [[ ${#ssh_args[@]} -gt 0 ]]; then
ssh_cmd+=("${ssh_args[@]}")
fi
# Add command if in command mode
if [[ "$command_mode" = true ]]; then
ssh_cmd+=("$command")
fi
# Execute SSH (replace shell process)
exec "${ssh_cmd[@]}"
}
# Run main function
main "$@"

View file

@ -1,215 +0,0 @@
#!/usr/bin/env bash
#
# qvm-start - Launch the QVM using the NixOS VM runner
#
# This script starts the QVM virtual machine by:
# - Building the VM if not already built
# - Configuring QEMU options via environment variables
# - Adding 9p mounts for caches and workspaces
# - Starting the VM in the background
# - Waiting for SSH to become available
#
set -euo pipefail
# Source common library
QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}"
# shellcheck source=lib/common.sh
source "$QVM_LIB_DIR/common.sh"
#
# find_available_port - Find an available TCP port starting from base
#
find_available_port() {
local port="${1:-2222}"
local max_attempts=100
local attempt=0
while (( attempt < max_attempts )); do
if ! nc -z localhost "$port" 2>/dev/null; then
echo "$port"
return 0
fi
(( port++ )) || true
(( attempt++ )) || true
done
die "Could not find available port after $max_attempts attempts"
}
#
# build_qemu_opts - Build QEMU_OPTS environment variable with 9p mounts
#
build_qemu_opts() {
local ssh_port="$1"
local opts=""
# 9p mounts for shared caches
opts+="-virtfs local,path=$QVM_CARGO_HOME,mount_tag=cargo_home,security_model=mapped-xattr "
opts+="-virtfs local,path=$QVM_CARGO_TARGET,mount_tag=cargo_target,security_model=mapped-xattr "
opts+="-virtfs local,path=$QVM_PNPM_STORE,mount_tag=pnpm_store,security_model=mapped-xattr "
opts+="-virtfs local,path=$QVM_SCCACHE,mount_tag=sccache,security_model=mapped-xattr "
# Mount host opencode config if it exists
if [[ -d "$QVM_HOST_OPENCODE_CONFIG" ]]; then
log_info "Adding opencode config mount..."
opts+="-virtfs local,path=$QVM_HOST_OPENCODE_CONFIG,mount_tag=opencode_config,security_model=mapped-xattr "
fi
# Add workspace mounts from registry
if [[ -f "$QVM_WORKSPACES_FILE" && -s "$QVM_WORKSPACES_FILE" ]]; then
local workspace_count
workspace_count=$(jq -r 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0")
if [[ "$workspace_count" -gt 0 ]]; then
log_info "Adding $workspace_count workspace mount(s)..."
local i=0
while (( i < workspace_count )); do
local path mount_tag
path=$(jq -r ".[$i].host_path" "$QVM_WORKSPACES_FILE")
mount_tag=$(jq -r ".[$i].mount_tag" "$QVM_WORKSPACES_FILE")
if [[ -n "$path" && -n "$mount_tag" && "$path" != "null" && "$mount_tag" != "null" && -d "$path" ]]; then
log_info " - $path -> $mount_tag"
opts+="-virtfs local,path=$path,mount_tag=$mount_tag,security_model=mapped-xattr "
fi
(( i++ )) || true
done
fi
fi
# Serial console to log file and daemonize
opts+="-serial file:$QVM_SERIAL_LOG "
opts+="-display none "
opts+="-daemonize "
opts+="-pidfile $QVM_PID_FILE "
echo "$opts"
}
#
# cleanup_on_failure - Clean up state files if VM start fails
#
cleanup_on_failure() {
log_warn "Cleaning up after failed start..."
rm -f "$QVM_PID_FILE" "$QVM_SSH_PORT_FILE"
}
#
# main - Main execution flow
#
main() {
log_info "Starting QVM..."
# Check if VM is already running
if is_vm_running; then
log_info "VM is already running"
local port
port=$(get_ssh_port)
echo "SSH available on port: $port"
echo "Use 'qvm ssh' to connect or 'qvm status' for details"
exit 0
fi
# First-run initialization
ensure_dirs
# Source config file if it exists (sets QVM_MEMORY, QVM_CPUS, etc.)
# Check system-wide config first, then user config (user overrides system)
if [[ -f "/etc/xdg/qvm/qvm.conf" ]]; then
source "/etc/xdg/qvm/qvm.conf"
fi
if [[ -f "$QVM_CONFIG_FILE" ]]; then
source "$QVM_CONFIG_FILE"
fi
# Check if VM runner exists, build if not
if [[ ! -L "$QVM_VM_RUNNER" || ! -f "$(readlink -f "$QVM_VM_RUNNER" 2>/dev/null || echo "")" ]]; then
log_info "First run detected - building VM..."
log_info "This may take several minutes."
local qvm_rebuild="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-rebuild"
if ! "$qvm_rebuild"; then
die "Failed to build VM. Run 'qvm rebuild' manually to debug."
fi
fi
# Verify VM runner exists now
if [[ ! -L "$QVM_VM_RUNNER" ]]; then
die "VM runner not found at $QVM_VM_RUNNER. Run 'qvm rebuild' first."
fi
local vm_script
vm_script=$(readlink -f "$QVM_VM_RUNNER")
if [[ ! -f "$vm_script" ]]; then
die "VM runner script not found. Run 'qvm rebuild' to fix."
fi
# Find available SSH port
local ssh_port
ssh_port=$(find_available_port 2222)
log_info "Using SSH port: $ssh_port"
# Get memory and CPU settings from environment or use defaults
local memory="${QVM_MEMORY:-30G}"
local cpus="${QVM_CPUS:-30}"
log_info "VM resources: ${memory} memory, ${cpus} CPUs"
# Build QEMU options
local qemu_opts
qemu_opts=$(build_qemu_opts "$ssh_port")
# Launch VM using the NixOS runner script
# The runner script respects these environment variables:
# - QEMU_OPTS: additional QEMU options
# - NIX_DISK_IMAGE: path to disk image (optional, uses tmpdir by default)
log_info "Launching VM..."
# Create persistent disk image location if needed
local disk_image="$QVM_STATE_DIR/qvm-dev.qcow2"
export QEMU_OPTS="$qemu_opts -m $memory -smp $cpus"
export QEMU_NET_OPTS="hostfwd=tcp::${ssh_port}-:22"
export NIX_DISK_IMAGE="$disk_image"
# Run VM - the script uses exec with qemu's -daemonize flag, so it returns quickly
if ! "$vm_script" &>/dev/null; then
cleanup_on_failure
die "Failed to start VM"
fi
# Wait a moment for QEMU to create PID file
sleep 2
# If PID file wasn't created by our QEMU_OPTS, get it from the background process
if [[ ! -f "$QVM_PID_FILE" ]]; then
# Try to find the QEMU process
local qemu_pid
qemu_pid=$(pgrep -f "qemu.*qvm-dev" | head -1 || echo "")
if [[ -n "$qemu_pid" ]]; then
echo "$qemu_pid" > "$QVM_PID_FILE"
fi
fi
# Save SSH port to file
echo "$ssh_port" > "$QVM_SSH_PORT_FILE"
# Wait for SSH to become available
if ! wait_for_ssh "$ssh_port" 120; then
cleanup_on_failure
die "VM started but SSH did not become available. Check: $QVM_SERIAL_LOG"
fi
# Success!
log_info "VM started successfully"
echo ""
echo "SSH available on port: $ssh_port"
echo "Connect with: qvm ssh"
echo "Check status: qvm status"
echo "Serial log: $QVM_SERIAL_LOG"
}
# Run main function
main "$@"

View file

@ -1,228 +0,0 @@
#!/usr/bin/env bash
#
# qvm-status - Display VM state, configuration, and connection information
#
# Shows current VM status including:
# - Running state (PID, uptime, SSH port)
# - Mounted workspaces from workspaces.json
# - Cache directory status
# - Base image and overlay details
# - Connection hints for SSH and run commands
#
# Exit codes:
# 0 - VM is running
# 1 - VM is stopped
set -euo pipefail
# Source common library for shared functions and constants
readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$0")/../lib}"
source "${QVM_LIB_DIR}/common.sh"
# Additional color codes for status display
if [[ -t 1 ]]; then
readonly COLOR_SUCCESS='\033[0;32m' # Green
readonly COLOR_HEADER='\033[1;37m' # Bold White
readonly COLOR_DIM='\033[0;90m' # Dim Gray
else
readonly COLOR_SUCCESS=''
readonly COLOR_HEADER=''
readonly COLOR_DIM=''
fi
#
# format_bytes - Convert bytes to human-readable format
# Args: $1 - size in bytes
# Returns: formatted string (e.g., "1.5G", "256M", "4.0K")
#
format_bytes() {
local bytes="$1"
if (( bytes >= 1073741824 )); then
printf "%.1fG" "$(echo "scale=1; $bytes / 1073741824" | bc)"
elif (( bytes >= 1048576 )); then
printf "%.1fM" "$(echo "scale=1; $bytes / 1048576" | bc)"
elif (( bytes >= 1024 )); then
printf "%.1fK" "$(echo "scale=1; $bytes / 1024" | bc)"
else
printf "%dB" "$bytes"
fi
}
#
# get_uptime - Calculate VM uptime from PID
# Args: $1 - process PID
# Returns: uptime string (e.g., "2h 15m", "45m", "30s")
#
get_uptime() {
local pid="$1"
# Get process start time in seconds since epoch
local start_time
start_time=$(ps -p "$pid" -o lstart= 2>/dev/null | xargs -I{} date -d "{}" +%s)
if [[ -z "$start_time" ]]; then
echo "unknown"
return
fi
local current_time
current_time=$(date +%s)
local uptime_seconds=$((current_time - start_time))
# Format uptime
local hours=$((uptime_seconds / 3600))
local minutes=$(( (uptime_seconds % 3600) / 60 ))
local seconds=$((uptime_seconds % 60))
if (( hours > 0 )); then
printf "%dh %dm" "$hours" "$minutes"
elif (( minutes > 0 )); then
printf "%dm %ds" "$minutes" "$seconds"
else
printf "%ds" "$seconds"
fi
}
#
# show_file_info - Display file status with size and modification time
# Args: $1 - file path
# $2 - label (e.g., "Base Image")
#
show_file_info() {
local file="$1"
local label="$2"
if [[ -f "$file" ]]; then
local size_bytes
size_bytes=$(stat -c %s "$file" 2>/dev/null || echo "0")
local size_human
size_human=$(format_bytes "$size_bytes")
local mod_time
mod_time=$(stat -c %y "$file" 2>/dev/null | cut -d. -f1)
echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} $label: ${COLOR_INFO}$size_human${COLOR_RESET} ${COLOR_DIM}(modified: $mod_time)${COLOR_RESET}"
else
echo -e " ${COLOR_WARN}✗${COLOR_RESET} $label: ${COLOR_WARN}missing${COLOR_RESET}"
fi
}
#
# show_dir_info - Display directory status
# Args: $1 - directory path
# $2 - label (e.g., "Cargo Home")
#
show_dir_info() {
local dir="$1"
local label="$2"
if [[ -d "$dir" ]]; then
echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} $label: ${COLOR_DIM}$dir${COLOR_RESET}"
else
echo -e " ${COLOR_WARN}✗${COLOR_RESET} $label: ${COLOR_WARN}not created${COLOR_RESET}"
fi
}
#
# show_workspaces - Display mounted workspaces from workspaces.json
#
show_workspaces() {
if [[ ! -f "$QVM_WORKSPACES_FILE" ]]; then
echo -e " ${COLOR_DIM}No workspaces mounted${COLOR_RESET}"
return
fi
# Check if file is valid JSON and has workspaces
local workspace_count
workspace_count=$(jq 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0")
if (( workspace_count == 0 )); then
echo -e " ${COLOR_DIM}No workspaces mounted${COLOR_RESET}"
return
fi
# Parse and display each workspace
jq -r '.[] | "\(.hash)|\(.host_path)|\(.guest_path)"' "$QVM_WORKSPACES_FILE" 2>/dev/null | while IFS='|' read -r hash host_path guest_path; do
echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} $hash: ${COLOR_INFO}$host_path${COLOR_RESET} → ${COLOR_DIM}$guest_path${COLOR_RESET}"
done
}
#
# main - Main status display logic
#
main() {
# Header
echo -e "${COLOR_HEADER}QVM Status${COLOR_RESET}"
echo ""
# VM State
echo -e "${COLOR_HEADER}VM State:${COLOR_RESET}"
if is_vm_running; then
local pid
pid=$(cat "$QVM_PID_FILE")
local ssh_port
if [[ -f "$QVM_SSH_PORT_FILE" ]]; then
ssh_port=$(cat "$QVM_SSH_PORT_FILE")
else
ssh_port="unknown"
fi
local uptime
uptime=$(get_uptime "$pid")
echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} Running"
echo -e " ${COLOR_DIM}PID:${COLOR_RESET} $pid"
echo -e " ${COLOR_DIM}SSH:${COLOR_RESET} localhost:$ssh_port"
echo -e " ${COLOR_DIM}Uptime:${COLOR_RESET} $uptime"
else
echo -e " ${COLOR_WARN}✗${COLOR_RESET} Stopped"
fi
echo ""
# Workspaces
echo -e "${COLOR_HEADER}Mounted Workspaces:${COLOR_RESET}"
show_workspaces
echo ""
# Cache Directories
echo -e "${COLOR_HEADER}Cache Directories:${COLOR_RESET}"
show_dir_info "$QVM_CARGO_HOME" "Cargo Home"
show_dir_info "$QVM_CARGO_TARGET" "Cargo Target"
show_dir_info "$QVM_PNPM_STORE" "PNPM Store"
show_dir_info "$QVM_SCCACHE" "SCCache"
echo ""
# VM Images
echo -e "${COLOR_HEADER}VM Images:${COLOR_RESET}"
show_file_info "$QVM_BASE_IMAGE" "Base Image"
show_file_info "$QVM_OVERLAY" "Overlay"
echo ""
# Connection Hints (only if VM is running)
if is_vm_running; then
local ssh_port
ssh_port=$(cat "$QVM_SSH_PORT_FILE" 2>/dev/null || echo "unknown")
echo -e "${COLOR_HEADER}Connection:${COLOR_RESET}"
echo -e " ${COLOR_INFO}SSH:${COLOR_RESET} qvm ssh"
echo -e " ${COLOR_INFO}Run cmd:${COLOR_RESET} qvm run <command>"
echo -e " ${COLOR_INFO}Direct:${COLOR_RESET} ssh -p $ssh_port root@localhost"
echo ""
# Exit success if running
exit 0
else
echo -e "${COLOR_HEADER}Quick Start:${COLOR_RESET}"
echo -e " ${COLOR_INFO}Start VM:${COLOR_RESET} qvm start"
echo ""
# Exit failure if stopped
exit 1
fi
}
# Execute main
main "$@"

View file

@ -1,94 +0,0 @@
#!/usr/bin/env bash
#
# qvm-stop - Gracefully shut down the QEMU VM
#
# This script stops the running VM by sending SIGTERM first for graceful
# shutdown, waiting up to 30 seconds, then sending SIGKILL if necessary.
# It cleans up state files (vm.pid, ssh.port) after shutdown completes.
#
# Usage: qvm stop
#
set -euo pipefail
# Source common library
readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../lib" && pwd)}"
source "${QVM_LIB_DIR}/common.sh"
# Timeout for graceful shutdown in seconds
readonly SHUTDOWN_TIMEOUT=30
#
# wait_for_process_exit - Wait for process to terminate
# Args: $1 - PID to wait for
# $2 - timeout in seconds
# Returns: 0 if process exits, 1 on timeout
#
wait_for_process_exit() {
local pid="$1"
local timeout="$2"
local elapsed=0
while (( elapsed < timeout )); do
if ! kill -0 "$pid" 2>/dev/null; then
return 0
fi
sleep 1
(( elapsed++ ))
done
return 1
}
#
# main - Main shutdown orchestration
#
main() {
# Check if VM is running
if ! is_vm_running; then
log_info "VM is not running"
exit 0
fi
# Get VM process PID
local vm_pid
vm_pid=$(cat "$QVM_PID_FILE")
log_info "Shutting down VM (PID: $vm_pid)..."
# Send SIGTERM for graceful shutdown
if kill -TERM "$vm_pid" 2>/dev/null; then
log_info "Sent SIGTERM, waiting up to ${SHUTDOWN_TIMEOUT}s for graceful shutdown..."
if wait_for_process_exit "$vm_pid" "$SHUTDOWN_TIMEOUT"; then
log_info "VM shut down gracefully"
else
log_warn "Graceful shutdown timeout, forcefully terminating..."
# Send SIGKILL to force termination
if kill -KILL "$vm_pid" 2>/dev/null; then
# Wait briefly to ensure process is dead
sleep 1
# Verify process is actually dead
if kill -0 "$vm_pid" 2>/dev/null; then
die "Failed to kill VM process $vm_pid"
fi
log_info "VM forcefully terminated"
else
log_warn "Process $vm_pid already terminated"
fi
fi
else
log_warn "Process $vm_pid already terminated (could not send SIGTERM)"
fi
# Clean up state files
log_info "Cleaning up state files..."
rm -f "$QVM_PID_FILE" "$QVM_SSH_PORT_FILE"
log_info "VM stopped successfully"
}
main "$@"

View file

@ -0,0 +1,135 @@
# virtio-fs Prerequisites Verification
## Date: 2026-01-26
This document verifies that all prerequisites for virtio-fs support are available in nixpkgs.
## Requirements Summary
virtio-fs requires:
1. **QEMU 4.2.0+** - vhost-user-fs device support introduced
2. **virtiofsd daemon** - User-space filesystem daemon for virtio-fs
3. **Guest kernel CONFIG_VIRTIO_FS** - Linux 5.4+ with virtio-fs driver enabled
---
## Verification Results
### 1. QEMU Version
**Command:**
```bash
nix eval nixpkgs#qemu.version --raw
```
**Result:**
```
10.1.2
```
**Status:** ✅ **PASS** - Version 10.1.2 is significantly newer than the required 4.2.0
**Notes:**
- QEMU 10.x includes full virtio-fs support with vhost-user-fs backend
- Package provides `qemu-kvm` as main program
- Multiple outputs available: out, doc, ga, debug
---
### 2. virtiofsd Package
**Command:**
```bash
nix search nixpkgs virtiofsd --json
```
**Result:**
```
legacyPackages.x86_64-linux.virtiofsd
```
**Version Check:**
```bash
nix eval nixpkgs#virtiofsd.version --raw
```
**Version:**
```
1.13.2
```
**Status:** ✅ **PASS** - virtiofsd is available as a standalone package
**Notes:**
- virtiofsd is available in nixpkgs as a separate package
- Version 1.13.2 is a modern Rust-based implementation
- This is the newer rust-virtiofsd, not the original C implementation from QEMU
---
### 3. Kernel CONFIG_VIRTIO_FS Support
**Command:**
```bash
nix-shell -p linux --run "zcat /proc/config.gz | grep CONFIG_VIRTIO_FS"
```
**Kernel Version:**
```bash
nix eval nixpkgs#linux.version --raw
```
**Version:** `6.12.63`
**Result:**
```
CONFIG_VIRTIO_FS=m
```
**Status:** ✅ **PASS** - virtio-fs is enabled as a loadable kernel module
**Notes:**
- Linux kernel 6.12.63 is much newer than the required 5.4+
- CONFIG_VIRTIO_FS is compiled as a module (`=m`)
- Module will be available in NixOS VM builds by default
- virtio-fs driver can be loaded on-demand
---
## NixOS-Specific Considerations
### Module Loading
- Kernel module `virtiofs` will need to be loaded in the guest
- NixOS typically handles this automatically via `boot.initrd.availableKernelModules` or runtime modprobe
### QEMU Integration
- QEMU package in nixpkgs is the full-featured build
- Includes vhost-user-fs device support
- No custom QEMU build needed
### virtiofsd Daemon
- Must be started on the host before launching VM
- Requires socket path for communication with QEMU
- NixOS can manage this via systemd if desired
---
## Conclusion
**✅ ALL PREREQUISITES AVAILABLE**
All required components for virtio-fs are present in nixpkgs:
- ✅ QEMU 10.1.2 (requirement: 4.2.0+)
- ✅ virtiofsd 1.13.2 (standalone package)
- ✅ Linux kernel 6.12.63 with CONFIG_VIRTIO_FS=m (requirement: 5.4+)
**No blockers identified.** We can proceed with virtio-fs implementation.
---
## Next Steps
1. Design virtiofsd socket/daemon management strategy
2. Update QEMU launch arguments to use vhost-user-fs-pci device
3. Configure guest kernel to mount virtio-fs filesystems
4. Update NixOS VM flake to include virtiofsd in systemPackages or as a service
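To make step 2 above concrete, here is a rough sketch of how the host side might wire virtiofsd to QEMU. The flag spellings follow the upstream virtiofsd and QEMU documentation; the socket path, shared directory, and tag are illustrative, and the actual integration in qvm is still to be designed:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	socket := "/run/user/1000/qvm-ws0.sock" // illustrative socket path
	shared := "/home/user/project"          // host directory to export

	// 1. Start one virtiofsd per shared directory, listening on a socket.
	virtiofsd := exec.Command("virtiofsd",
		"--socket-path="+socket,
		"--shared-dir="+shared,
		"--cache=auto")

	// 2. Point QEMU at the same socket with a vhost-user-fs device.
	//    vhost-user requires guest RAM to be backed by shared memory.
	qemuArgs := []string{
		"-chardev", "socket,id=ws0,path=" + socket,
		"-device", "vhost-user-fs-pci,chardev=ws0,tag=ws0",
		"-object", "memory-backend-memfd,id=mem,size=4G,share=on",
		"-numa", "node,memdev=mem",
	}

	fmt.Println(virtiofsd.String())
	fmt.Println(qemuArgs)
	// 3. Inside the guest the share is then mounted with:
	//    mount -t virtiofs ws0 /workspace/ws0
}
```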

View file

@ -53,67 +53,42 @@
 let
 pkgs = import nixpkgs { inherit system; };
-qvm = pkgs.stdenv.mkDerivation {
+qvm = pkgs.buildGoModule {
 pname = "qvm";
 version = "0.1.0";
+# NOTE: In a flake, only git-tracked files are included by default.
+# The Go source files must be committed to git for this build to work.
+# For development, use: go build ./cmd/qvm
 src = ./.;
+vendorHash = "sha256-d6Z32nPDawwFqhKfVw/QwHUuDuMuTdQdHApmxcXzFng=";
+subPackages = [ "cmd/qvm" ];
 nativeBuildInputs = with pkgs; [
 makeWrapper
+installShellFiles
 ];
-buildInputs = with pkgs; [
-bash
-];
-installPhase = ''
-runHook preInstall
-# Create output directories
-mkdir -p $out/bin
-mkdir -p $out/lib/qvm
-mkdir -p $out/share/qvm
-# Install library files
-install -Dm755 lib/common.sh $out/lib/qvm/common.sh
+postInstall = ''
 # Install default VM flake template
-if [ -d "flake/default-vm" ]; then
-cp -r flake/default-vm $out/share/qvm/default-vm
+if [ -d "$src/flake/default-vm" ]; then
+mkdir -p $out/share/qvm
+cp -r $src/flake/default-vm $out/share/qvm/default-vm
 fi
-# Install all scripts from bin/
-for script in bin/*; do
-if [ -f "$script" ]; then
-install -Dm755 "$script" "$out/bin/$(basename "$script")"
-fi
-done
-# Wrap all scripts with PATH containing required dependencies
-for script in $out/bin/*; do
-wrapProgram "$script" \
+# Wrap binary with PATH containing required dependencies
+wrapProgram $out/bin/qvm \
 --prefix PATH : ${
 pkgs.lib.makeBinPath [
 pkgs.qemu
 pkgs.openssh
 pkgs.jq
-pkgs.coreutils
-pkgs.gnused
-pkgs.gnugrep
 pkgs.nix
 pkgs.netcat-gnu
-pkgs.bc
-pkgs.procps
 pkgs.sshpass
 ]
-} \
---set QVM_LIB_DIR "$out/lib/qvm" \
---set QVM_BIN_DIR "$out/bin"
-done
-runHook postInstall
+}
 '';
 meta = with pkgs.lib; {
@ -150,24 +125,20 @@
 qemu
 openssh
 jq
-coreutils
-gnused
-gnugrep
 nix
 netcat-gnu
-bc
-procps
 sshpass
 # Development tools
-shellcheck
-shfmt
+go
+gopls
+gotools
 ];
 shellHook = ''
-export QVM_LIB_DIR="$(pwd)/lib"
-echo "QVM development environment"
-echo "Library directory: $QVM_LIB_DIR"
+echo "QVM development environment (Go)"
+echo "Build: go build ./cmd/qvm"
+echo "Run: ./qvm status"
 '';
 };
 }

1059
flake/default-vm/flake.lock generated Normal file

File diff suppressed because it is too large Load diff

View file

@ -8,11 +8,6 @@
 inputs.nixpkgs.follows = "nixpkgs";
 };
-nixos-generators = {
-url = "github:nix-community/nixos-generators";
-inputs.nixpkgs.follows = "nixpkgs";
-};
 opencode.url = "github:anomalyco/opencode";
 common.url = "git+https://git.joshuabell.xyz/ringofstorms/dotfiles?dir=flakes/common";
 ros_neovim.url = "git+https://git.joshuabell.xyz/ringofstorms/nvim";
@ -22,7 +17,6 @@
 {
 self,
 nixpkgs,
-nixos-generators,
 ...
 }@inputs:
 let
@ -52,6 +46,33 @@
 allowUnfreePredicate = (_: true);
 };
+# Root filesystem configuration for disk image
+# Use /dev/vda1 directly instead of by-label to avoid initrd label detection issues
+fileSystems."/" = {
+device = "/dev/vda1";
+autoResize = true;
+fsType = "ext4";
+};
+# Boot loader configuration for disk image
+boot.loader.grub.device = lib.mkDefault "/dev/vda";
+# Explicitly load virtio block device module in initrd
+boot.initrd.availableKernelModules = [ "virtio_blk" "virtio_pci" "virtio" ];
+# Serial console for headless operation with QEMU -nographic
+boot.kernelParams = [ "console=ttyS0,115200n8" ];
+# GRUB serial console configuration
+boot.loader.grub.extraConfig = ''
+serial --unit=0 --speed=115200
+terminal_input serial
+terminal_output serial
+'';
+# Getty on serial console for login prompt
+systemd.services."serial-getty@ttyS0".enable = true;
 # Distinctive hostname for easy identification
 networking.hostName = "qvm-dev";
@ -184,9 +205,60 @@
 SCCACHE_DIR = "/cache/sccache";
 };
-# Ensure workspace directory exists
+# Ensure workspace and cache directories exist
 systemd.tmpfiles.rules = [
 "d /workspace 0755 root root -"
+"d /cache 0755 root root -"
+"d /cache/cargo 0755 root root -"
+"d /cache/target 0755 root root -"
+"d /cache/pnpm 0755 root root -"
+"d /cache/sccache 0755 root root -"
+];
+# Systemd mount units for cache directories
+# The NixOS VM runner doesn't include custom fileSystems entries in the generated fstab,
+# so we use systemd mount units to automount the 9p virtfs shares at boot.
+systemd.mounts = [
+{
+what = "cargo_home";
+where = "/cache/cargo";
+type = "9p";
+options = "trans=virtio,version=9p2000.L,msize=104857600,nofail";
+wantedBy = [ "multi-user.target" ];
+after = [ "systemd-modules-load.service" ];
+}
+{
+what = "cargo_target";
+where = "/cache/target";
+type = "9p";
+options = "trans=virtio,version=9p2000.L,msize=104857600,nofail";
+wantedBy = [ "multi-user.target" ];
+after = [ "systemd-modules-load.service" ];
+}
+{
+what = "pnpm_store";
+where = "/cache/pnpm";
+type = "9p";
+options = "trans=virtio,version=9p2000.L,msize=104857600,nofail";
+wantedBy = [ "multi-user.target" ];
+after = [ "systemd-modules-load.service" ];
+}
+{
+what = "sccache";
+where = "/cache/sccache";
+type = "9p";
+options = "trans=virtio,version=9p2000.L,msize=104857600,nofail";
+wantedBy = [ "multi-user.target" ];
+after = [ "systemd-modules-load.service" ];
+}
+{
+what = "opencode_config";
+where = "/root/.config/opencode";
+type = "9p";
+options = "trans=virtio,version=9p2000.L,msize=104857600,nofail";
+wantedBy = [ "multi-user.target" ];
+after = [ "systemd-modules-load.service" ];
+}
 ];
 # Essential packages for development
@ -224,6 +296,16 @@
 # GB disk size
 virtualisation.diskSize = 40 * 1024;
+# NOTE: Using 9p virtfs for filesystem sharing
+# The NixOS VM runner doesn't support virtio-fs out of the box.
+# We use 9p (-virtfs) which is the standard method for QEMU VMs.
+#
+# See: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/virtualisation/qemu-vm.nix#L530
+# The sharedDirectories option hardcodes: -virtfs local,path=...,security_model=...
+#
+# 9p mounts are configured via QEMU_OPTS environment variable:
+# -virtfs local,path=$HOST_PATH,mount_tag=$TAG,security_model=mapped-xattr,msize=104857600
 system.stateVersion = stateVersion;
 };
@ -243,8 +325,28 @@
 # Runnable VM script (./result/bin/run-qvm-dev-vm)
 packages.${system} = {
+# QCOW2 disk image for base VM
+# Using make-disk-image.nix with sufficient memSize to avoid OOM during build
+default = import "${nixpkgs}/nixos/lib/make-disk-image.nix" {
+inherit pkgs;
+lib = nixpkgs.lib;
+config = baseVm.config;
+# Disk image settings
+format = "qcow2";
+diskSize = "auto";
+additionalSpace = "2G"; # Extra space beyond closure size (default 512M)
+partitionTableType = "legacy"; # Use simple MBR instead of hybrid
+label = "nixos"; # Explicit label matching fileSystems."/" device
+# CRITICAL: Increase build VM memory to 16GB for large closures
+# The closure includes NixOS + home-manager + opencode + dev tools (~2GB+)
+# Default 512MB and even 2GB was insufficient, causing OOM during cptofs
+memSize = 16384;
+};
+# Keep the runner script as an alternative for debugging
 vm = baseVm.config.system.build.vm;
-default = baseVm.config.system.build.vm;
 };
 apps.${system}.default = {

17
go.mod Normal file
View file

@ -0,0 +1,17 @@
module qvm
go 1.25.5
require (
github.com/BurntSushi/toml v1.6.0
github.com/digitalocean/go-qemu v0.0.0-20250212194115-ee9b0668d242
github.com/samber/mo v1.16.0
github.com/spf13/cobra v1.10.2
)
require (
github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/spf13/pflag v1.0.9 // indirect
golang.org/x/sys v0.30.0 // indirect
)

56
go.sum Normal file
View file

@ -0,0 +1,56 @@
github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e h1:SCnqm8SjSa0QqRxXbo5YY//S+OryeJioe17nK+iDZpg=
github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e/go.mod h1:o129ljs6alsIQTc8d6eweihqpmmrbxZ2g1jhgjhPykI=
github.com/digitalocean/go-qemu v0.0.0-20250212194115-ee9b0668d242 h1:rh6rt8pF5U4iyQ86h6lRDenJoX4ht2wFnZXB9ogIrIM=
github.com/digitalocean/go-qemu v0.0.0-20250212194115-ee9b0668d242/go.mod h1:LGHUtlhsY4vRGM6AHejEQKVI5e3eHbSylMHwTSpQtVw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/samber/mo v1.16.0 h1:qpEPCI63ou6wXlsNDMLE0IIN8A+devbGX/K1xdgr4b4=
github.com/samber/mo v1.16.0/go.mod h1:DlgzJ4SYhOh41nP1L9kh9rDNERuf8IqWSAs+gj2Vxag=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

47
internal/config/config.go Normal file
View file

@ -0,0 +1,47 @@
package config
import (
"fmt"
"os"
"strconv"
"github.com/BurntSushi/toml"
)
// Config is the top-level QVM configuration loaded from qvm.toml.
type Config struct {
VM VMConfig `toml:"vm"`
}
// VMConfig holds the VM resource settings (the [vm] table in qvm.toml).
type VMConfig struct {
Memory string `toml:"memory"`
CPUs int `toml:"cpus"`
}
// Load returns the configuration: built-in defaults, overridden by the
// config file if present, then by the QVM_MEMORY/QVM_CPUS environment variables.
func Load() (*Config, error) {
cfg := &Config{
VM: VMConfig{
Memory: "30G",
CPUs: 30,
},
}
if _, err := os.Stat(ConfigFile); err == nil {
if _, err := toml.DecodeFile(ConfigFile, cfg); err != nil {
return nil, fmt.Errorf("failed to parse config file %s: %w", ConfigFile, err)
}
}
if memEnv := os.Getenv("QVM_MEMORY"); memEnv != "" {
cfg.VM.Memory = memEnv
}
if cpusEnv := os.Getenv("QVM_CPUS"); cpusEnv != "" {
cpus, err := strconv.Atoi(cpusEnv)
if err != nil {
return nil, fmt.Errorf("QVM_CPUS must be a valid integer: %w", err)
}
cfg.VM.CPUs = cpus
}
return cfg, nil
}
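Given the struct tags above, a `qvm.toml` would use a `[vm]` table with `memory` and `cpus` keys. A small self-contained check of that format (the values are examples, not the defaults):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Config mirrors internal/config.Config for this standalone example.
type Config struct {
	VM struct {
		Memory string `toml:"memory"`
		CPUs   int    `toml:"cpus"`
	} `toml:"vm"`
}

// example is what ~/.config/qvm/qvm.toml might contain.
const example = `
[vm]
memory = "16G"
cpus = 8
`

func main() {
	var cfg Config
	if _, err := toml.Decode(example, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("memory=%s cpus=%d\n", cfg.VM.Memory, cfg.VM.CPUs)
}
```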

125
internal/config/paths.go Normal file
View file

@ -0,0 +1,125 @@
// Package config handles QVM configuration loading and management.
package config
import (
"os"
"path/filepath"
)
// XDG-compliant directory paths matching bash lib/common.sh exactly
var (
// DataDir is the base directory for QVM data files (base image, runner)
// Defaults to $HOME/.local/share/qvm
DataDir = getXDGPath("XDG_DATA_HOME", ".local/share", "qvm")
// StateDir is the directory for QVM runtime state (overlay, PID, port files)
// Defaults to $HOME/.local/state/qvm
StateDir = getXDGPath("XDG_STATE_HOME", ".local/state", "qvm")
// CacheDir is the directory for shared build caches (cargo, pnpm, sccache)
// Defaults to $HOME/.cache/qvm
CacheDir = getXDGPath("XDG_CACHE_HOME", ".cache", "qvm")
// ConfigDir is the directory for QVM configuration (flake, qvm.toml)
// Defaults to $HOME/.config/qvm
ConfigDir = getXDGPath("XDG_CONFIG_HOME", ".config", "qvm")
)
// Path constants for VM artifacts
var (
// BaseImage is the path to the base VM image (read-only)
BaseImage = filepath.Join(DataDir, "base.qcow2")
// VMRunner is the path to the VM runner script
VMRunner = filepath.Join(DataDir, "run-vm")
// Overlay is the path to the VM overlay image (copy-on-write)
Overlay = filepath.Join(StateDir, "qvm-dev.qcow2")
// PIDFile is the path to the VM process ID file
PIDFile = filepath.Join(StateDir, "vm.pid")
// SSHPortFile is the path to the SSH port file
SSHPortFile = filepath.Join(StateDir, "ssh.port")
// SerialLog is the path to the VM serial console log
SerialLog = filepath.Join(StateDir, "serial.log")
// WorkspacesFile is the path to the workspaces registry JSON
WorkspacesFile = filepath.Join(StateDir, "workspaces.json")
// QMPSocket is the path to the QMP (QEMU Machine Protocol) socket
QMPSocket = filepath.Join(StateDir, "qmp.sock")
// UserFlake is the path to the user's customizable NixOS flake
UserFlake = filepath.Join(ConfigDir, "flake")
// ConfigFile is the path to the QVM TOML configuration file
ConfigFile = filepath.Join(ConfigDir, "qvm.toml")
)
// Cache directories for 9p mounts (shared between host and VM)
var (
// CargoHome is the shared Cargo registry/cache directory
CargoHome = filepath.Join(CacheDir, "cargo-home")
// CargoTarget is the shared Cargo build artifacts directory
CargoTarget = filepath.Join(CacheDir, "cargo-target")
// PnpmStore is the shared pnpm content-addressable store
PnpmStore = filepath.Join(CacheDir, "pnpm-store")
// Sccache is the shared sccache compilation cache
Sccache = filepath.Join(CacheDir, "sccache")
)
// Host config directories to mount in VM (read-write for tools that need it)
var (
// HostOpencodeConfig is the path to the host's opencode configuration
// Defaults to $HOME/.config/opencode
HostOpencodeConfig = getXDGPath("XDG_CONFIG_HOME", ".config", "opencode")
)
// getXDGPath returns an XDG-compliant path with fallback.
// Args:
// - xdgEnv: XDG environment variable to check (e.g., "XDG_DATA_HOME")
// - fallbackPath: relative path from $HOME if xdgEnv is not set (e.g., ".local/share")
// - suffix: additional path suffix to append (e.g., "qvm")
//
// Returns the resolved absolute path.
func getXDGPath(xdgEnv, fallbackPath, suffix string) string {
base := os.Getenv(xdgEnv)
if base == "" {
home := os.Getenv("HOME")
if home == "" {
// Fallback to current directory if HOME is not set
home = "."
}
base = filepath.Join(home, fallbackPath)
}
return filepath.Join(base, suffix)
}
// EnsureDirs creates all required QVM directories if they don't exist.
// Returns error if directory creation fails.
func EnsureDirs() error {
dirs := []string{
DataDir,
StateDir,
CacheDir,
ConfigDir,
CargoHome,
CargoTarget,
PnpmStore,
Sccache,
}
for _, dir := range dirs {
if err := os.MkdirAll(dir, 0755); err != nil {
return err
}
}
return nil
}

View file

@@ -0,0 +1,27 @@
package logging
import (
"fmt"
"os"
)
// Simple logging package matching bash script format:
// [INFO] message
// [WARN] message
// [ERROR] message
// No timestamps or additional metadata
// Info prints an informational message
func Info(msg string) {
fmt.Printf("[INFO] %s\n", msg)
}
// Warn prints a warning message
func Warn(msg string) {
fmt.Printf("[WARN] %s\n", msg)
}
// Error prints an error message
func Error(msg string) {
fmt.Fprintf(os.Stderr, "[ERROR] %s\n", msg)
}

75
internal/qmp/client.go Normal file
View file

@@ -0,0 +1,75 @@
package qmp
import (
"encoding/json"
"fmt"
"time"
"github.com/digitalocean/go-qemu/qmp"
"github.com/samber/mo"
)
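// Client wraps a QMP monitor connection to a running QEMU instance.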
type Client struct {
monitor qmp.Monitor
}
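// VMStatus holds the fields returned by the query-status QMP command.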
type VMStatus struct {
Running bool
Singlestep bool
Status string
}
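// Connect opens and connects a QMP monitor on the given UNIX socket (2-second timeout).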
func Connect(socketPath string) mo.Result[*Client] {
monitor, err := qmp.NewSocketMonitor("unix", socketPath, 2*time.Second)
if err != nil {
return mo.Err[*Client](fmt.Errorf("failed to create socket monitor: %w", err))
}
if err := monitor.Connect(); err != nil {
return mo.Err[*Client](fmt.Errorf("failed to connect to QMP socket: %w", err))
}
return mo.Ok(&Client{monitor: monitor})
}
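// Status executes query-status and returns the VM's current run state.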
func (c *Client) Status() mo.Result[VMStatus] {
type statusResult struct {
ID string `json:"id"`
Return struct {
Running bool `json:"running"`
Singlestep bool `json:"singlestep"`
Status string `json:"status"`
} `json:"return"`
}
cmd := []byte(`{"execute":"query-status"}`)
raw, err := c.monitor.Run(cmd)
if err != nil {
return mo.Err[VMStatus](fmt.Errorf("failed to execute query-status: %w", err))
}
var result statusResult
if err := json.Unmarshal(raw, &result); err != nil {
return mo.Err[VMStatus](fmt.Errorf("failed to parse status response: %w", err))
}
return mo.Ok(VMStatus{
Running: result.Return.Running,
Singlestep: result.Return.Singlestep,
Status: result.Return.Status,
})
}
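// Shutdown sends system_powerdown to request a graceful guest shutdown.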
func (c *Client) Shutdown() mo.Result[struct{}] {
cmd := []byte(`{"execute":"system_powerdown"}`)
_, err := c.monitor.Run(cmd)
if err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to execute system_powerdown: %w", err))
}
return mo.Ok(struct{}{})
}
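// Close disconnects the QMP monitor.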
func (c *Client) Close() error {
return c.monitor.Disconnect()
}

View file

@@ -0,0 +1,234 @@
package virtiofsd
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/samber/mo"
)
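// Manager starts and stops virtiofsd daemons, tracking them via PID files in stateDir.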
type Manager struct {
stateDir string
pids map[string]int
}
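// NewManager returns a Manager that keeps its PID files under stateDir.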
func NewManager(stateDir string) *Manager {
return &Manager{
stateDir: stateDir,
pids: make(map[string]int),
}
}
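// findVirtiofsd locates the virtiofsd binary, preferring PATH and falling back to the nix store.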
func findVirtiofsd() (string, error) {
// First try PATH
if path, err := exec.LookPath("virtiofsd"); err == nil {
return path, nil
}
// Fall back to nix
cmd := exec.Command("nix", "path-info", "nixpkgs#virtiofsd")
output, err := cmd.Output()
if err != nil {
return "", fmt.Errorf("virtiofsd not found in PATH and nix lookup failed: %w", err)
}
storePath := strings.TrimSpace(string(output))
virtiofsdPath := filepath.Join(storePath, "bin", "virtiofsd")
if _, err := os.Stat(virtiofsdPath); err != nil {
return "", fmt.Errorf("virtiofsd binary not found at %s", virtiofsdPath)
}
return virtiofsdPath, nil
}
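// StartMount launches virtiofsd for the given mount, writes its PID file, and waits
// up to 5 seconds for the socket to appear. Returns the daemon's PID.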
func (m *Manager) StartMount(mount Mount) mo.Result[int] {
if err := m.CleanStale([]Mount{mount}); err != nil {
return mo.Err[int](fmt.Errorf("failed to clean stale socket for %s: %w", mount.Tag, err))
}
if err := os.MkdirAll(mount.HostPath, 0755); err != nil {
return mo.Err[int](fmt.Errorf("failed to create host directory %s: %w", mount.HostPath, err))
}
virtiofsd, err := findVirtiofsd()
if err != nil {
return mo.Err[int](err)
}
cmd := exec.Command(virtiofsd,
"--socket-path="+mount.SocketPath,
"--shared-dir="+mount.HostPath,
"--cache=auto",
)
if err := cmd.Start(); err != nil {
return mo.Err[int](fmt.Errorf("failed to start virtiofsd for %s: %w", mount.Tag, err))
}
pid := cmd.Process.Pid
m.pids[mount.Tag] = pid
pidFile := m.pidFilePath(mount.Tag)
if err := os.WriteFile(pidFile, []byte(strconv.Itoa(pid)), 0644); err != nil {
_ = cmd.Process.Kill()
return mo.Err[int](fmt.Errorf("failed to write PID file for %s: %w", mount.Tag, err))
}
for i := 0; i < 50; i++ {
if _, err := os.Stat(mount.SocketPath); err == nil {
return mo.Ok(pid)
}
time.Sleep(100 * time.Millisecond)
}
_ = m.StopMount(mount)
return mo.Err[int](fmt.Errorf("virtiofsd socket for %s did not appear within 5 seconds", mount.Tag))
}
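// StopMount terminates the virtiofsd for the given mount (SIGTERM, then SIGKILL after
// 5 seconds) and removes its PID file and socket. Succeeds if the mount is not running.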
func (m *Manager) StopMount(mount Mount) mo.Result[struct{}] {
pidFile := m.pidFilePath(mount.Tag)
pidBytes, err := os.ReadFile(pidFile)
if err != nil {
if os.IsNotExist(err) {
return mo.Ok(struct{}{})
}
return mo.Err[struct{}](fmt.Errorf("failed to read PID file for %s: %w", mount.Tag, err))
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
return mo.Err[struct{}](fmt.Errorf("invalid PID in file for %s: %w", mount.Tag, err))
}
process, err := os.FindProcess(pid)
if err != nil {
_ = os.Remove(pidFile)
_ = os.Remove(mount.SocketPath)
return mo.Ok(struct{}{})
}
if err := process.Signal(syscall.SIGTERM); err != nil {
_ = os.Remove(pidFile)
_ = os.Remove(mount.SocketPath)
return mo.Ok(struct{}{})
}
done := make(chan bool, 1)
go func() {
_, _ = process.Wait()
done <- true
}()
select {
case <-done:
case <-time.After(5 * time.Second):
_ = process.Signal(syscall.SIGKILL)
<-done
}
_ = os.Remove(pidFile)
_ = os.Remove(mount.SocketPath)
delete(m.pids, mount.Tag)
return mo.Ok(struct{}{})
}
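// StartAll starts virtiofsd for every mount; on failure it stops the mounts already started.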
func (m *Manager) StartAll(mounts []Mount) mo.Result[struct{}] {
started := []Mount{}
for _, mount := range mounts {
result := m.StartMount(mount)
if result.IsError() {
for i := len(started) - 1; i >= 0; i-- {
_ = m.StopMount(started[i])
}
return mo.Err[struct{}](fmt.Errorf("failed to start mount %s: %w", mount.Tag, result.Error()))
}
started = append(started, mount)
}
return mo.Ok(struct{}{})
}
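// StopAll terminates every virtiofsd recorded by a virtiofsd-*.pid file in stateDir
// and removes all *.sock files there.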
func (m *Manager) StopAll() mo.Result[struct{}] {
files, err := filepath.Glob(filepath.Join(m.stateDir, "virtiofsd-*.pid"))
if err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to list PID files: %w", err))
}
for _, pidFile := range files {
pidBytes, err := os.ReadFile(pidFile)
if err != nil {
continue
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
continue
}
if process, err := os.FindProcess(pid); err == nil {
_ = process.Signal(syscall.SIGTERM)
time.Sleep(100 * time.Millisecond)
_ = process.Signal(syscall.SIGKILL)
}
_ = os.Remove(pidFile)
}
sockFiles, err := filepath.Glob(filepath.Join(m.stateDir, "*.sock"))
if err == nil {
for _, sockFile := range sockFiles {
_ = os.Remove(sockFile)
}
}
m.pids = make(map[string]int)
return mo.Ok(struct{}{})
}
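// CleanStale removes the socket and PID file for any given mount whose virtiofsd
// process is no longer running.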
func (m *Manager) CleanStale(mounts []Mount) error {
for _, mount := range mounts {
if _, err := os.Stat(mount.SocketPath); err == nil {
pidFile := m.pidFilePath(mount.Tag)
pidBytes, err := os.ReadFile(pidFile)
if err != nil {
_ = os.Remove(mount.SocketPath)
continue
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
_ = os.Remove(mount.SocketPath)
_ = os.Remove(pidFile)
continue
}
process, err := os.FindProcess(pid)
if err != nil {
_ = os.Remove(mount.SocketPath)
_ = os.Remove(pidFile)
continue
}
if err := process.Signal(syscall.Signal(0)); err != nil {
_ = os.Remove(mount.SocketPath)
_ = os.Remove(pidFile)
}
}
}
return nil
}
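// pidFilePath returns the PID file path for the given mount tag.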
func (m *Manager) pidFilePath(tag string) string {
return filepath.Join(m.stateDir, "virtiofsd-"+tag+".pid")
}

View file

@@ -0,0 +1,51 @@
package virtiofsd
import (
"path/filepath"
"qvm/internal/config"
)
// Mount represents a single virtiofsd mount configuration
type Mount struct {
Tag string // Mount tag (e.g., "cargo_home", "ws_abc12345")
HostPath string // Path on host to share
SocketPath string // Path to virtiofsd socket
}
// DefaultCacheMounts returns the standard cache mounts for cargo, pnpm, and sccache.
// These are shared across all projects and mounted at VM start.
func DefaultCacheMounts() []Mount {
return []Mount{
{
Tag: "cargo_home",
HostPath: config.CargoHome,
SocketPath: filepath.Join(config.StateDir, "cargo_home.sock"),
},
{
Tag: "cargo_target",
HostPath: config.CargoTarget,
SocketPath: filepath.Join(config.StateDir, "cargo_target.sock"),
},
{
Tag: "pnpm_store",
HostPath: config.PnpmStore,
SocketPath: filepath.Join(config.StateDir, "pnpm_store.sock"),
},
{
Tag: "sccache",
HostPath: config.Sccache,
SocketPath: filepath.Join(config.StateDir, "sccache.sock"),
},
}
}
// WorkspaceMount creates a Mount configuration for a single workspace.
// mountTag should be the workspace's mount tag (e.g., "ws_abc12345")
// hostPath is the absolute path on the host to share
func WorkspaceMount(mountTag, hostPath string) Mount {
return Mount{
Tag: mountTag,
HostPath: hostPath,
SocketPath: filepath.Join(config.StateDir, mountTag+".sock"),
}
}

379
internal/vm/lifecycle.go Normal file
View file

@@ -0,0 +1,379 @@
package vm
import (
"fmt"
"os"
"os/exec"
"qvm/internal/config"
"qvm/internal/logging"
"qvm/internal/workspace"
"strconv"
"strings"
"syscall"
"time"
"github.com/samber/mo"
)
// VMStatus represents the current state of the VM
type VMStatus struct {
Running bool
PID int
SSHPort int
}
// Mount represents a 9p filesystem mount
type Mount struct {
Tag string
HostPath string
}
// Start launches the VM with all configured mounts.
// Sequence:
// 1. Check if VM is already running (via PID file and process check)
// 2. Ensure all required directories exist
// 3. Build mount list (cache mounts + workspace mounts from registry)
// 4. Find available SSH port
// 5. Build the QEMU command and launch qemu-system-x86_64 directly (daemonized) with 9p virtfs mounts
// 6. Write PID and SSH port to state files
// 7. Wait for SSH to become available (120 second timeout)
//
// Returns error if any step fails.
func Start(cfg *config.Config, reg *workspace.Registry) mo.Result[struct{}] {
// 1. Check if already running
if IsRunning() {
return mo.Err[struct{}](fmt.Errorf("VM is already running"))
}
// 2. Ensure directories exist
if err := config.EnsureDirs(); err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to create directories: %w", err))
}
// 2a. Check if base image exists
if _, err := os.Stat(config.BaseImage); os.IsNotExist(err) {
return mo.Err[struct{}](fmt.Errorf("base image not found at %s - run 'qvm rebuild' first", config.BaseImage))
}
// 2b. Create overlay if it doesn't exist (backed by base image)
if _, err := os.Stat(config.Overlay); os.IsNotExist(err) {
if _, err := os.Stat(config.BaseImage); os.IsNotExist(err) {
return mo.Err[struct{}](fmt.Errorf("base image not found at %s - run 'qvm rebuild' first", config.BaseImage))
}
logging.Info("Creating overlay image backed by base image...")
cmd := exec.Command("qemu-img", "create", "-f", "qcow2",
"-F", "qcow2", "-b", config.BaseImage, config.Overlay)
if output, err := cmd.CombinedOutput(); err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to create overlay: %s: %w", string(output), err))
}
}
// 3. Build mount list (for 9p virtfs)
mounts := []Mount{
{Tag: "cargo_home", HostPath: config.CargoHome},
{Tag: "cargo_target", HostPath: config.CargoTarget},
{Tag: "pnpm_store", HostPath: config.PnpmStore},
{Tag: "sccache", HostPath: config.Sccache},
}
// Add opencode config mount if directory exists
if _, err := os.Stat(config.HostOpencodeConfig); err == nil {
mounts = append(mounts, Mount{
Tag: "opencode_config",
HostPath: config.HostOpencodeConfig,
})
}
// Add workspace mounts from registry
for _, ws := range reg.List() {
mounts = append(mounts, Mount{
Tag: ws.MountTag,
HostPath: ws.HostPath,
})
}
// 4. Find available SSH port
sshPort, err := findAvailablePort(2222)
if err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to find available SSH port: %w", err))
}
// 5. Build QEMU command and start VM directly
args := buildQEMUArgs(cfg, sshPort, mounts)
cmd := exec.Command("qemu-system-x86_64", args...)
cmd.Stdout = nil
cmd.Stderr = nil
cmd.Stdin = nil
if err := cmd.Run(); err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to start QEMU: %w", err))
}
logging.Info("Waiting for VM to daemonize...")
pidFileReady := false
for i := 0; i < 10; i++ {
time.Sleep(500 * time.Millisecond)
if _, err := os.Stat(config.PIDFile); err == nil {
pidFileReady = true
break
}
}
if !pidFileReady {
return mo.Err[struct{}](fmt.Errorf("QEMU did not create PID file after 5 seconds"))
}
pidBytes, err := os.ReadFile(config.PIDFile)
if err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to read PID file: %w", err))
}
pid := strings.TrimSpace(string(pidBytes))
logging.Info("VM started with PID " + pid)
if err := os.WriteFile(config.SSHPortFile, []byte(strconv.Itoa(sshPort)), 0644); err != nil {
if pidBytes, err := os.ReadFile(config.PIDFile); err == nil {
if pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))); err == nil {
if process, err := os.FindProcess(pid); err == nil {
_ = process.Kill()
}
}
}
_ = os.Remove(config.PIDFile)
return mo.Err[struct{}](fmt.Errorf("failed to write SSH port file: %w", err))
}
// 7. Wait for SSH
if err := waitForSSH(sshPort, 120*time.Second); err != nil {
// QEMU has daemonized, so cmd.Process is the already-exited parent; kill the VM via its PID file instead
if pidBytes, readErr := os.ReadFile(config.PIDFile); readErr == nil {
if vmPID, convErr := strconv.Atoi(strings.TrimSpace(string(pidBytes))); convErr == nil {
if process, findErr := os.FindProcess(vmPID); findErr == nil {
_ = process.Kill()
}
}
}
_ = os.Remove(config.PIDFile)
_ = os.Remove(config.SSHPortFile)
return mo.Err[struct{}](fmt.Errorf("VM started but SSH not available: %w", err))
}
return mo.Ok(struct{}{})
}
// Stop gracefully shuts down the VM.
// Sequence:
// 1. Read PID from file
// 2. Send SIGTERM to the process
// 3. Wait up to 30 seconds for graceful shutdown (poll every second)
// 4. If still running, send SIGKILL
// 5. Clean up PID and port files
//
// Returns success even if VM is not running (idempotent).
func Stop() mo.Result[struct{}] {
// 1. Read PID file
pidBytes, err := os.ReadFile(config.PIDFile)
if err != nil {
if os.IsNotExist(err) {
// Not running
return mo.Ok(struct{}{})
}
return mo.Err[struct{}](fmt.Errorf("failed to read PID file: %w", err))
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
return mo.Err[struct{}](fmt.Errorf("invalid PID in file: %w", err))
}
// Check if process exists
process, err := os.FindProcess(pid)
if err != nil {
// Process doesn't exist, clean up
cleanupStateFiles()
return mo.Ok(struct{}{})
}
// 2. Send SIGTERM for graceful shutdown
if err := process.Signal(syscall.SIGTERM); err != nil {
// Process already gone
cleanupStateFiles()
return mo.Ok(struct{}{})
}
// 3. Wait up to 30 seconds for process to exit (poll every second)
for i := 0; i < 30; i++ {
time.Sleep(1 * time.Second)
// Check if process still exists by sending signal 0
if err := process.Signal(syscall.Signal(0)); err != nil {
// Process no longer exists
cleanupStateFiles()
return mo.Ok(struct{}{})
}
}
// 4. Timeout, force kill
_ = process.Signal(syscall.SIGKILL)
// Wait a moment for SIGKILL to take effect
time.Sleep(1 * time.Second)
// 5. Clean up state files
cleanupStateFiles()
return mo.Ok(struct{}{})
}
// cleanupStateFiles removes all VM state files
func cleanupStateFiles() {
_ = os.Remove(config.PIDFile)
_ = os.Remove(config.SSHPortFile)
_ = os.Remove(config.QMPSocket)
}
// Status returns the current VM status (running, PID, SSH port).
func Status() mo.Result[VMStatus] {
status := VMStatus{
Running: false,
PID: 0,
SSHPort: 0,
}
if !IsRunning() {
return mo.Ok(status)
}
// Read PID
pidBytes, err := os.ReadFile(config.PIDFile)
if err != nil {
return mo.Err[VMStatus](fmt.Errorf("failed to read PID file: %w", err))
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
return mo.Err[VMStatus](fmt.Errorf("invalid PID in file: %w", err))
}
// Read SSH port
portBytes, err := os.ReadFile(config.SSHPortFile)
if err != nil {
return mo.Err[VMStatus](fmt.Errorf("failed to read SSH port file: %w", err))
}
sshPort, err := strconv.Atoi(strings.TrimSpace(string(portBytes)))
if err != nil {
return mo.Err[VMStatus](fmt.Errorf("invalid SSH port in file: %w", err))
}
status.Running = true
status.PID = pid
status.SSHPort = sshPort
return mo.Ok(status)
}
// Reset stops the VM and deletes the overlay image.
// This returns the VM to a fresh state based on the base image.
func Reset() mo.Result[struct{}] {
// Stop VM if running
stopResult := Stop()
if stopResult.IsError() {
return mo.Err[struct{}](fmt.Errorf("failed to stop VM: %w", stopResult.Error()))
}
// Delete overlay image
if err := os.Remove(config.Overlay); err != nil && !os.IsNotExist(err) {
return mo.Err[struct{}](fmt.Errorf("failed to delete overlay: %w", err))
}
return mo.Ok(struct{}{})
}
// IsRunning reports whether the VM is running, by checking the PID file
// and verifying that the process still exists.
func IsRunning() bool {
pidBytes, err := os.ReadFile(config.PIDFile)
if err != nil {
return false
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
return false
}
// Check if process exists by sending signal 0
process, err := os.FindProcess(pid)
if err != nil {
return false
}
err = process.Signal(syscall.Signal(0))
return err == nil
}
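// buildQEMUArgs assembles the QEMU argv: a daemonized boot from the overlay disk,
// user-mode networking with an SSH port forward, a serial log, and one 9p virtfs
// export per mount.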
func buildQEMUArgs(cfg *config.Config, sshPort int, mounts []Mount) []string {
// Boot directly from the qcow2 disk image (has GRUB installed)
// Do NOT use -kernel/-initrd - that's for NixOS VM runner which requires special 9p mounts
args := []string{
"-machine", "q35",
"-accel", "kvm",
"-cpu", "host",
"-m", cfg.VM.Memory,
"-smp", strconv.Itoa(cfg.VM.CPUs),
"-display", "none",
"-daemonize",
"-pidfile", config.PIDFile,
"-drive", fmt.Sprintf("file=%s,if=virtio,format=qcow2", config.Overlay),
"-netdev", fmt.Sprintf("user,id=n0,hostfwd=tcp::%d-:22", sshPort),
"-device", "virtio-net-pci,netdev=n0",
"-serial", fmt.Sprintf("file:%s", config.SerialLog),
}
// Add 9p mounts for cache directories and workspaces
for _, mount := range mounts {
args = append(args,
"-virtfs", fmt.Sprintf("local,path=%s,mount_tag=%s,security_model=mapped-xattr,id=%s",
mount.HostPath, mount.Tag, mount.Tag),
)
}
return args
}
// findAvailablePort finds an available TCP port starting from the given base port.
func findAvailablePort(basePort int) (int, error) {
const maxAttempts = 100
for i := 0; i < maxAttempts; i++ {
port := basePort + i
cmd := exec.Command("nc", "-z", "localhost", strconv.Itoa(port))
if err := cmd.Run(); err != nil {
return port, nil
}
}
return 0, fmt.Errorf("could not find available port after %d attempts", maxAttempts)
}
// waitForSSH waits for SSH to become available on the given port.
// Uses sshpass with password 'root' to test connection.
func waitForSSH(port int, timeout time.Duration) error {
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
cmd := exec.Command("sshpass", "-p", "root",
"ssh",
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
"-o", "ConnectTimeout=1",
"-p", strconv.Itoa(port),
"root@localhost",
"exit 0")
if err := cmd.Run(); err == nil {
return nil
}
time.Sleep(1 * time.Second)
}
return fmt.Errorf("SSH did not become available within %v", timeout)
}

54
internal/vm/qemu.go Normal file
View file

@@ -0,0 +1,54 @@
package vm
import (
"fmt"
"qvm/internal/config"
"qvm/internal/virtiofsd"
"strconv"
)
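// buildQEMUCommand builds a QEMU command line (including the binary name) that shares
// the mounts via virtiofsd vhost-user-fs-pci devices backed by memfd shared memory,
// rather than 9p virtfs.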
func buildQEMUCommand(cfg *config.Config, sshPort int, mounts []virtiofsd.Mount) []string {
args := []string{
"qemu-system-x86_64",
"-enable-kvm",
}
memSize := cfg.VM.Memory
args = append(args,
"-object", fmt.Sprintf("memory-backend-memfd,id=mem,size=%s,share=on", memSize),
"-numa", "node,memdev=mem",
)
args = append(args,
"-smp", strconv.Itoa(cfg.VM.CPUs),
)
args = append(args,
"-drive", fmt.Sprintf("if=virtio,file=%s,format=qcow2", config.Overlay),
)
args = append(args,
"-nic", fmt.Sprintf("user,model=virtio-net-pci,hostfwd=tcp::%d-:22", sshPort),
)
args = append(args,
"-serial", fmt.Sprintf("file:%s", config.SerialLog),
)
args = append(args,
"-qmp", fmt.Sprintf("unix:%s,server,nowait", config.QMPSocket),
)
args = append(args,
"-display", "none",
)
for _, mount := range mounts {
args = append(args,
"-chardev", fmt.Sprintf("socket,id=%s,path=%s", mount.Tag, mount.SocketPath),
"-device", fmt.Sprintf("vhost-user-fs-pci,queue-size=1024,chardev=%s,tag=%s", mount.Tag, mount.Tag),
)
}
return args
}

View file

@@ -0,0 +1,139 @@
package workspace
import (
"crypto/sha256"
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/samber/mo"
)
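// Workspace describes a host directory shared into the VM and where it is mounted in the guest.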
type Workspace struct {
HostPath string `json:"host_path"`
Hash string `json:"hash"`
MountTag string `json:"mount_tag"`
GuestPath string `json:"guest_path"`
}
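// Registry maps host paths to registered workspaces and persists them as JSON.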
type Registry struct {
filePath string
workspaces map[string]Workspace
}
// Hash generates an 8-character hash from a path, matching bash behavior:
// echo -n "$path" | sha256sum | cut -c1-8
func Hash(path string) string {
h := sha256.Sum256([]byte(path))
return fmt.Sprintf("%x", h)[:8]
}
// NewRegistry creates a new empty registry
func NewRegistry(filePath string) *Registry {
return &Registry{
filePath: filePath,
workspaces: make(map[string]Workspace),
}
}
// Load reads the registry from a JSON file
func Load(filePath string) mo.Result[*Registry] {
registry := NewRegistry(filePath)
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return mo.Ok(registry)
}
data, err := os.ReadFile(filePath)
if err != nil {
return mo.Err[*Registry](fmt.Errorf("failed to read workspaces file: %w", err))
}
if len(data) == 0 {
return mo.Ok(registry)
}
var workspaceList []Workspace
if err := json.Unmarshal(data, &workspaceList); err != nil {
return mo.Err[*Registry](fmt.Errorf("failed to parse workspaces JSON: %w", err))
}
for _, ws := range workspaceList {
registry.workspaces[ws.HostPath] = ws
}
return mo.Ok(registry)
}
// Save writes the registry to the JSON file
func (r *Registry) Save() mo.Result[struct{}] {
dir := filepath.Dir(r.filePath)
if err := os.MkdirAll(dir, 0755); err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to create directory: %w", err))
}
workspaceList := make([]Workspace, 0, len(r.workspaces))
for _, ws := range r.workspaces {
workspaceList = append(workspaceList, ws)
}
data, err := json.MarshalIndent(workspaceList, "", " ")
if err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to marshal JSON: %w", err))
}
if err := os.WriteFile(r.filePath, data, 0644); err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to write workspaces file: %w", err))
}
return mo.Ok(struct{}{})
}
// Register adds a workspace to the registry if it doesn't already exist
func (r *Registry) Register(hostPath string) mo.Result[*Workspace] {
absPath, err := filepath.Abs(hostPath)
if err != nil {
return mo.Err[*Workspace](fmt.Errorf("failed to resolve absolute path: %w", err))
}
if existing, exists := r.workspaces[absPath]; exists {
return mo.Ok(&existing)
}
hash := Hash(absPath)
ws := Workspace{
HostPath: absPath,
Hash: hash,
MountTag: fmt.Sprintf("ws_%s", hash),
GuestPath: fmt.Sprintf("/workspace/%s", hash),
}
r.workspaces[absPath] = ws
return mo.Ok(&ws)
}
// List returns all registered workspaces
func (r *Registry) List() []Workspace {
result := make([]Workspace, 0, len(r.workspaces))
for _, ws := range r.workspaces {
result = append(result, ws)
}
return result
}
// Find looks up a workspace by host path
func (r *Registry) Find(hostPath string) mo.Option[Workspace] {
absPath, err := filepath.Abs(hostPath)
if err != nil {
if ws, exists := r.workspaces[hostPath]; exists {
return mo.Some(ws)
}
return mo.None[Workspace]()
}
if ws, exists := r.workspaces[absPath]; exists {
return mo.Some(ws)
}
return mo.None[Workspace]()
}

185
lib/common.sh
View file

@@ -1,185 +0,0 @@
#!/usr/bin/env bash
#
# common.sh - Shared functions and configuration for QVM CLI tool
#
# This file defines XDG-compliant directory paths, constants, and utility
# functions used across all qvm-* commands. It should be sourced by each
# command script via: source "${QVM_LIB_DIR}/common.sh"
#
set -euo pipefail
# XDG-compliant directory paths
readonly QVM_DATA_DIR="${XDG_DATA_HOME:-$HOME/.local/share}/qvm"
readonly QVM_STATE_DIR="${XDG_STATE_HOME:-$HOME/.local/state}/qvm"
readonly QVM_CACHE_DIR="${XDG_CACHE_HOME:-$HOME/.cache}/qvm"
readonly QVM_CONFIG_DIR="${XDG_CONFIG_HOME:-$HOME/.config}/qvm"
# Path constants for VM artifacts
readonly QVM_BASE_IMAGE="$QVM_DATA_DIR/base.qcow2"
readonly QVM_OVERLAY="$QVM_STATE_DIR/overlay.qcow2"
readonly QVM_PID_FILE="$QVM_STATE_DIR/vm.pid"
readonly QVM_SSH_PORT_FILE="$QVM_STATE_DIR/ssh.port"
readonly QVM_SERIAL_LOG="$QVM_STATE_DIR/serial.log"
readonly QVM_WORKSPACES_FILE="$QVM_STATE_DIR/workspaces.json"
readonly QVM_USER_FLAKE="$QVM_CONFIG_DIR/flake"
readonly QVM_VM_RUNNER="$QVM_DATA_DIR/run-vm"
readonly QVM_CONFIG_FILE="$QVM_CONFIG_DIR/qvm.conf"
# Cache directories for 9p mounts (shared between host and VM)
readonly QVM_CARGO_HOME="$QVM_CACHE_DIR/cargo-home"
readonly QVM_CARGO_TARGET="$QVM_CACHE_DIR/cargo-target"
readonly QVM_PNPM_STORE="$QVM_CACHE_DIR/pnpm-store"
readonly QVM_SCCACHE="$QVM_CACHE_DIR/sccache"
# Host config directories to mount in VM (read-write for tools that need it)
readonly QVM_HOST_OPENCODE_CONFIG="${XDG_CONFIG_HOME:-$HOME/.config}/opencode"
# Color codes (only used if stdout is a TTY)
if [[ -t 1 ]]; then
readonly COLOR_INFO='\033[0;36m' # Cyan
readonly COLOR_WARN='\033[0;33m' # Yellow
readonly COLOR_ERROR='\033[0;31m' # Red
readonly COLOR_RESET='\033[0m' # Reset
else
readonly COLOR_INFO=''
readonly COLOR_WARN=''
readonly COLOR_ERROR=''
readonly COLOR_RESET=''
fi
#
# log_info - Print informational message in cyan
# Usage: log_info "message"
#
log_info() {
echo -e "${COLOR_INFO}[INFO]${COLOR_RESET} $*" >&2
}
#
# log_warn - Print warning message in yellow
# Usage: log_warn "message"
#
log_warn() {
echo -e "${COLOR_WARN}[WARN]${COLOR_RESET} $*" >&2
}
#
# log_error - Print error message in red
# Usage: log_error "message"
#
log_error() {
echo -e "${COLOR_ERROR}[ERROR]${COLOR_RESET} $*" >&2
}
#
# die - Print error message and exit with status 1
# Usage: die "error message"
#
die() {
log_error "$@"
exit 1
}
#
# ensure_dirs - Create all required QVM directories
# Usage: ensure_dirs
#
ensure_dirs() {
mkdir -p "$QVM_DATA_DIR" \
"$QVM_STATE_DIR" \
"$QVM_CACHE_DIR" \
"$QVM_CONFIG_DIR" \
"$QVM_CARGO_HOME" \
"$QVM_CARGO_TARGET" \
"$QVM_PNPM_STORE" \
"$QVM_SCCACHE"
}
#
# is_vm_running - Check if VM process is running
# Returns: 0 if running, 1 if not
# Usage: if is_vm_running; then ... fi
#
is_vm_running() {
if [[ ! -f "$QVM_PID_FILE" ]]; then
return 1
fi
local pid
pid=$(cat "$QVM_PID_FILE")
# Check if process exists and is a QEMU process
if kill -0 "$pid" 2>/dev/null; then
return 0
else
# Stale PID file, remove it
rm -f "$QVM_PID_FILE"
return 1
fi
}
#
# get_ssh_port - Read SSH port from state file
# Returns: SSH port number on stdout
# Usage: port=$(get_ssh_port)
#
get_ssh_port() {
if [[ ! -f "$QVM_SSH_PORT_FILE" ]]; then
die "SSH port file not found. Is the VM running?"
fi
cat "$QVM_SSH_PORT_FILE"
}
#
# workspace_hash - Generate short hash from absolute path
# Args: $1 - absolute path to workspace
# Returns: 8-character hash on stdout
# Usage: hash=$(workspace_hash "/path/to/workspace")
#
workspace_hash() {
local path="$1"
echo -n "$path" | sha256sum | cut -c1-8
}
#
# wait_for_ssh - Wait for SSH to become available on VM
# Args: $1 - SSH port number
# $2 - timeout in seconds (default: 60)
# Returns: 0 if SSH is available, 1 on timeout
# Usage: wait_for_ssh "$port" 30
#
wait_for_ssh() {
local port="${1:-}"
local timeout="${2:-60}"
local elapsed=0
if [[ -z "$port" ]]; then
die "wait_for_ssh requires port argument"
fi
log_info "Waiting for SSH on port $port (timeout: ${timeout}s)..."
while ((elapsed < timeout)); do
# Actually attempt SSH connection to verify sshd is responding
# nc -z only checks if port is open (QEMU opens it immediately)
# We need to verify sshd is actually ready to accept connections
if timeout 2 sshpass -p root ssh \
-o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
-o LogLevel=ERROR \
-o PubkeyAuthentication=no \
-o PasswordAuthentication=yes \
-o ConnectTimeout=1 \
-p "$port" \
root@localhost "true" 2>/dev/null; then
log_info "SSH is ready"
return 0
fi
sleep 1
((elapsed++))
done
log_error "SSH did not become available within ${timeout}s"
return 1
}