diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..77a4377 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +result +qvm diff --git a/README.md b/README.md index 7eaf5a2..0b4c227 100644 --- a/README.md +++ b/README.md @@ -522,12 +522,13 @@ $(pwd) ──9p──→ /workspace/{hash}/ ## Contributing -Contributions welcome! This is a simple Bash-based tool designed to be readable and hackable. +Contributions welcome! This is a Go CLI tool designed to be readable and maintainable. **Key files:** -- `bin/qvm` - Main dispatcher -- `bin/qvm-*` - Subcommand implementations -- `lib/common.sh` - Shared utilities and paths +- `cmd/qvm/` - CLI command implementations (Cobra) +- `internal/vm/` - VM lifecycle and QEMU management +- `internal/workspace/` - Workspace registration and mounting +- `internal/config/` - Configuration and XDG paths - `flake/default-vm/flake.nix` - Default VM template **Development:** @@ -536,7 +537,12 @@ Contributions welcome! This is a simple Bash-based tool designed to be readable git clone https://github.com/yourusername/qvm cd qvm nix develop -./bin/qvm start + +# Build the Go binary +go build ./cmd/qvm + +# Run locally +./qvm start ``` ## License diff --git a/bin/qvm b/bin/qvm deleted file mode 100755 index 9735e2b..0000000 --- a/bin/qvm +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env bash -# -# qvm - Main dispatcher for QVM (QEMU Development VM) commands -# -# This script routes subcommands to their respective qvm-* implementations. -# It sources common.sh for shared configuration and utility functions. -# - -set -euo pipefail - -# Determine script directory for locating sibling scripts -readonly SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" - -# Source common library (use QVM_LIB_DIR from wrapper or relative path for dev) -source "${QVM_LIB_DIR:-${SCRIPT_DIR}/../lib}/common.sh" - -readonly VERSION="0.1.0" - -# -# show_help - Display usage information -# -show_help() { - cat <<EOF -USAGE: - qvm <command> [args...]
- -COMMANDS: - start Start the VM (create if needed) - stop Stop the running VM - run Execute a command in the VM (or start shell if no command) - ssh Open SSH session or run command in VM - status Show VM status and information - rebuild Rebuild the base VM image from flake - reset Delete overlay and start fresh (keeps base image) - clean Remove ALL QVM data (images, state, caches) - -OPTIONS: - -h, --help Show this help message - -v, --version Show version information - -EXAMPLES: - qvm start Start the VM - qvm ssh Open interactive SSH session - qvm run 'ls -la' Run command in VM - qvm status Check if VM is running - qvm stop Stop the VM - -For more information on a specific command, run: - qvm <command> --help -EOF -} - -# -# show_version - Display version information -# -show_version() { - echo "qvm version ${VERSION}" -} - -# -# main - Parse arguments and route to subcommand -# -main() { - # Handle no arguments - if [[ $# -eq 0 ]]; then - show_help - exit 0 - fi - - local subcommand="$1" - shift - - case "$subcommand" in - start|stop|run|ssh|status|rebuild|reset|clean) - # Route to the appropriate qvm-* script - # Use exec to replace this process with the subcommand - exec "${SCRIPT_DIR}/qvm-${subcommand}" "$@" - ;; - help|--help|-h) - show_help - exit 0 - ;; - --version|-v) - show_version - exit 0 - ;; - *) - log_error "Unknown command: ${subcommand}" - echo "" >&2 - show_help >&2 - exit 1 - ;; - esac -} - -main "$@" diff --git a/bin/qvm-clean b/bin/qvm-clean deleted file mode 100755 index 0bdf84d..0000000 --- a/bin/qvm-clean +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env bash -# -# qvm-clean - Completely remove all QVM state, images, and caches -# -# This script performs a full cleanup of all QVM-related data: -# - Base image (base.qcow2) -# - VM overlay and state (overlay.qcow2, pid, ssh port, logs, workspaces) -# - Build caches (cargo, pnpm, sccache) -# - Optionally: user configuration (flake) -# -# WARNING: This is destructive and cannot be undone! -# -# Usage: qvm clean [-f|--force] -# -f, --force Skip confirmation prompt -# - -set -euo pipefail - -# Source common library -readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../lib" && pwd)}" -source "${QVM_LIB_DIR}/common.sh" - -# Get path to qvm-stop script -readonly QVM_STOP="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-stop" - -# -# confirm_clean - Prompt user for confirmation -# Args: $1 - whether to delete config (true/false) -# Returns: 0 if user confirms, exits script if user cancels -# -confirm_clean() { - echo - log_warn "This will delete ALL QVM data:" - echo " - Base image: $QVM_DATA_DIR" - echo " - State/overlay: $QVM_STATE_DIR" - echo " - Build caches: $QVM_CACHE_DIR" - echo " - Config/flake: $QVM_CONFIG_DIR" - - echo - log_warn "This operation CANNOT be undone!" - echo "You will need to rebuild the base image from scratch next time." - echo - read -p "Are you absolutely sure? [y/N] " -n 1 -r - echo - - if [[ ! $REPLY =~ ^[Yy]$ ]]; then - log_info "Clean cancelled" - exit 0 - fi -} - -# -# main - Main cleanup orchestration -# -main() { - local force=false - - # Parse arguments - while [[ $# -gt 0 ]]; do - case "$1" in - -f|--force) - force=true - shift - ;; - *) - die "Unknown option: $1" - ;; - esac - done - - # Confirm unless --force is used - if [[ "$force" != "true" ]]; then - confirm_clean - fi - - # Stop VM if running - if is_vm_running; then - log_info "Stopping running VM..." - "$QVM_STOP" - fi - - # Delete directories - log_info "Removing QVM data directories..."
- - if [[ -d "$QVM_DATA_DIR" ]]; then - log_info " - Deleting: $QVM_DATA_DIR" - rm -rf "$QVM_DATA_DIR" - fi - - if [[ -d "$QVM_STATE_DIR" ]]; then - log_info " - Deleting: $QVM_STATE_DIR" - rm -rf "$QVM_STATE_DIR" - fi - - if [[ -d "$QVM_CACHE_DIR" ]]; then - log_info " - Deleting: $QVM_CACHE_DIR" - rm -rf "$QVM_CACHE_DIR" - fi - - if [[ -d "$QVM_CONFIG_DIR" ]]; then - log_info " - Deleting: $QVM_CONFIG_DIR" - rm -rf "$QVM_CONFIG_DIR" - fi - - # Print success message - echo - log_info "QVM cleaned successfully!" - echo - echo "All QVM data has been removed from your system." - echo "Next run of 'qvm start' will initialize everything from scratch." - echo -} - -main "$@" diff --git a/bin/qvm-rebuild b/bin/qvm-rebuild deleted file mode 100755 index edea1f0..0000000 --- a/bin/qvm-rebuild +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env bash -# -# qvm-rebuild - Build the base qcow2 image from user's flake -# -# This script builds the QVM base image by: -# - Ensuring a user flake exists (copying default if needed) -# - Running nix build on the user's flake configuration -# - Copying the resulting qcow2 to the base image location -# - Optionally warning if VM is running -# - -set -euo pipefail - -# Source common library -QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}" -# shellcheck source=lib/common.sh -source "$QVM_LIB_DIR/common.sh" - -# -# ensure_user_flake - Ensure user's flake exists, copy default if missing -# -ensure_user_flake() { - if [[ -f "$QVM_USER_FLAKE/flake.nix" ]]; then - log_info "Using existing user flake: $QVM_USER_FLAKE/flake.nix" - return 0 - fi - - log_info "User flake not found, copying default template..." - - # Determine default flake location - # In installed version: $QVM_LIB_DIR/../share/qvm/default-vm/ - # In development: $(dirname "$0")/../flake/default-vm/ - local default_flake_dir - - # Try installed location first ($QVM_LIB_DIR is $out/lib/qvm) - if [[ -d "$QVM_LIB_DIR/../../share/qvm/default-vm" ]]; then - default_flake_dir="$QVM_LIB_DIR/../../share/qvm/default-vm" - else - # Fall back to development location - default_flake_dir="$(dirname "$(readlink -f "$0")")/../flake/default-vm" - fi - - if [[ ! -d "$default_flake_dir" ]]; then - die "Default flake template not found at: $default_flake_dir" - fi - - if [[ ! -f "$default_flake_dir/flake.nix" ]]; then - die "Default flake.nix not found at: $default_flake_dir/flake.nix" - fi - - # Create user flake directory and copy template - mkdir -p "$QVM_USER_FLAKE" - cp -r "$default_flake_dir"/* "$QVM_USER_FLAKE/" - - log_info "Default flake copied to: $QVM_USER_FLAKE" - echo "" - echo "You can customize your VM by editing: $QVM_USER_FLAKE/flake.nix" - echo "" -} - -# -# build_vm - Build the VM runner using nix -# -build_base_image() { - log_info "Building VM from flake..." - - # Build the VM output from user's flake - local build_result="$QVM_STATE_DIR/vm-result" - - if ! nix build "$QVM_USER_FLAKE#vm" --out-link "$build_result"; then - die "Failed to build VM. Check your flake configuration at: $QVM_USER_FLAKE/flake.nix" - fi - - # Verify the result contains the VM runner script - local vm_runner="$build_result/bin/run-qvm-dev-vm" - if [[ ! -f "$vm_runner" ]]; then - # Try alternate name pattern - vm_runner=$(find "$build_result/bin" -name "run-*-vm" -type f 2>/dev/null | head -1) - if [[ -z "$vm_runner" || ! 
-f "$vm_runner" ]]; then - die "Build succeeded but VM runner script not found in: $build_result/bin/" - fi - fi - - # Move the result symlink to data dir (keeps nix store reference) - rm -f "$QVM_DATA_DIR/vm-result" - mv "$build_result" "$QVM_DATA_DIR/vm-result" - - # Get the basename of the runner script and construct path in new location - local runner_name - runner_name=$(basename "$vm_runner") - vm_runner="$QVM_DATA_DIR/vm-result/bin/$runner_name" - - # Create a symlink to the VM runner at our standard location - log_info "Installing VM runner to: $QVM_VM_RUNNER" - rm -f "$QVM_VM_RUNNER" - ln -sf "$vm_runner" "$QVM_VM_RUNNER" - - log_info "VM built successfully" - echo "" - echo "VM runner: $QVM_VM_RUNNER" -} - -# -# warn_if_running - Warn user if VM is currently running -# -warn_if_running() { - if is_vm_running; then - log_warn "VM is currently running" - echo "" - echo "The new base image will only take effect after restarting the VM:" - echo " qvm stop" - echo " qvm start" - echo "" - echo "Note: Changes to your VM overlay will be preserved." - echo " Use 'qvm reset' to start fresh with the new base image." - echo "" - fi -} - -# -# main - Main execution flow -# -main() { - log_info "Rebuilding QVM base image..." - - # Ensure required directories exist - ensure_dirs - - # Ensure user has a flake configuration - ensure_user_flake - - # Build the base image - build_base_image - - # Warn if VM is running - warn_if_running - - # Print next steps - echo "Next steps:" - if is_vm_running; then - echo " 1. Stop the VM: qvm stop" - echo " 2. Start the VM: qvm start" - else - echo " - Start the VM: qvm start" - fi - echo " - Customize the VM: edit $QVM_USER_FLAKE/flake.nix" - echo " - Reset to fresh state: qvm reset" -} - -# Run main function -main "$@" diff --git a/bin/qvm-reset b/bin/qvm-reset deleted file mode 100755 index 8025ced..0000000 --- a/bin/qvm-reset +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env bash -# -# qvm-reset - Wipe VM overlay and workspace registry -# -# This script resets the VM to a clean state by deleting the overlay.qcow2 -# and workspaces.json files. This is useful when you want to start fresh -# or if the VM state has become corrupted. -# -# IMPORTANT: This does NOT delete the base image (base.qcow2), so you won't -# need to re-download or rebuild the NixOS image. -# -# Usage: qvm reset [-f|--force] -# -f, --force Skip confirmation prompt -# - -set -euo pipefail - -# Source common library -readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../lib" && pwd)}" -source "${QVM_LIB_DIR}/common.sh" - -# Get path to qvm-stop script -readonly QVM_STOP="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-stop" - -# -# confirm_reset - Prompt user for confirmation -# Returns: 0 if user confirms, exits script if user cancels -# -confirm_reset() { - echo - log_warn "This will delete:" - - if [[ -f "$QVM_OVERLAY" ]]; then - echo " - $QVM_OVERLAY" - fi - - if [[ -f "$QVM_WORKSPACES_FILE" ]]; then - echo " - $QVM_WORKSPACES_FILE" - fi - - if [[ ! -f "$QVM_OVERLAY" ]] && [[ ! -f "$QVM_WORKSPACES_FILE" ]]; then - log_info "No files to delete (already clean)" - exit 0 - fi - - echo - echo "The base image (base.qcow2) and cache directories will NOT be deleted." - echo - read -p "Continue with reset? [y/N] " -n 1 -r - echo - - if [[ ! 
$REPLY =~ ^[Yy]$ ]]; then - log_info "Reset cancelled" - exit 0 - fi -} - -# -# main - Main reset orchestration -# -main() { - local force=false - - # Parse arguments - while [[ $# -gt 0 ]]; do - case "$1" in - -f|--force) - force=true - shift - ;; - *) - die "Unknown option: $1" - ;; - esac - done - - # Confirm unless --force is used - if [[ "$force" != true ]]; then - confirm_reset - fi - - # Stop VM if running - if is_vm_running; then - log_info "VM is running, stopping it first..." - "$QVM_STOP" - fi - - # Delete overlay if it exists - if [[ -f "$QVM_OVERLAY" ]]; then - log_info "Deleting overlay: $QVM_OVERLAY" - rm -f "$QVM_OVERLAY" - else - log_info "Overlay does not exist (nothing to delete)" - fi - - # Delete workspaces registry if it exists - if [[ -f "$QVM_WORKSPACES_FILE" ]]; then - log_info "Deleting workspaces registry: $QVM_WORKSPACES_FILE" - rm -f "$QVM_WORKSPACES_FILE" - else - log_info "Workspaces registry does not exist (nothing to delete)" - fi - - # Print success message with next steps - echo - log_info "Reset complete!" - echo - echo "Next steps:" - echo " - Run 'qvm start' to boot the VM with a fresh overlay" - echo " - Your base image (base.qcow2) is still intact" - echo " - Cache directories (cargo-home, pnpm-store, etc.) are preserved" - echo -} - -main "$@" diff --git a/bin/qvm-run b/bin/qvm-run deleted file mode 100755 index 99cb73a..0000000 --- a/bin/qvm-run +++ /dev/null @@ -1,309 +0,0 @@ -#!/usr/bin/env bash -# -# qvm-run - Execute a command in the VM workspace -# -# This script: -# 1. Ensures VM is running (auto-starts if needed) -# 2. Registers current $PWD as a workspace in workspaces.json -# 3. SSHes into VM and executes command in workspace mount point -# 4. Streams output and preserves exit code -# -# Usage: qvm-run <command> [args...] -# -# Notes: -# - Workspaces are pre-mounted at VM start time (no dynamic 9p hotplug) -# - If workspace not already registered, warns to restart VM -# - Uses workspace hash for mount tag and path -# - -set -euo pipefail - -# Source common library -QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}" -# shellcheck source=lib/common.sh -source "$QVM_LIB_DIR/common.sh" - -# -# show_usage - Display usage information -# -show_usage() { - cat <<EOF -Usage: qvm run <command> [args...] - -Execute a command in the VM at the current workspace directory. - -The current directory (\$PWD) is automatically registered as a workspace -and mounted into the VM. Commands run in the mounted workspace directory. - -Examples: - qvm run cargo build - qvm run npm install - qvm run ls -la - qvm run bash -c "pwd && ls" - -Notes: - - Workspaces are mounted at VM start time - - If this workspace is new, you'll need to restart the VM - - Command output streams to your terminal - - Exit code matches the command's exit code - -EOF -} - -# -# register_workspace - Add workspace to registry if not already present -# Args: $1 - absolute path to workspace -# $2 - workspace hash -# Returns: 0 if already registered, 1 if newly added (requires VM restart) -# -register_workspace() { - local workspace_path="$1" - local hash="$2" - local dir_name - dir_name=$(basename "$workspace_path") - local mount_tag="ws_${hash}" - local guest_path="/workspace/${hash}_${dir_name}" - - # Create workspaces.json if it doesn't exist - if [[ !
-f "$QVM_WORKSPACES_FILE" ]]; then - echo '[]' > "$QVM_WORKSPACES_FILE" - fi - - # Check if workspace already registered - if jq -e --arg path "$workspace_path" '.[] | select(.host_path == $path)' "$QVM_WORKSPACES_FILE" >/dev/null 2>&1; then - log_info "Workspace already registered: $workspace_path" - return 0 - fi - - # Add new workspace to registry - log_info "Registering new workspace: $workspace_path" - local temp_file - temp_file=$(mktemp) - - jq --arg path "$workspace_path" \ - --arg hash "$hash" \ - --arg tag "$mount_tag" \ - --arg guest "$guest_path" \ - '. += [{ - host_path: $path, - hash: $hash, - mount_tag: $tag, - guest_path: $guest - }]' "$QVM_WORKSPACES_FILE" > "$temp_file" - - mv "$temp_file" "$QVM_WORKSPACES_FILE" - log_info "Workspace registered as $mount_tag -> $guest_path" - - return 1 # Indicate new workspace added -} - -# -# ensure_workspace_mounted - Mount workspace in VM if not already mounted -# Args: $1 - SSH port -# $2 - mount tag (e.g., ws_abc123) -# $3 - guest path (e.g., /workspace/abc123) -# Returns: 0 on success -# -ensure_workspace_mounted() { - local ssh_port="$1" - local mount_tag="$2" - local guest_path="$3" - - # SSH into VM and mount the workspace - # - mkdir creates the mount point if missing - # - mount attempts to mount the 9p virtfs - # - || true ensures we don't fail if already mounted - sshpass -p root ssh -o StrictHostKeyChecking=no \ - -o UserKnownHostsFile=/dev/null \ - -o LogLevel=ERROR \ - -o PubkeyAuthentication=no \ - -o PasswordAuthentication=yes \ - -p "$ssh_port" \ - root@localhost \ - "mkdir -p '$guest_path' && mount -t 9p -o trans=virtio,version=9p2000.L,msize=104857600 '$mount_tag' '$guest_path' 2>/dev/null || true" >/dev/null 2>&1 - - return 0 -} - -# -# is_workspace_mounted - Check if workspace is actually mounted in VM -# Args: $1 - SSH port -# $2 - guest path -# Returns: 0 if mounted, 1 if not -# -is_workspace_mounted() { - local ssh_port="$1" - local guest_path="$2" - - # SSH into VM and check if guest path exists and is a directory - if sshpass -p root ssh -o StrictHostKeyChecking=no \ - -o UserKnownHostsFile=/dev/null \ - -o LogLevel=ERROR \ - -o PubkeyAuthentication=no \ - -o PasswordAuthentication=yes \ - -p "$ssh_port" \ - root@localhost \ - "test -d '$guest_path'" 2>/dev/null; then - return 0 - else - return 1 - fi -} - -# -# main - Main execution flow -# -main() { - # Show usage if no arguments - # Handle help flags first - if [[ $# -gt 0 && ( "$1" == "-h" || "$1" == "--help" ) ]]; then - show_usage - exit 0 - fi - - # Ensure directories exist before accessing workspaces.json - ensure_dirs - - # If no command given, default to interactive zsh shell - local run_shell=false - if [[ $# -eq 0 ]]; then - run_shell=true - fi - - # Get current workspace (absolute path) - local workspace_path - workspace_path="$(pwd)" - - # Generate workspace hash and guest path - local hash - hash=$(workspace_hash "$workspace_path") - local dir_name - dir_name=$(basename "$workspace_path") - - local guest_path="/workspace/${hash}_${dir_name}" - - log_info "Workspace: $workspace_path" - log_info "Guest path: $guest_path" - - # Register workspace in registry - local newly_added=0 - if ! register_workspace "$workspace_path" "$hash"; then - newly_added=1 - fi - - # If this is a newly registered workspace, restart VM to mount it - if [[ "$newly_added" -eq 1 ]] && is_vm_running; then - log_info "New workspace registered. Restarting VM to mount it..." 
- - local qvm_stop="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-stop" - local qvm_start="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-start" - - # Stop the VM - if ! "$qvm_stop"; then - die "Failed to stop VM" - fi - - # Start the VM with new workspace mount - if ! "$qvm_start"; then - die "Failed to start VM" - fi - - log_info "VM restarted with new workspace mounted" - fi - - # Ensure VM is running (if it wasn't running before) - if ! is_vm_running; then - log_info "VM not running, starting..." - - local qvm_start="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-start" - - if ! "$qvm_start"; then - die "Failed to start VM" - fi - fi - - # Get SSH port - local ssh_port - ssh_port=$(get_ssh_port) - - # Get mount tag from workspaces.json - local mount_tag - mount_tag=$(jq -r --arg path "$workspace_path" '.[] | select(.host_path == $path) | .mount_tag' "$QVM_WORKSPACES_FILE") - - # Ensure workspace is mounted (auto-mount if not) - log_info "Ensuring workspace is mounted..." - ensure_workspace_mounted "$ssh_port" "$mount_tag" "$guest_path" - - # Verify workspace is actually mounted - if ! is_workspace_mounted "$ssh_port" "$guest_path"; then - log_error "Failed to mount workspace in VM" - echo "" - echo "The workspace could not be mounted automatically." - echo "This may indicate the VM was started before this workspace was registered." - echo "" - echo "Please restart the VM to properly configure the workspace:" - echo " qvm stop" - echo " qvm start" - echo "" - echo "Then try your command again." - exit 1 - fi - - # Build SSH command - # - Use sshpass for automated password auth (password: root) - # - Use -t if stdin is a TTY (for interactive commands) - # - Suppress SSH warnings (ephemeral VM, host keys change) - # - cd to guest path and execute command - local ssh_cmd=( - sshpass -p root - ssh - -o StrictHostKeyChecking=no - -o UserKnownHostsFile=/dev/null - -o LogLevel=ERROR - -o PubkeyAuthentication=no - -o PasswordAuthentication=yes - -p "$ssh_port" - ) - - # Add -t flag if stdin is a TTY - if [[ -t 0 ]]; then - ssh_cmd+=(-t) - fi - - # Add connection target - ssh_cmd+=(root@localhost) - - # Build remote command: cd to workspace and execute user's command (or shell) - local remote_cmd="cd '$guest_path'" - - if [[ "$run_shell" == "true" ]]; then - # No command - start interactive zsh shell - remote_cmd+=" && exec zsh" - else - # Append user's command with proper quoting - remote_cmd+=" && " - local first_arg=1 - for arg in "$@"; do - if [[ $first_arg -eq 1 ]]; then - remote_cmd+="$arg" - first_arg=0 - else - # Quote arguments that contain spaces or special characters - if [[ "$arg" =~ [[:space:]] ]]; then - remote_cmd+=" '$arg'" - else - remote_cmd+=" $arg" - fi - fi - done - fi - - # Add the remote command as final SSH argument - ssh_cmd+=("$remote_cmd") - - # Execute SSH command (replaces current process) - exec "${ssh_cmd[@]}" -} - -# Run main function -main "$@" diff --git a/bin/qvm-ssh b/bin/qvm-ssh deleted file mode 100755 index 458debc..0000000 --- a/bin/qvm-ssh +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env bash -# -# qvm-ssh - Direct SSH access to the VM -# -# This script provides SSH access to the running VM: -# - Auto-starts VM if not running -# - Interactive shell by default (detects TTY) -# - Single command execution with -c flag -# - Passes through additional SSH arguments -# - Uses StrictHostKeyChecking=no for host key management -# - -set -euo pipefail - -# Source common library -QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$(readlink -f "$0")")/../lib}" -# shellcheck source=lib/common.sh -source 
"$QVM_LIB_DIR/common.sh" - -# -# show_usage - Display help text -# -show_usage() { - cat </dev/null; then - echo "$port" - return 0 - fi - (( port++ )) || true - (( attempt++ )) || true - done - - die "Could not find available port after $max_attempts attempts" -} - -# -# build_qemu_opts - Build QEMU_OPTS environment variable with 9p mounts -# -build_qemu_opts() { - local ssh_port="$1" - local opts="" - - # 9p mounts for shared caches - opts+="-virtfs local,path=$QVM_CARGO_HOME,mount_tag=cargo_home,security_model=mapped-xattr " - opts+="-virtfs local,path=$QVM_CARGO_TARGET,mount_tag=cargo_target,security_model=mapped-xattr " - opts+="-virtfs local,path=$QVM_PNPM_STORE,mount_tag=pnpm_store,security_model=mapped-xattr " - opts+="-virtfs local,path=$QVM_SCCACHE,mount_tag=sccache,security_model=mapped-xattr " - - # Mount host opencode config if it exists - if [[ -d "$QVM_HOST_OPENCODE_CONFIG" ]]; then - log_info "Adding opencode config mount..." - opts+="-virtfs local,path=$QVM_HOST_OPENCODE_CONFIG,mount_tag=opencode_config,security_model=mapped-xattr " - fi - - # Add workspace mounts from registry - if [[ -f "$QVM_WORKSPACES_FILE" && -s "$QVM_WORKSPACES_FILE" ]]; then - local workspace_count - workspace_count=$(jq -r 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0") - - if [[ "$workspace_count" -gt 0 ]]; then - log_info "Adding $workspace_count workspace mount(s)..." - - local i=0 - while (( i < workspace_count )); do - local path mount_tag - path=$(jq -r ".[$i].host_path" "$QVM_WORKSPACES_FILE") - mount_tag=$(jq -r ".[$i].mount_tag" "$QVM_WORKSPACES_FILE") - - if [[ -n "$path" && -n "$mount_tag" && "$path" != "null" && "$mount_tag" != "null" && -d "$path" ]]; then - log_info " - $path -> $mount_tag" - opts+="-virtfs local,path=$path,mount_tag=$mount_tag,security_model=mapped-xattr " - fi - - (( i++ )) || true - done - fi - fi - - # Serial console to log file and daemonize - opts+="-serial file:$QVM_SERIAL_LOG " - opts+="-display none " - opts+="-daemonize " - opts+="-pidfile $QVM_PID_FILE " - - echo "$opts" -} - -# -# cleanup_on_failure - Clean up state files if VM start fails -# -cleanup_on_failure() { - log_warn "Cleaning up after failed start..." - rm -f "$QVM_PID_FILE" "$QVM_SSH_PORT_FILE" -} - -# -# main - Main execution flow -# -main() { - log_info "Starting QVM..." - - # Check if VM is already running - if is_vm_running; then - log_info "VM is already running" - local port - port=$(get_ssh_port) - echo "SSH available on port: $port" - echo "Use 'qvm ssh' to connect or 'qvm status' for details" - exit 0 - fi - - # First-run initialization - ensure_dirs - - # Source config file if it exists (sets QVM_MEMORY, QVM_CPUS, etc.) - # Check system-wide config first, then user config (user overrides system) - if [[ -f "/etc/xdg/qvm/qvm.conf" ]]; then - source "/etc/xdg/qvm/qvm.conf" - fi - if [[ -f "$QVM_CONFIG_FILE" ]]; then - source "$QVM_CONFIG_FILE" - fi - - # Check if VM runner exists, build if not - if [[ ! -L "$QVM_VM_RUNNER" || ! -f "$(readlink -f "$QVM_VM_RUNNER" 2>/dev/null || echo "")" ]]; then - log_info "First run detected - building VM..." - log_info "This may take several minutes." - - local qvm_rebuild="${QVM_BIN_DIR:-$(dirname "$0")}/qvm-rebuild" - if ! "$qvm_rebuild"; then - die "Failed to build VM. Run 'qvm rebuild' manually to debug." - fi - fi - - # Verify VM runner exists now - if [[ ! -L "$QVM_VM_RUNNER" ]]; then - die "VM runner not found at $QVM_VM_RUNNER. Run 'qvm rebuild' first." - fi - - local vm_script - vm_script=$(readlink -f "$QVM_VM_RUNNER") - if [[ ! 
-f "$vm_script" ]]; then - die "VM runner script not found. Run 'qvm rebuild' to fix." - fi - - # Find available SSH port - local ssh_port - ssh_port=$(find_available_port 2222) - log_info "Using SSH port: $ssh_port" - - # Get memory and CPU settings from environment or use defaults - local memory="${QVM_MEMORY:-30G}" - local cpus="${QVM_CPUS:-30}" - log_info "VM resources: ${memory} memory, ${cpus} CPUs" - - # Build QEMU options - local qemu_opts - qemu_opts=$(build_qemu_opts "$ssh_port") - - # Launch VM using the NixOS runner script - # The runner script respects these environment variables: - # - QEMU_OPTS: additional QEMU options - # - NIX_DISK_IMAGE: path to disk image (optional, uses tmpdir by default) - log_info "Launching VM..." - - # Create persistent disk image location if needed - local disk_image="$QVM_STATE_DIR/qvm-dev.qcow2" - - export QEMU_OPTS="$qemu_opts -m $memory -smp $cpus" - export QEMU_NET_OPTS="hostfwd=tcp::${ssh_port}-:22" - export NIX_DISK_IMAGE="$disk_image" - - # Run VM - the script uses exec with qemu's -daemonize flag, so it returns quickly - if ! "$vm_script" &>/dev/null; then - cleanup_on_failure - die "Failed to start VM" - fi - - # Wait a moment for QEMU to create PID file - sleep 2 - - # If PID file wasn't created by our QEMU_OPTS, get it from the background process - if [[ ! -f "$QVM_PID_FILE" ]]; then - # Try to find the QEMU process - local qemu_pid - qemu_pid=$(pgrep -f "qemu.*qvm-dev" | head -1 || echo "") - if [[ -n "$qemu_pid" ]]; then - echo "$qemu_pid" > "$QVM_PID_FILE" - fi - fi - - # Save SSH port to file - echo "$ssh_port" > "$QVM_SSH_PORT_FILE" - - # Wait for SSH to become available - if ! wait_for_ssh "$ssh_port" 120; then - cleanup_on_failure - die "VM started but SSH did not become available. Check: $QVM_SERIAL_LOG" - fi - - # Success! 
- log_info "VM started successfully" - echo "" - echo "SSH available on port: $ssh_port" - echo "Connect with: qvm ssh" - echo "Check status: qvm status" - echo "Serial log: $QVM_SERIAL_LOG" -} - -# Run main function -main "$@" diff --git a/bin/qvm-status b/bin/qvm-status deleted file mode 100755 index 7e1af05..0000000 --- a/bin/qvm-status +++ /dev/null @@ -1,228 +0,0 @@ -#!/usr/bin/env bash -# -# qvm-status - Display VM state, configuration, and connection information -# -# Shows current VM status including: -# - Running state (PID, uptime, SSH port) -# - Mounted workspaces from workspaces.json -# - Cache directory status -# - Base image and overlay details -# - Connection hints for SSH and run commands -# -# Exit codes: -# 0 - VM is running -# 1 - VM is stopped - -set -euo pipefail - -# Source common library for shared functions and constants -readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(dirname "$0")/../lib}" -source "${QVM_LIB_DIR}/common.sh" - -# Additional color codes for status display -if [[ -t 1 ]]; then - readonly COLOR_SUCCESS='\033[0;32m' # Green - readonly COLOR_HEADER='\033[1;37m' # Bold White - readonly COLOR_DIM='\033[0;90m' # Dim Gray -else - readonly COLOR_SUCCESS='' - readonly COLOR_HEADER='' - readonly COLOR_DIM='' -fi - -# -# format_bytes - Convert bytes to human-readable format -# Args: $1 - size in bytes -# Returns: formatted string (e.g., "1.5G", "256M", "4.0K") -# -format_bytes() { - local bytes="$1" - if (( bytes >= 1073741824 )); then - printf "%.1fG" "$(echo "scale=1; $bytes / 1073741824" | bc)" - elif (( bytes >= 1048576 )); then - printf "%.1fM" "$(echo "scale=1; $bytes / 1048576" | bc)" - elif (( bytes >= 1024 )); then - printf "%.1fK" "$(echo "scale=1; $bytes / 1024" | bc)" - else - printf "%dB" "$bytes" - fi -} - -# -# get_uptime - Calculate VM uptime from PID -# Args: $1 - process PID -# Returns: uptime string (e.g., "2h 15m", "45m", "30s") -# -get_uptime() { - local pid="$1" - - # Get process start time in seconds since epoch - local start_time - start_time=$(ps -p "$pid" -o lstart= 2>/dev/null | xargs -I{} date -d "{}" +%s) - - if [[ -z "$start_time" ]]; then - echo "unknown" - return - fi - - local current_time - current_time=$(date +%s) - local uptime_seconds=$((current_time - start_time)) - - # Format uptime - local hours=$((uptime_seconds / 3600)) - local minutes=$(( (uptime_seconds % 3600) / 60 )) - local seconds=$((uptime_seconds % 60)) - - if (( hours > 0 )); then - printf "%dh %dm" "$hours" "$minutes" - elif (( minutes > 0 )); then - printf "%dm %ds" "$minutes" "$seconds" - else - printf "%ds" "$seconds" - fi -} - -# -# show_file_info - Display file status with size and modification time -# Args: $1 - file path -# $2 - label (e.g., "Base Image") -# -show_file_info() { - local file="$1" - local label="$2" - - if [[ -f "$file" ]]; then - local size_bytes - size_bytes=$(stat -c %s "$file" 2>/dev/null || echo "0") - local size_human - size_human=$(format_bytes "$size_bytes") - - local mod_time - mod_time=$(stat -c %y "$file" 2>/dev/null | cut -d. 
-f1) - - echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} $label: ${COLOR_INFO}$size_human${COLOR_RESET} ${COLOR_DIM}(modified: $mod_time)${COLOR_RESET}" - else - echo -e " ${COLOR_WARN}✗${COLOR_RESET} $label: ${COLOR_WARN}missing${COLOR_RESET}" - fi -} - -# -# show_dir_info - Display directory status -# Args: $1 - directory path -# $2 - label (e.g., "Cargo Home") -# -show_dir_info() { - local dir="$1" - local label="$2" - - if [[ -d "$dir" ]]; then - echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} $label: ${COLOR_DIM}$dir${COLOR_RESET}" - else - echo -e " ${COLOR_WARN}✗${COLOR_RESET} $label: ${COLOR_WARN}not created${COLOR_RESET}" - fi -} - -# -# show_workspaces - Display mounted workspaces from workspaces.json -# -show_workspaces() { - if [[ ! -f "$QVM_WORKSPACES_FILE" ]]; then - echo -e " ${COLOR_DIM}No workspaces mounted${COLOR_RESET}" - return - fi - - # Check if file is valid JSON and has workspaces - local workspace_count - workspace_count=$(jq 'length' "$QVM_WORKSPACES_FILE" 2>/dev/null || echo "0") - - if (( workspace_count == 0 )); then - echo -e " ${COLOR_DIM}No workspaces mounted${COLOR_RESET}" - return - fi - - # Parse and display each workspace - jq -r 'to_entries[] | "\(.key)|\(.value.host_path)|\(.value.guest_path)"' "$QVM_WORKSPACES_FILE" 2>/dev/null | while IFS='|' read -r hash host_path guest_path; do - echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} $hash: ${COLOR_INFO}$host_path${COLOR_RESET} → ${COLOR_DIM}$guest_path${COLOR_RESET}" - done -} - -# -# main - Main status display logic -# -main() { - # Header - echo -e "${COLOR_HEADER}QVM Status${COLOR_RESET}" - echo "" - - # VM State - echo -e "${COLOR_HEADER}VM State:${COLOR_RESET}" - - if is_vm_running; then - local pid - pid=$(cat "$QVM_PID_FILE") - - local ssh_port - if [[ -f "$QVM_SSH_PORT_FILE" ]]; then - ssh_port=$(cat "$QVM_SSH_PORT_FILE") - else - ssh_port="unknown" - fi - - local uptime - uptime=$(get_uptime "$pid") - - echo -e " ${COLOR_SUCCESS}✓${COLOR_RESET} Running" - echo -e " ${COLOR_DIM}PID:${COLOR_RESET} $pid" - echo -e " ${COLOR_DIM}SSH:${COLOR_RESET} localhost:$ssh_port" - echo -e " ${COLOR_DIM}Uptime:${COLOR_RESET} $uptime" - else - echo -e " ${COLOR_WARN}✗${COLOR_RESET} Stopped" - fi - - echo "" - - # Workspaces - echo -e "${COLOR_HEADER}Mounted Workspaces:${COLOR_RESET}" - show_workspaces - echo "" - - # Cache Directories - echo -e "${COLOR_HEADER}Cache Directories:${COLOR_RESET}" - show_dir_info "$QVM_CARGO_HOME" "Cargo Home" - show_dir_info "$QVM_CARGO_TARGET" "Cargo Target" - show_dir_info "$QVM_PNPM_STORE" "PNPM Store" - show_dir_info "$QVM_SCCACHE" "SCCache" - echo "" - - # VM Images - echo -e "${COLOR_HEADER}VM Images:${COLOR_RESET}" - show_file_info "$QVM_BASE_IMAGE" "Base Image" - show_file_info "$QVM_OVERLAY" "Overlay" - echo "" - - # Connection Hints (only if VM is running) - if is_vm_running; then - local ssh_port - ssh_port=$(cat "$QVM_SSH_PORT_FILE" 2>/dev/null || echo "unknown") - - echo -e "${COLOR_HEADER}Connection:${COLOR_RESET}" - echo -e " ${COLOR_INFO}SSH:${COLOR_RESET} qvm ssh" - echo -e " ${COLOR_INFO}Run cmd:${COLOR_RESET} qvm run <command>" - echo -e " ${COLOR_INFO}Direct:${COLOR_RESET} ssh -p $ssh_port root@localhost" - echo "" - - # Exit success if running - exit 0 - else - echo -e "${COLOR_HEADER}Quick Start:${COLOR_RESET}" - echo -e " ${COLOR_INFO}Start VM:${COLOR_RESET} qvm start" - echo "" - - # Exit failure if stopped - exit 1 - fi -} - -# Execute main -main "$@" diff --git a/bin/qvm-stop b/bin/qvm-stop deleted file mode 100755 index c42a4c2..0000000 --- a/bin/qvm-stop +++ /dev/null @@ -1,94
+0,0 @@ -#!/usr/bin/env bash -# -# qvm-stop - Gracefully shut down the QEMU VM -# -# This script stops the running VM by sending SIGTERM first for graceful -# shutdown, waiting up to 30 seconds, then sending SIGKILL if necessary. -# It cleans up state files (vm.pid, ssh.port) after shutdown completes. -# -# Usage: qvm stop -# - -set -euo pipefail - -# Source common library -readonly QVM_LIB_DIR="${QVM_LIB_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../lib" && pwd)}" -source "${QVM_LIB_DIR}/common.sh" - -# Timeout for graceful shutdown in seconds -readonly SHUTDOWN_TIMEOUT=30 - -# -# wait_for_process_exit - Wait for process to terminate -# Args: $1 - PID to wait for -# $2 - timeout in seconds -# Returns: 0 if process exits, 1 on timeout -# -wait_for_process_exit() { - local pid="$1" - local timeout="$2" - local elapsed=0 - - while (( elapsed < timeout )); do - if ! kill -0 "$pid" 2>/dev/null; then - return 0 - fi - sleep 1 - (( elapsed++ )) - done - - return 1 -} - -# -# main - Main shutdown orchestration -# -main() { - # Check if VM is running - if ! is_vm_running; then - log_info "VM is not running" - exit 0 - fi - - # Get VM process PID - local vm_pid - vm_pid=$(cat "$QVM_PID_FILE") - - log_info "Shutting down VM (PID: $vm_pid)..." - - # Send SIGTERM for graceful shutdown - if kill -TERM "$vm_pid" 2>/dev/null; then - log_info "Sent SIGTERM, waiting up to ${SHUTDOWN_TIMEOUT}s for graceful shutdown..." - - if wait_for_process_exit "$vm_pid" "$SHUTDOWN_TIMEOUT"; then - log_info "VM shut down gracefully" - else - log_warn "Graceful shutdown timeout, forcefully terminating..." - - # Send SIGKILL to force termination - if kill -KILL "$vm_pid" 2>/dev/null; then - # Wait briefly to ensure process is dead - sleep 1 - - # Verify process is actually dead - if kill -0 "$vm_pid" 2>/dev/null; then - die "Failed to kill VM process $vm_pid" - fi - - log_info "VM forcefully terminated" - else - log_warn "Process $vm_pid already terminated" - fi - fi - else - log_warn "Process $vm_pid already terminated (could not send SIGTERM)" - fi - - # Clean up state files - log_info "Cleaning up state files..." - rm -f "$QVM_PID_FILE" "$QVM_SSH_PORT_FILE" - - log_info "VM stopped successfully" -} - -main "$@" diff --git a/docs/virtio-fs-requirements.md b/docs/virtio-fs-requirements.md new file mode 100644 index 0000000..70d07c4 --- /dev/null +++ b/docs/virtio-fs-requirements.md @@ -0,0 +1,135 @@ +# virtio-fs Prerequisites Verification + +## Date: 2026-01-26 + +This document verifies that all prerequisites for virtio-fs support are available in nixpkgs. + +## Requirements Summary + +virtio-fs requires: +1. **QEMU 4.2.0+** - vhost-user-fs device support introduced +2. **virtiofsd daemon** - User-space filesystem daemon for virtio-fs +3. **Guest kernel CONFIG_VIRTIO_FS** - Linux 5.4+ with virtio-fs driver enabled + +--- + +## Verification Results + +### 1. QEMU Version + +**Command:** +```bash +nix eval nixpkgs#qemu.version --raw +``` + +**Result:** +``` +10.1.2 +``` + +**Status:** ✅ **PASS** - Version 10.1.2 is significantly newer than the required 4.2.0 + +**Notes:** +- QEMU 10.x includes full virtio-fs support with vhost-user-fs backend +- Package provides `qemu-kvm` as main program +- Multiple outputs available: out, doc, ga, debug + +--- + +### 2. 
virtiofsd Package + +**Command:** +```bash +nix search nixpkgs virtiofsd --json +``` + +**Result:** +``` +legacyPackages.x86_64-linux.virtiofsd +``` + +**Version Check:** +```bash +nix eval nixpkgs#virtiofsd.version --raw +``` + +**Version:** +``` +1.13.2 +``` + +**Status:** ✅ **PASS** - virtiofsd is available as a standalone package + +**Notes:** +- virtiofsd is available in nixpkgs as a separate package +- Version 1.13.2 is a modern Rust-based implementation +- This is the newer rust-virtiofsd, not the original C implementation from QEMU + +--- + +### 3. Kernel CONFIG_VIRTIO_FS Support + +**Command:** +```bash +nix-shell -p linux --run "zcat /proc/config.gz | grep CONFIG_VIRTIO_FS" +``` + +**Kernel Version:** +```bash +nix eval nixpkgs#linux.version --raw +``` + +**Version:** `6.12.63` + +**Result:** +``` +CONFIG_VIRTIO_FS=m +``` + +**Status:** ✅ **PASS** - virtio-fs is enabled as a loadable kernel module + +**Notes:** +- Linux kernel 6.12.63 is much newer than the required 5.4+ +- CONFIG_VIRTIO_FS is compiled as a module (`=m`) +- Module will be available in NixOS VM builds by default +- virtio-fs driver can be loaded on-demand + +--- + +## NixOS-Specific Considerations + +### Module Loading +- Kernel module `virtiofs` will need to be loaded in the guest +- NixOS typically handles this automatically via `boot.initrd.availableKernelModules` or runtime modprobe + +### QEMU Integration +- QEMU package in nixpkgs is the full-featured build +- Includes vhost-user-fs device support +- No custom QEMU build needed + +### virtiofsd Daemon +- Must be started on the host before launching VM +- Requires socket path for communication with QEMU +- NixOS can manage this via systemd if desired + +--- + +## Conclusion + +**✅ ALL PREREQUISITES AVAILABLE** + +All required components for virtio-fs are present in nixpkgs: +- ✅ QEMU 10.1.2 (requirement: 4.2.0+) +- ✅ virtiofsd 1.13.2 (standalone package) +- ✅ Linux kernel 6.12.63 with CONFIG_VIRTIO_FS=m (requirement: 5.4+) + +**No blockers identified.** We can proceed with virtio-fs implementation. + +--- + +## Next Steps + +1. Design virtiofsd socket/daemon management strategy +2. Update QEMU launch arguments to use vhost-user-fs-pci device +3. Configure guest kernel to mount virtio-fs filesystems +4. Update NixOS VM flake to include virtiofsd in systemPackages or as a service
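+
+A minimal sketch of how steps 1-3 could fit together, assuming the rust-virtiofsd CLI verified above; the socket path, shared directory, memory size, and `workspace` tag are illustrative placeholders, not settled QVM configuration:
+
+```bash
+# Host: export a directory over a vhost-user socket (paths are placeholders)
+virtiofsd --socket-path=/tmp/qvm-vfs.sock --shared-dir=/path/to/workspace &
+
+# Host: attach the share to QEMU; virtio-fs requires shared guest memory,
+# hence the memfd backend with share=on (all other QVM options omitted)
+qemu-system-x86_64 \
+  -m 4G \
+  -object memory-backend-memfd,id=mem,size=4G,share=on \
+  -numa node,memdev=mem \
+  -chardev socket,id=char0,path=/tmp/qvm-vfs.sock \
+  -device vhost-user-fs-pci,chardev=char0,tag=workspace
+
+# Guest: mount by tag once the virtiofs module is loaded
+mount -t virtiofs workspace /workspace
+```
diff --git a/flake.nix b/flake.nix index f2d0c72..bd1f728 100644 --- a/flake.nix +++ b/flake.nix @@ -53,67 +53,42 @@ let pkgs = import nixpkgs { inherit system; }; - qvm = pkgs.stdenv.mkDerivation { + qvm = pkgs.buildGoModule { pname = "qvm"; version = "0.1.0"; + # NOTE: In a flake, only git-tracked files are included by default. + # The Go source files must be committed to git for this build to work.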
+ # For development, use: go build ./cmd/qvm src = ./.; + vendorHash = "sha256-d6Z32nPDawwFqhKfVw/QwHUuDuMuTdQdHApmxcXzFng="; + + subPackages = [ "cmd/qvm" ]; + nativeBuildInputs = with pkgs; [ makeWrapper - installShellFiles ]; - buildInputs = with pkgs; [ - bash - ]; - - installPhase = '' - runHook preInstall - - # Create output directories - mkdir -p $out/bin - mkdir -p $out/lib/qvm - mkdir -p $out/share/qvm - - # Install library files - install -Dm755 lib/common.sh $out/lib/qvm/common.sh - + postInstall = '' # Install default VM flake template - if [ -d "flake/default-vm" ]; then - cp -r flake/default-vm $out/share/qvm/default-vm + if [ -d "$src/flake/default-vm" ]; then + mkdir -p $out/share/qvm + cp -r $src/flake/default-vm $out/share/qvm/default-vm fi - # Install all scripts from bin/ - for script in bin/*; do - if [ -f "$script" ]; then - install -Dm755 "$script" "$out/bin/$(basename "$script")" - fi - done - - # Wrap all scripts with PATH containing required dependencies - for script in $out/bin/*; do - wrapProgram "$script" \ - --prefix PATH : ${ - pkgs.lib.makeBinPath [ - pkgs.qemu - pkgs.openssh - pkgs.jq - pkgs.coreutils - pkgs.gnused - pkgs.gnugrep - pkgs.nix - pkgs.netcat-gnu - pkgs.bc - pkgs.procps - pkgs.sshpass - ] - } \ - --set QVM_LIB_DIR "$out/lib/qvm" \ - --set QVM_BIN_DIR "$out/bin" - done - - runHook postInstall + # Wrap binary with PATH containing required dependencies + wrapProgram $out/bin/qvm \ + --prefix PATH : ${ + pkgs.lib.makeBinPath [ + pkgs.qemu + pkgs.openssh + pkgs.jq + pkgs.nix + pkgs.netcat-gnu + pkgs.sshpass + ] + } ''; meta = with pkgs.lib; { @@ -150,24 +125,20 @@ qemu openssh jq - coreutils - gnused - gnugrep nix netcat-gnu - bc - procps sshpass # Development tools - shellcheck - shfmt + go + gopls + gotools ]; shellHook = '' - export QVM_LIB_DIR="$(pwd)/lib" - echo "QVM development environment" - echo "Library directory: $QVM_LIB_DIR" + echo "QVM development environment (Go)" + echo "Build: go build ./cmd/qvm" + echo "Run: ./qvm status" ''; }; } diff --git a/flake/default-vm/flake.lock b/flake/default-vm/flake.lock new file mode 100644 index 0000000..b95a4fb --- /dev/null +++ b/flake/default-vm/flake.lock @@ -0,0 +1,1059 @@ +{ + "nodes": { + "common": { + "locked": { + "dir": "flakes/common", + "lastModified": 1769438846, + "narHash": "sha256-ahQYSazuB2RpF3XUYqKdwgOBFSbGUB2zQsqKEkSOuxA=", + "ref": "refs/heads/master", + "rev": "4bb36c0f7570b271bbeda67f9c4d5160c819850a", + "revCount": 1176, + "type": "git", + "url": "https://git.joshuabell.xyz/ringofstorms/dotfiles" + }, + "original": { + "dir": "flakes/common", + "type": "git", + "url": "https://git.joshuabell.xyz/ringofstorms/dotfiles" + } + }, + "home-manager": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1769442288, + "narHash": "sha256-p+Xqr+P22TYW2RqbwccSd4UlUDEwl7PnoW3qiH8wVoE=", + "owner": "nix-community", + "repo": "home-manager", + "rev": "384786dc70c4992643f916c7e57f378714fec4f1", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "home-manager", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1769170682, + "narHash": "sha256-oMmN1lVQU0F0W2k6OI3bgdzp2YOHWYUAw79qzDSjenU=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "c5296fdd05cfa2c187990dd909864da9658df755", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1768393167, + "narHash": 
"sha256-n2063BRjHde6DqAz2zavhOOiLUwA3qXt7jQYHyETjX8=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "2f594d5af95d4fdac67fba60376ec11e482041cb", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1769018530, + "narHash": "sha256-MJ27Cy2NtBEV5tsK+YraYr2g851f3Fl1LpNHDzDX15c=", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "88d3861acdd3d2f0e361767018218e51810df8a1", + "type": "github" + }, + "original": { + "owner": "nixos", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nvim_plugin-Almo7aya/openingh.nvim": { + "flake": false, + "locked": { + "lastModified": 1746139196, + "narHash": "sha256-/FlNLWOSIrOYiWzAcgOdu9//QTorCDV1KWb+h6eqLwk=", + "owner": "Almo7aya", + "repo": "openingh.nvim", + "rev": "7cc8c897cb6b34d8ed28e99d95baccef609ed251", + "type": "github" + }, + "original": { + "owner": "Almo7aya", + "repo": "openingh.nvim", + "type": "github" + } + }, + "nvim_plugin-JoosepAlviste/nvim-ts-context-commentstring": { + "flake": false, + "locked": { + "lastModified": 1733574156, + "narHash": "sha256-AjDM3+n4+lNBQi8P2Yrh0Ab06uYCndBQT9TX36rDbOM=", + "owner": "JoosepAlviste", + "repo": "nvim-ts-context-commentstring", + "rev": "1b212c2eee76d787bbea6aa5e92a2b534e7b4f8f", + "type": "github" + }, + "original": { + "owner": "JoosepAlviste", + "repo": "nvim-ts-context-commentstring", + "type": "github" + } + }, + "nvim_plugin-L3MON4D3/LuaSnip": { + "flake": false, + "locked": { + "lastModified": 1768781124, + "narHash": "sha256-3beTsZPT2EwzAVhEzqNhFRnDAU7C7GlzFFymY3aNp9M=", + "owner": "L3MON4D3", + "repo": "LuaSnip", + "rev": "dae4f5aaa3574bd0c2b9dd20fb9542a02c10471c", + "type": "github" + }, + "original": { + "owner": "L3MON4D3", + "repo": "LuaSnip", + "type": "github" + } + }, + "nvim_plugin-MeanderingProgrammer/render-markdown.nvim": { + "flake": false, + "locked": { + "lastModified": 1768589366, + "narHash": "sha256-kKvIivCNe6HlGqiChySeqEHRB0rj6ipKwjko4ix4tRw=", + "owner": "MeanderingProgrammer", + "repo": "render-markdown.nvim", + "rev": "c54380dd4d8d1738b9691a7c349ecad7967ac12e", + "type": "github" + }, + "original": { + "owner": "MeanderingProgrammer", + "repo": "render-markdown.nvim", + "type": "github" + } + }, + "nvim_plugin-MunifTanjim/nui.nvim": { + "flake": false, + "locked": { + "lastModified": 1749392788, + "narHash": "sha256-41slmnvt1z7sCxvpiVuFmQ9g7eCaxQi1dDCL3AxSL1A=", + "owner": "MunifTanjim", + "repo": "nui.nvim", + "rev": "de740991c12411b663994b2860f1a4fd0937c130", + "type": "github" + }, + "original": { + "owner": "MunifTanjim", + "repo": "nui.nvim", + "type": "github" + } + }, + "nvim_plugin-RRethy/vim-illuminate": { + "flake": false, + "locked": { + "lastModified": 1748105647, + "narHash": "sha256-KqAJRCtDBG5xsvNsqkxoBdDckg02u4NBBreYQw7BphA=", + "owner": "RRethy", + "repo": "vim-illuminate", + "rev": "0d1e93684da00ab7c057410fecfc24f434698898", + "type": "github" + }, + "original": { + "owner": "RRethy", + "repo": "vim-illuminate", + "type": "github" + } + }, + "nvim_plugin-Saecki/crates.nvim": { + "flake": false, + "locked": { + "lastModified": 1755956579, + "narHash": "sha256-jfmST/S9ymwgQ99PTCOlJkk5zaxE5HiDV16TmTISDII=", + "owner": "Saecki", + "repo": "crates.nvim", + "rev": "ac9fa498a9edb96dc3056724ff69d5f40b898453", + "type": "github" + }, + "original": { + "owner": "Saecki", + "repo": "crates.nvim", + "type": "github" + } + }, + "nvim_plugin-aznhe21/actions-preview.nvim": { + "flake": false, + 
"locked": { + "lastModified": 1759462626, + "narHash": "sha256-YUeWBXxxeurfWBi0PjUi6izqYAvUw9DHmvsuPXm7ohw=", + "owner": "aznhe21", + "repo": "actions-preview.nvim", + "rev": "cb938c25edaac38d362555f19244a9cb85d561e8", + "type": "github" + }, + "original": { + "owner": "aznhe21", + "repo": "actions-preview.nvim", + "type": "github" + } + }, + "nvim_plugin-b0o/schemastore.nvim": { + "flake": false, + "locked": { + "lastModified": 1768946435, + "narHash": "sha256-nv8ZAqrQBe0ckmHqeI4HLLOKphwxjq7v3OAShQ7HtcY=", + "owner": "b0o", + "repo": "schemastore.nvim", + "rev": "a0375eb6f7f944723162ef41c200ac6b364f53ac", + "type": "github" + }, + "original": { + "owner": "b0o", + "repo": "schemastore.nvim", + "type": "github" + } + }, + "nvim_plugin-catppuccin/nvim": { + "flake": false, + "locked": { + "lastModified": 1767886506, + "narHash": "sha256-cZ6VeF69s0eQ9I7Tz8MoEKuF9w+TbA94vXj2EuDoSgU=", + "owner": "catppuccin", + "repo": "nvim", + "rev": "beaf41a30c26fd7d6c386d383155cbd65dd554cd", + "type": "github" + }, + "original": { + "owner": "catppuccin", + "repo": "nvim", + "type": "github" + } + }, + "nvim_plugin-chrisgrieser/nvim-early-retirement": { + "flake": false, + "locked": { + "lastModified": 1767720082, + "narHash": "sha256-2fQsVQUAuKX0uz+umM7VCUFPRKmIr7L9KQIgnlo3bG0=", + "owner": "chrisgrieser", + "repo": "nvim-early-retirement", + "rev": "79ea1568df53986e0d9f4d36fd542933a34b2e61", + "type": "github" + }, + "original": { + "owner": "chrisgrieser", + "repo": "nvim-early-retirement", + "type": "github" + } + }, + "nvim_plugin-declancm/cinnamon.nvim": { + "flake": false, + "locked": { + "lastModified": 1722992123, + "narHash": "sha256-kccQ4iFMSQ8kvE7hYz90hBrsDLo7VohFj/6lEZZiAO8=", + "owner": "declancm", + "repo": "cinnamon.nvim", + "rev": "450cb3247765fed7871b41ef4ce5fa492d834215", + "type": "github" + }, + "original": { + "owner": "declancm", + "repo": "cinnamon.nvim", + "type": "github" + } + }, + "nvim_plugin-direnv/direnv.vim": { + "flake": false, + "locked": { + "lastModified": 1701514458, + "narHash": "sha256-Lwwm95UEkS8Q0Qsoh10o3sFn48wf7v7eCX/FJJV1HMI=", + "owner": "direnv", + "repo": "direnv.vim", + "rev": "ab2a7e08dd630060cd81d7946739ac7442a4f269", + "type": "github" + }, + "original": { + "owner": "direnv", + "repo": "direnv.vim", + "type": "github" + } + }, + "nvim_plugin-echasnovski/mini.nvim": { + "flake": false, + "locked": { + "lastModified": 1769086673, + "narHash": "sha256-dyKTxP4f6ZZ6MN4nz87X+K1d/WxhwnQ6+HxHSKgqacM=", + "owner": "echasnovski", + "repo": "mini.nvim", + "rev": "9b935c218ddba02e5dc75c94f90143bce1f7c646", + "type": "github" + }, + "original": { + "owner": "echasnovski", + "repo": "mini.nvim", + "type": "github" + } + }, + "nvim_plugin-folke/lazy.nvim": { + "flake": false, + "locked": { + "lastModified": 1765971162, + "narHash": "sha256-5A4kducPwKb5fKX4oSUFvo898P0dqfsqqLxFaXBsbQY=", + "owner": "folke", + "repo": "lazy.nvim", + "rev": "306a05526ada86a7b30af95c5cc81ffba93fef97", + "type": "github" + }, + "original": { + "owner": "folke", + "repo": "lazy.nvim", + "type": "github" + } + }, + "nvim_plugin-folke/lazydev.nvim": { + "flake": false, + "locked": { + "lastModified": 1762423570, + "narHash": "sha256-1g1PLFR3bc++NimbrRpoOMZyqYWHeX6pDoxsiuoJHus=", + "owner": "folke", + "repo": "lazydev.nvim", + "rev": "5231c62aa83c2f8dc8e7ba957aa77098cda1257d", + "type": "github" + }, + "original": { + "owner": "folke", + "repo": "lazydev.nvim", + "type": "github" + } + }, + "nvim_plugin-folke/trouble.nvim": { + "flake": false, + "locked": { + "lastModified": 1761919011, + 
"narHash": "sha256-6U/KWjvRMxWIxcsI2xNU/ltfgkaFG4E3BdzC7brK/DI=", + "owner": "folke", + "repo": "trouble.nvim", + "rev": "bd67efe408d4816e25e8491cc5ad4088e708a69a", + "type": "github" + }, + "original": { + "owner": "folke", + "repo": "trouble.nvim", + "type": "github" + } + }, + "nvim_plugin-folke/which-key.nvim": { + "flake": false, + "locked": { + "lastModified": 1761664528, + "narHash": "sha256-rKaYnXM4gRkkF/+xIFm2oCZwtAU6CeTdRWU93N+Jmbc=", + "owner": "folke", + "repo": "which-key.nvim", + "rev": "3aab2147e74890957785941f0c1ad87d0a44c15a", + "type": "github" + }, + "original": { + "owner": "folke", + "repo": "which-key.nvim", + "type": "github" + } + }, + "nvim_plugin-ggml-org/llama.vim": { + "flake": false, + "locked": { + "lastModified": 1768976621, + "narHash": "sha256-aeA2YDbORc/4j3ANoe1YDizh7zknGif1scv6mMTVa0E=", + "owner": "ggml-org", + "repo": "llama.vim", + "rev": "85ec507281e246ad3e4b1d945ed92eea0745f0fd", + "type": "github" + }, + "original": { + "owner": "ggml-org", + "repo": "llama.vim", + "type": "github" + } + }, + "nvim_plugin-hrsh7th/cmp-buffer": { + "flake": false, + "locked": { + "lastModified": 1743497185, + "narHash": "sha256-dG4U7MtnXThoa/PD+qFtCt76MQ14V1wX8GMYcvxEnbM=", + "owner": "hrsh7th", + "repo": "cmp-buffer", + "rev": "b74fab3656eea9de20a9b8116afa3cfc4ec09657", + "type": "github" + }, + "original": { + "owner": "hrsh7th", + "repo": "cmp-buffer", + "type": "github" + } + }, + "nvim_plugin-hrsh7th/cmp-nvim-lsp": { + "flake": false, + "locked": { + "lastModified": 1763018865, + "narHash": "sha256-CYZdfAsJYQyW413fRvNbsS5uayuc6fKDvDLZ2Y7j3ZQ=", + "owner": "hrsh7th", + "repo": "cmp-nvim-lsp", + "rev": "cbc7b02bb99fae35cb42f514762b89b5126651ef", + "type": "github" + }, + "original": { + "owner": "hrsh7th", + "repo": "cmp-nvim-lsp", + "type": "github" + } + }, + "nvim_plugin-hrsh7th/cmp-path": { + "flake": false, + "locked": { + "lastModified": 1753844861, + "narHash": "sha256-e4Rd2y1Wekp7aobpTGaUeoSBnlfIASDaBR8js5dh2Vw=", + "owner": "hrsh7th", + "repo": "cmp-path", + "rev": "c642487086dbd9a93160e1679a1327be111cbc25", + "type": "github" + }, + "original": { + "owner": "hrsh7th", + "repo": "cmp-path", + "type": "github" + } + }, + "nvim_plugin-hrsh7th/nvim-cmp": { + "flake": false, + "locked": { + "lastModified": 1767368202, + "narHash": "sha256-gwuiUgz3UEFpaKs79BSWS4qkwOi+XMHIDFdYRatWt0g=", + "owner": "hrsh7th", + "repo": "nvim-cmp", + "rev": "85bbfad83f804f11688d1ab9486b459e699292d6", + "type": "github" + }, + "original": { + "owner": "hrsh7th", + "repo": "nvim-cmp", + "type": "github" + } + }, + "nvim_plugin-j-hui/fidget.nvim": { + "flake": false, + "locked": { + "lastModified": 1768329414, + "narHash": "sha256-Zap4UVicIvCaPqCMgdlnEAGbMzq1xM4uGpVqZL1iju0=", + "owner": "j-hui", + "repo": "fidget.nvim", + "rev": "7fa433a83118a70fe24c1ce88d5f0bd3453c0970", + "type": "github" + }, + "original": { + "owner": "j-hui", + "repo": "fidget.nvim", + "type": "github" + } + }, + "nvim_plugin-johmsalas/text-case.nvim": { + "flake": false, + "locked": { + "lastModified": 1722628320, + "narHash": "sha256-2IMufSMy9JW50VzZ3SgOtp8kYs81ANwV0eP0ZH3rTFo=", + "owner": "johmsalas", + "repo": "text-case.nvim", + "rev": "e898cfd46fa6cde0e83abb624a16e67d2ffc6457", + "type": "github" + }, + "original": { + "owner": "johmsalas", + "repo": "text-case.nvim", + "type": "github" + } + }, + "nvim_plugin-lewis6991/gitsigns.nvim": { + "flake": false, + "locked": { + "lastModified": 1768948049, + "narHash": "sha256-J9BvYVydeeoBj5Op+jGcI+i3IppdpoZ9jHbod0X1JFo=", + "owner": "lewis6991", + "repo": 
"gitsigns.nvim", + "rev": "abf82a65f185bd54adc0679f74b7d6e1ada690c9", + "type": "github" + }, + "original": { + "owner": "lewis6991", + "repo": "gitsigns.nvim", + "type": "github" + } + }, + "nvim_plugin-lnc3l0t/glow.nvim": { + "flake": false, + "locked": { + "lastModified": 1693233815, + "narHash": "sha256-vdlwkIK2EkFviJmSiOqPWvc15xqJ9F2gHCC4ObJ5Qjk=", + "owner": "lnc3l0t", + "repo": "glow.nvim", + "rev": "5b38fb7b6e806cac62707a4aba8c10c5f14d5bb5", + "type": "github" + }, + "original": { + "owner": "lnc3l0t", + "repo": "glow.nvim", + "type": "github" + } + }, + "nvim_plugin-lukas-reineke/indent-blankline.nvim": { + "flake": false, + "locked": { + "lastModified": 1742224677, + "narHash": "sha256-0q/V+b4UrDRnaC/eRWOi9HU9a61vQSAM9/C8ZQyKt+Y=", + "owner": "lukas-reineke", + "repo": "indent-blankline.nvim", + "rev": "005b56001b2cb30bfa61b7986bc50657816ba4ba", + "type": "github" + }, + "original": { + "owner": "lukas-reineke", + "repo": "indent-blankline.nvim", + "type": "github" + } + }, + "nvim_plugin-m4xshen/hardtime.nvim": { + "flake": false, + "locked": { + "lastModified": 1757738091, + "narHash": "sha256-Jy9ARUHU1ySpSxxoS3hLRjxp5Lqt7juWN5Jnbdo0rg0=", + "owner": "m4xshen", + "repo": "hardtime.nvim", + "rev": "b4e431934af1fe224a3a801f632c008278cb7628", + "type": "github" + }, + "original": { + "owner": "m4xshen", + "repo": "hardtime.nvim", + "type": "github" + } + }, + "nvim_plugin-mbbill/undotree": { + "flake": false, + "locked": { + "lastModified": 1766990053, + "narHash": "sha256-5/SQjSjQPYIK55P2rNrgn9psOSNpWpqJzkpWmjo8Itg=", + "owner": "mbbill", + "repo": "undotree", + "rev": "178d19e00a643f825ea11d581b1684745d0c4eda", + "type": "github" + }, + "original": { + "owner": "mbbill", + "repo": "undotree", + "type": "github" + } + }, + "nvim_plugin-mfussenegger/nvim-lint": { + "flake": false, + "locked": { + "lastModified": 1767793421, + "narHash": "sha256-Ru+QklYFuwoeRvKlBXZcItuGvKDPbEq04sACKvTQds8=", + "owner": "mfussenegger", + "repo": "nvim-lint", + "rev": "ca6ea12daf0a4d92dc24c5c9ae22a1f0418ade37", + "type": "github" + }, + "original": { + "owner": "mfussenegger", + "repo": "nvim-lint", + "type": "github" + } + }, + "nvim_plugin-mrcjkb/rustaceanvim": { + "flake": false, + "locked": { + "lastModified": 1768768642, + "narHash": "sha256-tIIPP6RVzJTwEI2V5E7MZRM69cUV4xlNvwnF9PD71l4=", + "owner": "mrcjkb", + "repo": "rustaceanvim", + "rev": "d09c0639e7f68615db6845570241947198234cd6", + "type": "github" + }, + "original": { + "owner": "mrcjkb", + "repo": "rustaceanvim", + "type": "github" + } + }, + "nvim_plugin-neovim/nvim-lspconfig": { + "flake": false, + "locked": { + "lastModified": 1769036040, + "narHash": "sha256-+KjXMTkVVVt3rpxQHRtrlT74DDsfRnPNUlqi/pvIMxg=", + "owner": "neovim", + "repo": "nvim-lspconfig", + "rev": "419b082102fa813739588dd82e19a8b6b2442855", + "type": "github" + }, + "original": { + "owner": "neovim", + "repo": "nvim-lspconfig", + "type": "github" + } + }, + "nvim_plugin-nvim-lua/plenary.nvim": { + "flake": false, + "locked": { + "lastModified": 1753570668, + "narHash": "sha256-9Un7ekhBxcnmFE1xjCCFTZ7eqIbmXvQexpnhduAg4M0=", + "owner": "nvim-lua", + "repo": "plenary.nvim", + "rev": "b9fd5226c2f76c951fc8ed5923d85e4de065e509", + "type": "github" + }, + "original": { + "owner": "nvim-lua", + "repo": "plenary.nvim", + "type": "github" + } + }, + "nvim_plugin-nvim-lualine/lualine.nvim": { + "flake": false, + "locked": { + "lastModified": 1763865090, + "narHash": "sha256-OpLZH+sL5cj2rcP5/T+jDOnuxd1QWLHCt2RzloffZOA=", + "owner": "nvim-lualine", + "repo": "lualine.nvim", + "rev": 
"47f91c416daef12db467145e16bed5bbfe00add8", + "type": "github" + }, + "original": { + "owner": "nvim-lualine", + "repo": "lualine.nvim", + "type": "github" + } + }, + "nvim_plugin-nvim-telescope/telescope-file-browser.nvim": { + "flake": false, + "locked": { + "lastModified": 1754424906, + "narHash": "sha256-FlJ7w5Ywwq03E0oYdnFJFb+MMUMQMa+5QhDMy2O9tGQ=", + "owner": "nvim-telescope", + "repo": "telescope-file-browser.nvim", + "rev": "3610dc7dc91f06aa98b11dca5cc30dfa98626b7e", + "type": "github" + }, + "original": { + "owner": "nvim-telescope", + "repo": "telescope-file-browser.nvim", + "type": "github" + } + }, + "nvim_plugin-nvim-telescope/telescope-fzf-native.nvim": { + "flake": false, + "locked": { + "lastModified": 1762521376, + "narHash": "sha256-ChEM4jJonAE4qXd/dgTu2mdlpNBj5rEdpA8TgR38oRM=", + "owner": "nvim-telescope", + "repo": "telescope-fzf-native.nvim", + "rev": "6fea601bd2b694c6f2ae08a6c6fab14930c60e2c", + "type": "github" + }, + "original": { + "owner": "nvim-telescope", + "repo": "telescope-fzf-native.nvim", + "type": "github" + } + }, + "nvim_plugin-nvim-telescope/telescope-ui-select.nvim": { + "flake": false, + "locked": { + "lastModified": 1701723223, + "narHash": "sha256-YRhNmmG4gx9Ht8JwjQfbTjJyTHEuZmtP6lqnhOsk8bE=", + "owner": "nvim-telescope", + "repo": "telescope-ui-select.nvim", + "rev": "6e51d7da30bd139a6950adf2a47fda6df9fa06d2", + "type": "github" + }, + "original": { + "owner": "nvim-telescope", + "repo": "telescope-ui-select.nvim", + "type": "github" + } + }, + "nvim_plugin-nvim-telescope/telescope.nvim": { + "flake": false, + "locked": { + "lastModified": 1768858021, + "narHash": "sha256-u1IyXfbGG2WF08V2mkGfjinBD2glU6aik7GjTFxyAn0=", + "owner": "nvim-telescope", + "repo": "telescope.nvim", + "rev": "0d8b6eaa0b5ae6bb3d9785f7a3ba4a4c6c1b1af2", + "type": "github" + }, + "original": { + "owner": "nvim-telescope", + "repo": "telescope.nvim", + "type": "github" + } + }, + "nvim_plugin-nvim-tree/nvim-tree.lua": { + "flake": false, + "locked": { + "lastModified": 1769045035, + "narHash": "sha256-bRgYf9A2FZmlRFP7ixkfZqtOF+rQ+ju0vQ52qfmvY0A=", + "owner": "nvim-tree", + "repo": "nvim-tree.lua", + "rev": "869fc957edd43962c4cc7d70f0dca087056534c8", + "type": "github" + }, + "original": { + "owner": "nvim-tree", + "repo": "nvim-tree.lua", + "type": "github" + } + }, + "nvim_plugin-nvim-tree/nvim-web-devicons": { + "flake": false, + "locked": { + "lastModified": 1768089403, + "narHash": "sha256-x1ujwUXnRolP9SRUD7/Pb4/AZu+3YpC6CfGuq3Bn6Ew=", + "owner": "nvim-tree", + "repo": "nvim-web-devicons", + "rev": "803353450c374192393f5387b6a0176d0972b848", + "type": "github" + }, + "original": { + "owner": "nvim-tree", + "repo": "nvim-web-devicons", + "type": "github" + } + }, + "nvim_plugin-nvim-treesitter/nvim-treesitter-context": { + "flake": false, + "locked": { + "lastModified": 1765030629, + "narHash": "sha256-3NtwOA9d2ezLoo7qnzKAr6gwEdcpUqLc7ou4QI+9rDY=", + "owner": "nvim-treesitter", + "repo": "nvim-treesitter-context", + "rev": "64dd4cf3f6fd0ab17622c5ce15c91fc539c3f24a", + "type": "github" + }, + "original": { + "owner": "nvim-treesitter", + "repo": "nvim-treesitter-context", + "type": "github" + } + }, + "nvim_plugin-rafamadriz/friendly-snippets": { + "flake": false, + "locked": { + "lastModified": 1745949052, + "narHash": "sha256-FzApcTbWfFkBD9WsYMhaCyn6ky8UmpUC2io/co/eByM=", + "owner": "rafamadriz", + "repo": "friendly-snippets", + "rev": "572f5660cf05f8cd8834e096d7b4c921ba18e175", + "type": "github" + }, + "original": { + "owner": "rafamadriz", + "repo": "friendly-snippets", 
+ "type": "github" + } + }, + "nvim_plugin-rcarriga/nvim-notify": { + "flake": false, + "locked": { + "lastModified": 1757190131, + "narHash": "sha256-h7STMjY+CBTqBkIDJXgtJz4WhNeQ02ES2Jesi3jZXeM=", + "owner": "rcarriga", + "repo": "nvim-notify", + "rev": "8701bece920b38ea289b457f902e2ad184131a5d", + "type": "github" + }, + "original": { + "owner": "rcarriga", + "repo": "nvim-notify", + "type": "github" + } + }, + "nvim_plugin-rmagatti/auto-session": { + "flake": false, + "locked": { + "lastModified": 1761853983, + "narHash": "sha256-9/SfXUAZIiPAS5ojvJCxDCxmuLoL/kIrAsNWAoLWFq4=", + "owner": "rmagatti", + "repo": "auto-session", + "rev": "292492ab7af4bd8b9e37e28508bc8ce995722fd5", + "type": "github" + }, + "original": { + "owner": "rmagatti", + "repo": "auto-session", + "type": "github" + } + }, + "nvim_plugin-ron-rs/ron.vim": { + "flake": false, + "locked": { + "lastModified": 1660904719, + "narHash": "sha256-8/xJmymtVGVz2avzlamgK1cNflZ3NRL+B3c7xxbI964=", + "owner": "ron-rs", + "repo": "ron.vim", + "rev": "f749e543975a82e8dd9a6e7df9600a1c098ae800", + "type": "github" + }, + "original": { + "owner": "ron-rs", + "repo": "ron.vim", + "type": "github" + } + }, + "nvim_plugin-saadparwaiz1/cmp_luasnip": { + "flake": false, + "locked": { + "lastModified": 1730707109, + "narHash": "sha256-86lKQPPyqFz8jzuLajjHMKHrYnwW6+QOcPyQEx6B+gw=", + "owner": "saadparwaiz1", + "repo": "cmp_luasnip", + "rev": "98d9cb5c2c38532bd9bdb481067b20fea8f32e90", + "type": "github" + }, + "original": { + "owner": "saadparwaiz1", + "repo": "cmp_luasnip", + "type": "github" + } + }, + "nvim_plugin-sindrets/diffview.nvim": { + "flake": false, + "locked": { + "lastModified": 1718279802, + "narHash": "sha256-SX+ybIzL/w6uyCy4iZKnWnzTFwqB1oXSgyYVAdpdKi8=", + "owner": "sindrets", + "repo": "diffview.nvim", + "rev": "4516612fe98ff56ae0415a259ff6361a89419b0a", + "type": "github" + }, + "original": { + "owner": "sindrets", + "repo": "diffview.nvim", + "type": "github" + } + }, + "nvim_plugin-stevearc/conform.nvim": { + "flake": false, + "locked": { + "lastModified": 1768760646, + "narHash": "sha256-M2mGNF8iARiQ6MzIFSSE/2BRDSj1+XEP/C44dwg8MQ8=", + "owner": "stevearc", + "repo": "conform.nvim", + "rev": "c2526f1cde528a66e086ab1668e996d162c75f4f", + "type": "github" + }, + "original": { + "owner": "stevearc", + "repo": "conform.nvim", + "type": "github" + } + }, + "nvim_plugin-stevearc/dressing.nvim": { + "flake": false, + "locked": { + "lastModified": 1739381641, + "narHash": "sha256-dBz+/gZA6O6fJy/GSgM6ZHGAR3MTGt/W1olzzTYRlgM=", + "owner": "stevearc", + "repo": "dressing.nvim", + "rev": "2d7c2db2507fa3c4956142ee607431ddb2828639", + "type": "github" + }, + "original": { + "owner": "stevearc", + "repo": "dressing.nvim", + "type": "github" + } + }, + "nvim_plugin-tpope/vim-sleuth": { + "flake": false, + "locked": { + "lastModified": 1726718493, + "narHash": "sha256-2Cr3h3uJvUL3CSoJs3aBFrkBeOBURSQItgQ4ep9sHXM=", + "owner": "tpope", + "repo": "vim-sleuth", + "rev": "be69bff86754b1aa5adcbb527d7fcd1635a84080", + "type": "github" + }, + "original": { + "owner": "tpope", + "repo": "vim-sleuth", + "type": "github" + } + }, + "nvim_plugin-uga-rosa/ccc.nvim": { + "flake": false, + "locked": { + "lastModified": 1746537659, + "narHash": "sha256-3TZ8VmvdgQ9n63m78C3r4OIUkVQHTHBvC24ixBdhTig=", + "owner": "uga-rosa", + "repo": "ccc.nvim", + "rev": "9d1a256e006decc574789dfc7d628ca11644d4c2", + "type": "github" + }, + "original": { + "owner": "uga-rosa", + "repo": "ccc.nvim", + "type": "github" + } + }, + "nvim_plugin-windwp/nvim-ts-autotag": { + "flake": 
false, + "locked": { + "lastModified": 1768909712, + "narHash": "sha256-XajbH3R1ONStQyYK6xQBE1cfGk3Y6tP/Mh9Ch90aKCk=", + "owner": "windwp", + "repo": "nvim-ts-autotag", + "rev": "db15f2e0df2f5db916e511e3fffb682ef2f6354f", + "type": "github" + }, + "original": { + "owner": "windwp", + "repo": "nvim-ts-autotag", + "type": "github" + } + }, + "opencode": { + "inputs": { + "nixpkgs": "nixpkgs_2" + }, + "locked": { + "lastModified": 1769447272, + "narHash": "sha256-IenoXyTkdfMyAnimjJzf5kSXvIWd1X2Nt4g6iJOmmH4=", + "owner": "anomalyco", + "repo": "opencode", + "rev": "37f1a1a4ef36eacb60ad5493db8aeb1130c5fa91", + "type": "github" + }, + "original": { + "owner": "anomalyco", + "repo": "opencode", + "type": "github" + } + }, + "root": { + "inputs": { + "common": "common", + "home-manager": "home-manager", + "nixpkgs": "nixpkgs", + "opencode": "opencode", + "ros_neovim": "ros_neovim" + } + }, + "ros_neovim": { + "inputs": { + "nixpkgs": "nixpkgs_3", + "nvim_plugin-Almo7aya/openingh.nvim": "nvim_plugin-Almo7aya/openingh.nvim", + "nvim_plugin-JoosepAlviste/nvim-ts-context-commentstring": "nvim_plugin-JoosepAlviste/nvim-ts-context-commentstring", + "nvim_plugin-L3MON4D3/LuaSnip": "nvim_plugin-L3MON4D3/LuaSnip", + "nvim_plugin-MeanderingProgrammer/render-markdown.nvim": "nvim_plugin-MeanderingProgrammer/render-markdown.nvim", + "nvim_plugin-MunifTanjim/nui.nvim": "nvim_plugin-MunifTanjim/nui.nvim", + "nvim_plugin-RRethy/vim-illuminate": "nvim_plugin-RRethy/vim-illuminate", + "nvim_plugin-Saecki/crates.nvim": "nvim_plugin-Saecki/crates.nvim", + "nvim_plugin-aznhe21/actions-preview.nvim": "nvim_plugin-aznhe21/actions-preview.nvim", + "nvim_plugin-b0o/schemastore.nvim": "nvim_plugin-b0o/schemastore.nvim", + "nvim_plugin-catppuccin/nvim": "nvim_plugin-catppuccin/nvim", + "nvim_plugin-chrisgrieser/nvim-early-retirement": "nvim_plugin-chrisgrieser/nvim-early-retirement", + "nvim_plugin-declancm/cinnamon.nvim": "nvim_plugin-declancm/cinnamon.nvim", + "nvim_plugin-direnv/direnv.vim": "nvim_plugin-direnv/direnv.vim", + "nvim_plugin-echasnovski/mini.nvim": "nvim_plugin-echasnovski/mini.nvim", + "nvim_plugin-folke/lazy.nvim": "nvim_plugin-folke/lazy.nvim", + "nvim_plugin-folke/lazydev.nvim": "nvim_plugin-folke/lazydev.nvim", + "nvim_plugin-folke/trouble.nvim": "nvim_plugin-folke/trouble.nvim", + "nvim_plugin-folke/which-key.nvim": "nvim_plugin-folke/which-key.nvim", + "nvim_plugin-ggml-org/llama.vim": "nvim_plugin-ggml-org/llama.vim", + "nvim_plugin-hrsh7th/cmp-buffer": "nvim_plugin-hrsh7th/cmp-buffer", + "nvim_plugin-hrsh7th/cmp-nvim-lsp": "nvim_plugin-hrsh7th/cmp-nvim-lsp", + "nvim_plugin-hrsh7th/cmp-path": "nvim_plugin-hrsh7th/cmp-path", + "nvim_plugin-hrsh7th/nvim-cmp": "nvim_plugin-hrsh7th/nvim-cmp", + "nvim_plugin-j-hui/fidget.nvim": "nvim_plugin-j-hui/fidget.nvim", + "nvim_plugin-johmsalas/text-case.nvim": "nvim_plugin-johmsalas/text-case.nvim", + "nvim_plugin-lewis6991/gitsigns.nvim": "nvim_plugin-lewis6991/gitsigns.nvim", + "nvim_plugin-lnc3l0t/glow.nvim": "nvim_plugin-lnc3l0t/glow.nvim", + "nvim_plugin-lukas-reineke/indent-blankline.nvim": "nvim_plugin-lukas-reineke/indent-blankline.nvim", + "nvim_plugin-m4xshen/hardtime.nvim": "nvim_plugin-m4xshen/hardtime.nvim", + "nvim_plugin-mbbill/undotree": "nvim_plugin-mbbill/undotree", + "nvim_plugin-mfussenegger/nvim-lint": "nvim_plugin-mfussenegger/nvim-lint", + "nvim_plugin-mrcjkb/rustaceanvim": "nvim_plugin-mrcjkb/rustaceanvim", + "nvim_plugin-neovim/nvim-lspconfig": "nvim_plugin-neovim/nvim-lspconfig", + "nvim_plugin-nvim-lua/plenary.nvim": 
"nvim_plugin-nvim-lua/plenary.nvim", + "nvim_plugin-nvim-lualine/lualine.nvim": "nvim_plugin-nvim-lualine/lualine.nvim", + "nvim_plugin-nvim-telescope/telescope-file-browser.nvim": "nvim_plugin-nvim-telescope/telescope-file-browser.nvim", + "nvim_plugin-nvim-telescope/telescope-fzf-native.nvim": "nvim_plugin-nvim-telescope/telescope-fzf-native.nvim", + "nvim_plugin-nvim-telescope/telescope-ui-select.nvim": "nvim_plugin-nvim-telescope/telescope-ui-select.nvim", + "nvim_plugin-nvim-telescope/telescope.nvim": "nvim_plugin-nvim-telescope/telescope.nvim", + "nvim_plugin-nvim-tree/nvim-tree.lua": "nvim_plugin-nvim-tree/nvim-tree.lua", + "nvim_plugin-nvim-tree/nvim-web-devicons": "nvim_plugin-nvim-tree/nvim-web-devicons", + "nvim_plugin-nvim-treesitter/nvim-treesitter-context": "nvim_plugin-nvim-treesitter/nvim-treesitter-context", + "nvim_plugin-rafamadriz/friendly-snippets": "nvim_plugin-rafamadriz/friendly-snippets", + "nvim_plugin-rcarriga/nvim-notify": "nvim_plugin-rcarriga/nvim-notify", + "nvim_plugin-rmagatti/auto-session": "nvim_plugin-rmagatti/auto-session", + "nvim_plugin-ron-rs/ron.vim": "nvim_plugin-ron-rs/ron.vim", + "nvim_plugin-saadparwaiz1/cmp_luasnip": "nvim_plugin-saadparwaiz1/cmp_luasnip", + "nvim_plugin-sindrets/diffview.nvim": "nvim_plugin-sindrets/diffview.nvim", + "nvim_plugin-stevearc/conform.nvim": "nvim_plugin-stevearc/conform.nvim", + "nvim_plugin-stevearc/dressing.nvim": "nvim_plugin-stevearc/dressing.nvim", + "nvim_plugin-tpope/vim-sleuth": "nvim_plugin-tpope/vim-sleuth", + "nvim_plugin-uga-rosa/ccc.nvim": "nvim_plugin-uga-rosa/ccc.nvim", + "nvim_plugin-windwp/nvim-ts-autotag": "nvim_plugin-windwp/nvim-ts-autotag", + "rust-overlay": "rust-overlay" + }, + "locked": { + "lastModified": 1769125736, + "narHash": "sha256-anQb65WdwbW+r/elOicrhDAhF+pjZBnur5ei9/rhq2s=", + "ref": "refs/heads/master", + "rev": "fedaece7199f49d1317856fe22f20b7467639409", + "revCount": 332, + "type": "git", + "url": "https://git.joshuabell.xyz/ringofstorms/nvim" + }, + "original": { + "type": "git", + "url": "https://git.joshuabell.xyz/ringofstorms/nvim" + } + }, + "rust-overlay": { + "inputs": { + "nixpkgs": [ + "ros_neovim", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1769091129, + "narHash": "sha256-Jj/vIHjiu4OdDIrDXZ3xOPCJrMZZKzhE2UIVXV/NYzY=", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": "131e22d6a6d54ab72aeef6a5a661ab7005b4c596", + "type": "github" + }, + "original": { + "owner": "oxalica", + "repo": "rust-overlay", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake/default-vm/flake.nix b/flake/default-vm/flake.nix index a853f67..3830e21 100644 --- a/flake/default-vm/flake.nix +++ b/flake/default-vm/flake.nix @@ -8,11 +8,6 @@ inputs.nixpkgs.follows = "nixpkgs"; }; - nixos-generators = { - url = "github:nix-community/nixos-generators"; - inputs.nixpkgs.follows = "nixpkgs"; - }; - opencode.url = "github:anomalyco/opencode"; common.url = "git+https://git.joshuabell.xyz/ringofstorms/dotfiles?dir=flakes/common"; ros_neovim.url = "git+https://git.joshuabell.xyz/ringofstorms/nvim"; @@ -22,7 +17,6 @@ { self, nixpkgs, - nixos-generators, ... 
}@inputs: let @@ -52,6 +46,33 @@ allowUnfreePredicate = (_: true); }; + # Root filesystem configuration for disk image + # Use /dev/vda1 directly instead of by-label to avoid initrd label detection issues + fileSystems."/" = { + device = "/dev/vda1"; + autoResize = true; + fsType = "ext4"; + }; + + # Boot loader configuration for disk image + boot.loader.grub.device = lib.mkDefault "/dev/vda"; + + # Explicitly load virtio block device module in initrd + boot.initrd.availableKernelModules = [ "virtio_blk" "virtio_pci" "virtio" ]; + + # Serial console for headless operation with QEMU -nographic + boot.kernelParams = [ "console=ttyS0,115200n8" ]; + + # GRUB serial console configuration + boot.loader.grub.extraConfig = '' + serial --unit=0 --speed=115200 + terminal_input serial + terminal_output serial + ''; + + # Getty on serial console for login prompt + systemd.services."serial-getty@ttyS0".enable = true; + # Distinctive hostname for easy identification networking.hostName = "qvm-dev"; @@ -184,9 +205,60 @@ SCCACHE_DIR = "/cache/sccache"; }; - # Ensure workspace directory exists + # Ensure workspace and cache directories exist systemd.tmpfiles.rules = [ "d /workspace 0755 root root -" + "d /cache 0755 root root -" + "d /cache/cargo 0755 root root -" + "d /cache/target 0755 root root -" + "d /cache/pnpm 0755 root root -" + "d /cache/sccache 0755 root root -" + ]; + + # Systemd mount units for cache directories + # The NixOS VM runner doesn't include custom fileSystems entries in the generated fstab, + # so we use systemd mount units to automount the 9p virtfs shares at boot. + systemd.mounts = [ + { + what = "cargo_home"; + where = "/cache/cargo"; + type = "9p"; + options = "trans=virtio,version=9p2000.L,msize=104857600,nofail"; + wantedBy = [ "multi-user.target" ]; + after = [ "systemd-modules-load.service" ]; + } + { + what = "cargo_target"; + where = "/cache/target"; + type = "9p"; + options = "trans=virtio,version=9p2000.L,msize=104857600,nofail"; + wantedBy = [ "multi-user.target" ]; + after = [ "systemd-modules-load.service" ]; + } + { + what = "pnpm_store"; + where = "/cache/pnpm"; + type = "9p"; + options = "trans=virtio,version=9p2000.L,msize=104857600,nofail"; + wantedBy = [ "multi-user.target" ]; + after = [ "systemd-modules-load.service" ]; + } + { + what = "sccache"; + where = "/cache/sccache"; + type = "9p"; + options = "trans=virtio,version=9p2000.L,msize=104857600,nofail"; + wantedBy = [ "multi-user.target" ]; + after = [ "systemd-modules-load.service" ]; + } + { + what = "opencode_config"; + where = "/root/.config/opencode"; + type = "9p"; + options = "trans=virtio,version=9p2000.L,msize=104857600,nofail"; + wantedBy = [ "multi-user.target" ]; + after = [ "systemd-modules-load.service" ]; + } ]; # Essential packages for development @@ -224,6 +296,16 @@ # GB disk size virtualisation.diskSize = 40 * 1024; + # NOTE: Using 9p virtfs for filesystem sharing + # The NixOS VM runner doesn't support virtio-fs out of the box. + # We use 9p (-virtfs) which is the standard method for QEMU VMs. + # + # See: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/virtualisation/qemu-vm.nix#L530 + # The sharedDirectories option hardcodes: -virtfs local,path=...,security_model=... 
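+ # Each mount_tag passed via -virtfs must match the "what" of one of the + # systemd.mounts units above (e.g. cargo_home); 9p shares are addressed + # by mount tag from inside the guest.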
+ # + # 9p mounts are configured via QEMU_OPTS environment variable: + # -virtfs local,path=$HOST_PATH,mount_tag=$TAG,security_model=mapped-xattr,msize=104857600 + system.stateVersion = stateVersion; }; @@ -243,8 +325,28 @@ # Runnable VM script (./result/bin/run-qvm-dev-vm) packages.${system} = { + # QCOW2 disk image for base VM + # Using make-disk-image.nix with sufficient memSize to avoid OOM during build + default = import "${nixpkgs}/nixos/lib/make-disk-image.nix" { + inherit pkgs; + lib = nixpkgs.lib; + config = baseVm.config; + + # Disk image settings + format = "qcow2"; + diskSize = "auto"; + additionalSpace = "2G"; # Extra space beyond closure size (default 512M) + partitionTableType = "legacy"; # Use simple MBR instead of hybrid + label = "nixos"; # Explicit label matching fileSystems."/" device + + # CRITICAL: Increase build VM memory to 16GB for large closures + # The closure includes NixOS + home-manager + opencode + dev tools (~2GB+) + # Default 512MB and even 2GB was insufficient, causing OOM during cptofs + memSize = 16384; + }; + + # Keep the runner script as an alternative for debugging vm = baseVm.config.system.build.vm; - default = baseVm.config.system.build.vm; }; apps.${system}.default = { diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..1072f75 --- /dev/null +++ b/go.mod @@ -0,0 +1,17 @@ +module qvm + +go 1.25.5 + +require ( + github.com/BurntSushi/toml v1.6.0 + github.com/digitalocean/go-qemu v0.0.0-20250212194115-ee9b0668d242 + github.com/samber/mo v1.16.0 + github.com/spf13/cobra v1.10.2 +) + +require ( + github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/spf13/pflag v1.0.9 // indirect + golang.org/x/sys v0.30.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..da0debe --- /dev/null +++ b/go.sum @@ -0,0 +1,56 @@ +github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= +github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e h1:SCnqm8SjSa0QqRxXbo5YY//S+OryeJioe17nK+iDZpg= +github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e/go.mod h1:o129ljs6alsIQTc8d6eweihqpmmrbxZ2g1jhgjhPykI= +github.com/digitalocean/go-qemu v0.0.0-20250212194115-ee9b0668d242 h1:rh6rt8pF5U4iyQ86h6lRDenJoX4ht2wFnZXB9ogIrIM= +github.com/digitalocean/go-qemu v0.0.0-20250212194115-ee9b0668d242/go.mod h1:LGHUtlhsY4vRGM6AHejEQKVI5e3eHbSylMHwTSpQtVw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 
v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/samber/mo v1.16.0 h1:qpEPCI63ou6wXlsNDMLE0IIN8A+devbGX/K1xdgr4b4= +github.com/samber/mo v1.16.0/go.mod h1:DlgzJ4SYhOh41nP1L9kh9rDNERuf8IqWSAs+gj2Vxag= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 0000000..c060c79 --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,47 @@ +package config + +import ( + "fmt" + "os" + "strconv" + + "github.com/BurntSushi/toml" +) + +type Config struct { + VM VMConfig `toml:"vm"` +} + +type VMConfig struct { + Memory string `toml:"memory"` + CPUs int `toml:"cpus"` +} + +func Load() (*Config, error) { + cfg := &Config{ + VM: VMConfig{ + Memory: "30G", + CPUs: 30, + }, + } + + if _, err := os.Stat(ConfigFile); err == nil { + if _, err := toml.DecodeFile(ConfigFile, cfg); err != nil { + return nil, fmt.Errorf("failed to parse config file %s: %w", ConfigFile, err) + } + } + + if memEnv := os.Getenv("QVM_MEMORY"); memEnv != "" { + cfg.VM.Memory = memEnv + } + + if cpusEnv := os.Getenv("QVM_CPUS"); cpusEnv != "" { + cpus, err := strconv.Atoi(cpusEnv) + if err != nil { + return nil, fmt.Errorf("QVM_CPUS must be a valid integer: %w", err) + } + cfg.VM.CPUs = cpus + } + + return cfg, nil +} diff --git a/internal/config/paths.go b/internal/config/paths.go new file mode 100644 index 0000000..d8ed7a7 --- /dev/null +++ b/internal/config/paths.go @@ -0,0 +1,125 @@ +// Package config handles QVM configuration loading and management. +package config + +import ( + "os" + "path/filepath" +) + +// XDG-compliant directory paths matching bash lib/common.sh exactly + +var ( + // DataDir is the base directory for QVM data files (base image, runner) + // Defaults to $HOME/.local/share/qvm + DataDir = getXDGPath("XDG_DATA_HOME", ".local/share", "qvm") + + // StateDir is the directory for QVM runtime state (overlay, PID, port files) + // Defaults to $HOME/.local/state/qvm + StateDir = getXDGPath("XDG_STATE_HOME", ".local/state", "qvm") + + // CacheDir is the directory for shared build caches (cargo, pnpm, sccache) + // Defaults to $HOME/.cache/qvm + CacheDir = getXDGPath("XDG_CACHE_HOME", ".cache", "qvm") + + // ConfigDir is the directory for QVM configuration (flake, qvm.toml) + // Defaults to $HOME/.config/qvm + ConfigDir = getXDGPath("XDG_CONFIG_HOME", ".config", "qvm") +) + +// Path constants for VM artifacts +var ( + // BaseImage is the path to the base VM image (read-only) + BaseImage = filepath.Join(DataDir, "base.qcow2") + + // VMRunner is the path to the VM runner script + VMRunner = filepath.Join(DataDir, "run-vm") + + // Overlay is the path to the VM overlay image (copy-on-write) + Overlay = filepath.Join(StateDir, "qvm-dev.qcow2") + + // PIDFile is the path to the VM process ID file + PIDFile = filepath.Join(StateDir, "vm.pid") + + // SSHPortFile is the path to the SSH port file + SSHPortFile = filepath.Join(StateDir, "ssh.port") + + // SerialLog is the path to the VM serial console log + SerialLog = filepath.Join(StateDir, "serial.log") + + // WorkspacesFile is the path to the workspaces registry JSON + WorkspacesFile = filepath.Join(StateDir, "workspaces.json") + + // QMPSocket is the path to the QMP (QEMU Machine Protocol) socket + QMPSocket = filepath.Join(StateDir, "qmp.sock") + + // UserFlake is the path to the user's customizable NixOS flake + UserFlake = filepath.Join(ConfigDir, "flake") + + // ConfigFile is the path 
to the QVM TOML configuration file + ConfigFile = filepath.Join(ConfigDir, "qvm.toml") +) + +// Cache directories for 9p mounts (shared between host and VM) +var ( + // CargoHome is the shared Cargo registry/cache directory + CargoHome = filepath.Join(CacheDir, "cargo-home") + + // CargoTarget is the shared Cargo build artifacts directory + CargoTarget = filepath.Join(CacheDir, "cargo-target") + + // PnpmStore is the shared pnpm content-addressable store + PnpmStore = filepath.Join(CacheDir, "pnpm-store") + + // Sccache is the shared sccache compilation cache + Sccache = filepath.Join(CacheDir, "sccache") +) + +// Host config directories to mount in VM (read-write for tools that need it) +var ( + // HostOpencodeConfig is the path to the host's opencode configuration + // Defaults to $HOME/.config/opencode + HostOpencodeConfig = getXDGPath("XDG_CONFIG_HOME", ".config", "opencode") +) + +// getXDGPath returns an XDG-compliant path with fallback. +// Args: +// - xdgEnv: XDG environment variable to check (e.g., "XDG_DATA_HOME") +// - fallbackPath: relative path from $HOME if xdgEnv is not set (e.g., ".local/share") +// - suffix: additional path suffix to append (e.g., "qvm") +// +// Returns the resolved absolute path. +func getXDGPath(xdgEnv, fallbackPath, suffix string) string { + base := os.Getenv(xdgEnv) + if base == "" { + home := os.Getenv("HOME") + if home == "" { + // Fallback to current directory if HOME is not set + home = "." + } + base = filepath.Join(home, fallbackPath) + } + return filepath.Join(base, suffix) +} + +// EnsureDirs creates all required QVM directories if they don't exist. +// Returns error if directory creation fails. +func EnsureDirs() error { + dirs := []string{ + DataDir, + StateDir, + CacheDir, + ConfigDir, + CargoHome, + CargoTarget, + PnpmStore, + Sccache, + } + + for _, dir := range dirs { + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + } + + return nil +} diff --git a/internal/logging/logger.go b/internal/logging/logger.go new file mode 100644 index 0000000..61a71b9 --- /dev/null +++ b/internal/logging/logger.go @@ -0,0 +1,27 @@ +package logging + +import ( + "fmt" + "os" +) + +// Simple logging package matching bash script format: +// [INFO] message +// [WARN] message +// [ERROR] message +// No timestamps or additional metadata + +// Info prints an informational message +func Info(msg string) { + fmt.Printf("[INFO] %s\n", msg) +} + +// Warn prints a warning message +func Warn(msg string) { + fmt.Printf("[WARN] %s\n", msg) +} + +// Error prints an error message +func Error(msg string) { + fmt.Fprintf(os.Stderr, "[ERROR] %s\n", msg) +} diff --git a/internal/qmp/client.go b/internal/qmp/client.go new file mode 100644 index 0000000..958a271 --- /dev/null +++ b/internal/qmp/client.go @@ -0,0 +1,75 @@ +package qmp + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/digitalocean/go-qemu/qmp" + "github.com/samber/mo" +) + +type Client struct { + monitor qmp.Monitor +} + +type VMStatus struct { + Running bool + Singlestep bool + Status string +} + +func Connect(socketPath string) mo.Result[*Client] { + monitor, err := qmp.NewSocketMonitor("unix", socketPath, 2*time.Second) + if err != nil { + return mo.Err[*Client](fmt.Errorf("failed to create socket monitor: %w", err)) + } + + if err := monitor.Connect(); err != nil { + return mo.Err[*Client](fmt.Errorf("failed to connect to QMP socket: %w", err)) + } + + return mo.Ok(&Client{monitor: monitor}) +} + +func (c *Client) Status() mo.Result[VMStatus] { + type statusResult struct { + ID 
string `json:"id"` + Return struct { + Running bool `json:"running"` + Singlestep bool `json:"singlestep"` + Status string `json:"status"` + } `json:"return"` + } + + cmd := []byte(`{"execute":"query-status"}`) + raw, err := c.monitor.Run(cmd) + if err != nil { + return mo.Err[VMStatus](fmt.Errorf("failed to execute query-status: %w", err)) + } + + var result statusResult + if err := json.Unmarshal(raw, &result); err != nil { + return mo.Err[VMStatus](fmt.Errorf("failed to parse status response: %w", err)) + } + + return mo.Ok(VMStatus{ + Running: result.Return.Running, + Singlestep: result.Return.Singlestep, + Status: result.Return.Status, + }) +} + +func (c *Client) Shutdown() mo.Result[struct{}] { + cmd := []byte(`{"execute":"system_powerdown"}`) + _, err := c.monitor.Run(cmd) + if err != nil { + return mo.Err[struct{}](fmt.Errorf("failed to execute system_powerdown: %w", err)) + } + + return mo.Ok(struct{}{}) +} + +func (c *Client) Close() error { + return c.monitor.Disconnect() +} diff --git a/internal/virtiofsd/manager.go b/internal/virtiofsd/manager.go new file mode 100644 index 0000000..faeffe4 --- /dev/null +++ b/internal/virtiofsd/manager.go @@ -0,0 +1,234 @@ +package virtiofsd + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/samber/mo" +) + +type Manager struct { + stateDir string + pids map[string]int +} + +func NewManager(stateDir string) *Manager { + return &Manager{ + stateDir: stateDir, + pids: make(map[string]int), + } +} + +func findVirtiofsd() (string, error) { + // First try PATH + if path, err := exec.LookPath("virtiofsd"); err == nil { + return path, nil + } + + // Fall back to nix + cmd := exec.Command("nix", "path-info", "nixpkgs#virtiofsd") + output, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("virtiofsd not found in PATH and nix lookup failed: %w", err) + } + + storePath := strings.TrimSpace(string(output)) + virtiofsdPath := filepath.Join(storePath, "bin", "virtiofsd") + + if _, err := os.Stat(virtiofsdPath); err != nil { + return "", fmt.Errorf("virtiofsd binary not found at %s", virtiofsdPath) + } + + return virtiofsdPath, nil +} + +func (m *Manager) StartMount(mount Mount) mo.Result[int] { + if err := m.CleanStale([]Mount{mount}); err != nil { + return mo.Err[int](fmt.Errorf("failed to clean stale socket for %s: %w", mount.Tag, err)) + } + + if err := os.MkdirAll(mount.HostPath, 0755); err != nil { + return mo.Err[int](fmt.Errorf("failed to create host directory %s: %w", mount.HostPath, err)) + } + + virtiofsd, err := findVirtiofsd() + if err != nil { + return mo.Err[int](err) + } + + cmd := exec.Command(virtiofsd, + "--socket-path="+mount.SocketPath, + "--shared-dir="+mount.HostPath, + "--cache=auto", + ) + + if err := cmd.Start(); err != nil { + return mo.Err[int](fmt.Errorf("failed to start virtiofsd for %s: %w", mount.Tag, err)) + } + + pid := cmd.Process.Pid + m.pids[mount.Tag] = pid + + pidFile := m.pidFilePath(mount.Tag) + if err := os.WriteFile(pidFile, []byte(strconv.Itoa(pid)), 0644); err != nil { + _ = cmd.Process.Kill() + return mo.Err[int](fmt.Errorf("failed to write PID file for %s: %w", mount.Tag, err)) + } + + for i := 0; i < 50; i++ { + if _, err := os.Stat(mount.SocketPath); err == nil { + return mo.Ok(pid) + } + time.Sleep(100 * time.Millisecond) + } + + _ = m.StopMount(mount) + return mo.Err[int](fmt.Errorf("virtiofsd socket for %s did not appear within 5 seconds", mount.Tag)) +} + +func (m *Manager) StopMount(mount Mount) mo.Result[struct{}] { + pidFile 
:= m.pidFilePath(mount.Tag) + pidBytes, err := os.ReadFile(pidFile) + if err != nil { + if os.IsNotExist(err) { + return mo.Ok(struct{}{}) + } + return mo.Err[struct{}](fmt.Errorf("failed to read PID file for %s: %w", mount.Tag, err)) + } + + pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))) + if err != nil { + return mo.Err[struct{}](fmt.Errorf("invalid PID in file for %s: %w", mount.Tag, err)) + } + + process, err := os.FindProcess(pid) + if err != nil { + _ = os.Remove(pidFile) + _ = os.Remove(mount.SocketPath) + return mo.Ok(struct{}{}) + } + + if err := process.Signal(syscall.SIGTERM); err != nil { + _ = os.Remove(pidFile) + _ = os.Remove(mount.SocketPath) + return mo.Ok(struct{}{}) + } + + done := make(chan bool, 1) + go func() { + _, _ = process.Wait() + done <- true + }() + + select { + case <-done: + case <-time.After(5 * time.Second): + _ = process.Signal(syscall.SIGKILL) + <-done + } + + _ = os.Remove(pidFile) + _ = os.Remove(mount.SocketPath) + delete(m.pids, mount.Tag) + + return mo.Ok(struct{}{}) +} + +func (m *Manager) StartAll(mounts []Mount) mo.Result[struct{}] { + started := []Mount{} + + for _, mount := range mounts { + result := m.StartMount(mount) + if result.IsError() { + for i := len(started) - 1; i >= 0; i-- { + _ = m.StopMount(started[i]) + } + return mo.Err[struct{}](fmt.Errorf("failed to start mount %s: %w", mount.Tag, result.Error())) + } + started = append(started, mount) + } + + return mo.Ok(struct{}{}) +} + +func (m *Manager) StopAll() mo.Result[struct{}] { + files, err := filepath.Glob(filepath.Join(m.stateDir, "virtiofsd-*.pid")) + if err != nil { + return mo.Err[struct{}](fmt.Errorf("failed to list PID files: %w", err)) + } + + for _, pidFile := range files { + pidBytes, err := os.ReadFile(pidFile) + if err != nil { + continue + } + + pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))) + if err != nil { + continue + } + + if process, err := os.FindProcess(pid); err == nil { + _ = process.Signal(syscall.SIGTERM) + time.Sleep(100 * time.Millisecond) + _ = process.Signal(syscall.SIGKILL) + } + + _ = os.Remove(pidFile) + } + + sockFiles, err := filepath.Glob(filepath.Join(m.stateDir, "*.sock")) + if err == nil { + for _, sockFile := range sockFiles { + _ = os.Remove(sockFile) + } + } + + m.pids = make(map[string]int) + + return mo.Ok(struct{}{}) +} + +func (m *Manager) CleanStale(mounts []Mount) error { + for _, mount := range mounts { + if _, err := os.Stat(mount.SocketPath); err == nil { + pidFile := m.pidFilePath(mount.Tag) + pidBytes, err := os.ReadFile(pidFile) + if err != nil { + _ = os.Remove(mount.SocketPath) + continue + } + + pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))) + if err != nil { + _ = os.Remove(mount.SocketPath) + _ = os.Remove(pidFile) + continue + } + + process, err := os.FindProcess(pid) + if err != nil { + _ = os.Remove(mount.SocketPath) + _ = os.Remove(pidFile) + continue + } + + if err := process.Signal(syscall.Signal(0)); err != nil { + _ = os.Remove(mount.SocketPath) + _ = os.Remove(pidFile) + } + } + } + + return nil +} + +func (m *Manager) pidFilePath(tag string) string { + return filepath.Join(m.stateDir, "virtiofsd-"+tag+".pid") +} diff --git a/internal/virtiofsd/mount.go b/internal/virtiofsd/mount.go new file mode 100644 index 0000000..d000570 --- /dev/null +++ b/internal/virtiofsd/mount.go @@ -0,0 +1,51 @@ +package virtiofsd + +import ( + "path/filepath" + "qvm/internal/config" +) + +// Mount represents a single virtiofsd mount configuration +type Mount struct { + Tag string // Mount tag 
(e.g., "cargo_home", "ws_abc12345") + HostPath string // Path on host to share + SocketPath string // Path to virtiofsd socket +} + +// DefaultCacheMounts returns the standard cache mounts for cargo, pnpm, and sccache. +// These are shared across all projects and mounted at VM start. +func DefaultCacheMounts() []Mount { + return []Mount{ + { + Tag: "cargo_home", + HostPath: config.CargoHome, + SocketPath: filepath.Join(config.StateDir, "cargo_home.sock"), + }, + { + Tag: "cargo_target", + HostPath: config.CargoTarget, + SocketPath: filepath.Join(config.StateDir, "cargo_target.sock"), + }, + { + Tag: "pnpm_store", + HostPath: config.PnpmStore, + SocketPath: filepath.Join(config.StateDir, "pnpm_store.sock"), + }, + { + Tag: "sccache", + HostPath: config.Sccache, + SocketPath: filepath.Join(config.StateDir, "sccache.sock"), + }, + } +} + +// WorkspaceMount creates a Mount configuration for a single workspace. +// mountTag should be the workspace's mount tag (e.g., "ws_abc12345") +// hostPath is the absolute path on the host to share +func WorkspaceMount(mountTag, hostPath string) Mount { + return Mount{ + Tag: mountTag, + HostPath: hostPath, + SocketPath: filepath.Join(config.StateDir, mountTag+".sock"), + } +} diff --git a/internal/vm/lifecycle.go b/internal/vm/lifecycle.go new file mode 100644 index 0000000..2c4df4b --- /dev/null +++ b/internal/vm/lifecycle.go @@ -0,0 +1,379 @@ +package vm + +import ( + "fmt" + "os" + "os/exec" + "qvm/internal/config" + "qvm/internal/logging" + "qvm/internal/workspace" + "strconv" + "strings" + "syscall" + "time" + + "github.com/samber/mo" +) + +// VMStatus represents the current state of the VM +type VMStatus struct { + Running bool + PID int + SSHPort int +} + +// Mount represents a 9p filesystem mount +type Mount struct { + Tag string + HostPath string +} + +// Start launches the VM with all configured mounts. +// Sequence: +// 1. Check if VM is already running (via PID file and process check) +// 2. Ensure all required directories exist +// 3. Build mount list (cache mounts + workspace mounts from registry) +// 4. Find available SSH port +// 5. Build and start VM via runner script with 9p virtfs mounts +// 6. Write PID and SSH port to state files +// 7. Wait for SSH to become available (60 second timeout) +// +// Returns error if any step fails. +func Start(cfg *config.Config, reg *workspace.Registry) mo.Result[struct{}] { + // 1. Check if already running + if IsRunning() { + return mo.Err[struct{}](fmt.Errorf("VM is already running")) + } + + // 2. Ensure directories exist + if err := config.EnsureDirs(); err != nil { + return mo.Err[struct{}](fmt.Errorf("failed to create directories: %w", err)) + } + + // 2a. Check if base image exists + if _, err := os.Stat(config.BaseImage); os.IsNotExist(err) { + return mo.Err[struct{}](fmt.Errorf("base image not found at %s - run 'qvm rebuild' first", config.BaseImage)) + } + + // 2b. 
Create overlay if it doesn't exist (backed by base image) + if _, err := os.Stat(config.Overlay); os.IsNotExist(err) { + if _, err := os.Stat(config.BaseImage); os.IsNotExist(err) { + return mo.Err[struct{}](fmt.Errorf("base image not found at %s - run 'qvm rebuild' first", config.BaseImage)) + } + + logging.Info("Creating overlay image backed by base image...") + cmd := exec.Command("qemu-img", "create", "-f", "qcow2", + "-F", "qcow2", "-b", config.BaseImage, config.Overlay) + if output, err := cmd.CombinedOutput(); err != nil { + return mo.Err[struct{}](fmt.Errorf("failed to create overlay: %s: %w", string(output), err)) + } + } + + // 3. Build mount list (for 9p virtfs) + mounts := []Mount{ + {Tag: "cargo_home", HostPath: config.CargoHome}, + {Tag: "cargo_target", HostPath: config.CargoTarget}, + {Tag: "pnpm_store", HostPath: config.PnpmStore}, + {Tag: "sccache", HostPath: config.Sccache}, + } + + // Add opencode config mount if directory exists + if _, err := os.Stat(config.HostOpencodeConfig); err == nil { + mounts = append(mounts, Mount{ + Tag: "opencode_config", + HostPath: config.HostOpencodeConfig, + }) + } + + // Add workspace mounts from registry + for _, ws := range reg.List() { + mounts = append(mounts, Mount{ + Tag: ws.MountTag, + HostPath: ws.HostPath, + }) + } + + // 4. Find available SSH port + sshPort, err := findAvailablePort(2222) + if err != nil { + return mo.Err[struct{}](fmt.Errorf("failed to find available SSH port: %w", err)) + } + + // 5. Build QEMU command and start VM directly + args := buildQEMUArgs(cfg, sshPort, mounts) + cmd := exec.Command("qemu-system-x86_64", args...) + + cmd.Stdout = nil + cmd.Stderr = nil + cmd.Stdin = nil + + if err := cmd.Run(); err != nil { + return mo.Err[struct{}](fmt.Errorf("failed to start QEMU: %w", err)) + } + + logging.Info("Waiting for VM to daemonize...") + pidFileReady := false + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if _, err := os.Stat(config.PIDFile); err == nil { + pidFileReady = true + break + } + } + + if !pidFileReady { + return mo.Err[struct{}](fmt.Errorf("QEMU did not create PID file after 5 seconds")) + } + + pidBytes, err := os.ReadFile(config.PIDFile) + if err != nil { + return mo.Err[struct{}](fmt.Errorf("failed to read PID file: %w", err)) + } + pid := strings.TrimSpace(string(pidBytes)) + + logging.Info("VM started with PID " + pid) + + if err := os.WriteFile(config.SSHPortFile, []byte(strconv.Itoa(sshPort)), 0644); err != nil { + if pidBytes, err := os.ReadFile(config.PIDFile); err == nil { + if pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))); err == nil { + if process, err := os.FindProcess(pid); err == nil { + _ = process.Kill() + } + } + } + _ = os.Remove(config.PIDFile) + return mo.Err[struct{}](fmt.Errorf("failed to write SSH port file: %w", err)) + } + + // 7. Wait for SSH + if err := waitForSSH(sshPort, 120*time.Second); err != nil { + _ = Stop() // cmd.Process is the already-exited daemonize launcher; Stop kills QEMU via its PID file + _ = os.Remove(config.PIDFile) + _ = os.Remove(config.SSHPortFile) + return mo.Err[struct{}](fmt.Errorf("VM started but SSH not available: %w", err)) + } + + return mo.Ok(struct{}{}) +} + +// Stop gracefully shuts down the VM. +// Sequence: +// 1. Read PID from file +// 2. Send SIGTERM to the process +// 3. Wait up to 30 seconds for graceful shutdown (poll every second) +// 4. If still running, send SIGKILL +// 5. Clean up PID and port files +// +// Returns success even if VM is not running (idempotent). +func Stop() mo.Result[struct{}] { + // 1.
Read PID file + pidBytes, err := os.ReadFile(config.PIDFile) + if err != nil { + if os.IsNotExist(err) { + // Not running + return mo.Ok(struct{}{}) + } + return mo.Err[struct{}](fmt.Errorf("failed to read PID file: %w", err)) + } + + pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))) + if err != nil { + return mo.Err[struct{}](fmt.Errorf("invalid PID in file: %w", err)) + } + + // Check if process exists + process, err := os.FindProcess(pid) + if err != nil { + // Process doesn't exist, clean up + cleanupStateFiles() + return mo.Ok(struct{}{}) + } + + // 2. Send SIGTERM for graceful shutdown + if err := process.Signal(syscall.SIGTERM); err != nil { + // Process already gone + cleanupStateFiles() + return mo.Ok(struct{}{}) + } + + // 3. Wait up to 30 seconds for process to exit (poll every second) + for i := 0; i < 30; i++ { + time.Sleep(1 * time.Second) + + // Check if process still exists by sending signal 0 + if err := process.Signal(syscall.Signal(0)); err != nil { + // Process no longer exists + cleanupStateFiles() + return mo.Ok(struct{}{}) + } + } + + // 4. Timeout, force kill + _ = process.Signal(syscall.SIGKILL) + + // Wait a moment for SIGKILL to take effect + time.Sleep(1 * time.Second) + + // 5. Clean up state files + cleanupStateFiles() + + return mo.Ok(struct{}{}) +} + +// cleanupStateFiles removes all VM state files +func cleanupStateFiles() { + _ = os.Remove(config.PIDFile) + _ = os.Remove(config.SSHPortFile) + _ = os.Remove(config.QMPSocket) +} + +// Status returns the current VM status (running, PID, SSH port). +func Status() mo.Result[VMStatus] { + status := VMStatus{ + Running: false, + PID: 0, + SSHPort: 0, + } + + if !IsRunning() { + return mo.Ok(status) + } + + // Read PID + pidBytes, err := os.ReadFile(config.PIDFile) + if err != nil { + return mo.Err[VMStatus](fmt.Errorf("failed to read PID file: %w", err)) + } + + pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))) + if err != nil { + return mo.Err[VMStatus](fmt.Errorf("invalid PID in file: %w", err)) + } + + // Read SSH port + portBytes, err := os.ReadFile(config.SSHPortFile) + if err != nil { + return mo.Err[VMStatus](fmt.Errorf("failed to read SSH port file: %w", err)) + } + + sshPort, err := strconv.Atoi(strings.TrimSpace(string(portBytes))) + if err != nil { + return mo.Err[VMStatus](fmt.Errorf("invalid SSH port in file: %w", err)) + } + + status.Running = true + status.PID = pid + status.SSHPort = sshPort + + return mo.Ok(status) +} + +// Reset stops the VM and deletes the overlay image. +// This returns the VM to a fresh state based on the base image. +func Reset() mo.Result[struct{}] { + // Stop VM if running + stopResult := Stop() + if stopResult.IsError() { + return mo.Err[struct{}](fmt.Errorf("failed to stop VM: %w", stopResult.Error())) + } + + // Delete overlay image + if err := os.Remove(config.Overlay); err != nil && !os.IsNotExist(err) { + return mo.Err[struct{}](fmt.Errorf("failed to delete overlay: %w", err)) + } + + return mo.Ok(struct{}{}) +} + +// IsRunning performs a quick check if the VM is running by checking +// the PID file and verifying the process exists. 
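+// On Unix, os.FindProcess always succeeds regardless of whether the PID is +// live, so the Signal(0) probe below is what actually detects a dead process.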
+func IsRunning() bool { + pidBytes, err := os.ReadFile(config.PIDFile) + if err != nil { + return false + } + + pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))) + if err != nil { + return false + } + + // Check if process exists by sending signal 0 + process, err := os.FindProcess(pid) + if err != nil { + return false + } + + err = process.Signal(syscall.Signal(0)) + return err == nil +} + +func buildQEMUArgs(cfg *config.Config, sshPort int, mounts []Mount) []string { + // Boot directly from the qcow2 disk image (has GRUB installed) + // Do NOT use -kernel/-initrd - that's for NixOS VM runner which requires special 9p mounts + args := []string{ + "-machine", "q35", + "-accel", "kvm", + "-cpu", "host", + "-m", cfg.VM.Memory, + "-smp", strconv.Itoa(cfg.VM.CPUs), + "-display", "none", + "-daemonize", + "-pidfile", config.PIDFile, + "-drive", fmt.Sprintf("file=%s,if=virtio,format=qcow2", config.Overlay), + "-netdev", fmt.Sprintf("user,id=n0,hostfwd=tcp::%d-:22", sshPort), + "-device", "virtio-net-pci,netdev=n0", + "-serial", fmt.Sprintf("file:%s", config.SerialLog), + } + + // Add 9p mounts for cache directories and workspaces + for _, mount := range mounts { + args = append(args, + "-virtfs", fmt.Sprintf("local,path=%s,mount_tag=%s,security_model=mapped-xattr,id=%s", + mount.HostPath, mount.Tag, mount.Tag), + ) + } + + return args +} + +// findAvailablePort finds an available TCP port starting from the given base port. +func findAvailablePort(basePort int) (int, error) { + const maxAttempts = 100 + + for i := 0; i < maxAttempts; i++ { + port := basePort + i + + cmd := exec.Command("nc", "-z", "localhost", strconv.Itoa(port)) + if err := cmd.Run(); err != nil { + return port, nil + } + } + + return 0, fmt.Errorf("could not find available port after %d attempts", maxAttempts) +} + +// waitForSSH waits for SSH to become available on the given port. +// Uses sshpass with password 'root' to test connection. 
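+// Polls once per second, with a 1-second connect timeout per attempt and host +// key checking disabled, mirroring the wait_for_ssh helper from lib/common.sh.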
+func waitForSSH(port int, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + + for time.Now().Before(deadline) { + cmd := exec.Command("sshpass", "-p", "root", + "ssh", + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-o", "ConnectTimeout=1", + "-p", strconv.Itoa(port), + "root@localhost", + "exit 0") + + if err := cmd.Run(); err == nil { + return nil + } + + time.Sleep(1 * time.Second) + } + + return fmt.Errorf("SSH did not become available within %v", timeout) +} diff --git a/internal/vm/qemu.go b/internal/vm/qemu.go new file mode 100644 index 0000000..deae525 --- /dev/null +++ b/internal/vm/qemu.go @@ -0,0 +1,54 @@ +package vm + +import ( + "fmt" + "qvm/internal/config" + "qvm/internal/virtiofsd" + "strconv" +) + +func buildQEMUCommand(cfg *config.Config, sshPort int, mounts []virtiofsd.Mount) []string { + args := []string{ + "qemu-system-x86_64", + "-enable-kvm", + } + + memSize := cfg.VM.Memory + args = append(args, + "-object", fmt.Sprintf("memory-backend-memfd,id=mem,size=%s,share=on", memSize), + "-numa", "node,memdev=mem", + ) + + args = append(args, + "-smp", strconv.Itoa(cfg.VM.CPUs), + ) + + args = append(args, + "-drive", fmt.Sprintf("if=virtio,file=%s,format=qcow2", config.Overlay), + ) + + args = append(args, + "-nic", fmt.Sprintf("user,model=virtio-net-pci,hostfwd=tcp::%d-:22", sshPort), + ) + + args = append(args, + "-serial", fmt.Sprintf("file:%s", config.SerialLog), + ) + + args = append(args, + "-qmp", fmt.Sprintf("unix:%s,server,nowait", config.QMPSocket), + ) + + args = append(args, + "-display", "none", + ) + + for _, mount := range mounts { + args = append(args, + "-chardev", fmt.Sprintf("socket,id=%s,path=%s", mount.Tag, mount.SocketPath), + "-device", fmt.Sprintf("vhost-user-fs-pci,queue-size=1024,chardev=%s,tag=%s", mount.Tag, mount.Tag), + ) + } + + return args +} diff --git a/internal/workspace/registry.go b/internal/workspace/registry.go new file mode 100644 index 0000000..91b7a55 --- /dev/null +++ b/internal/workspace/registry.go @@ -0,0 +1,139 @@ +package workspace + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/samber/mo" +) + +type Workspace struct { + HostPath string `json:"host_path"` + Hash string `json:"hash"` + MountTag string `json:"mount_tag"` + GuestPath string `json:"guest_path"` +} + +type Registry struct { + filePath string + workspaces map[string]Workspace +} + +// Hash generates an 8-character hash from a path, matching bash behavior: +// echo -n "$path" | sha256sum | cut -c1-8 +func Hash(path string) string { + h := sha256.Sum256([]byte(path)) + return fmt.Sprintf("%x", h)[:8] +} + +// NewRegistry creates a new empty registry +func NewRegistry(filePath string) *Registry { + return &Registry{ + filePath: filePath, + workspaces: make(map[string]Workspace), + } +} + +// Load reads the registry from a JSON file +func Load(filePath string) mo.Result[*Registry] { + registry := NewRegistry(filePath) + + if _, err := os.Stat(filePath); os.IsNotExist(err) { + return mo.Ok(registry) + } + + data, err := os.ReadFile(filePath) + if err != nil { + return mo.Err[*Registry](fmt.Errorf("failed to read workspaces file: %w", err)) + } + + if len(data) == 0 { + return mo.Ok(registry) + } + + var workspaceList []Workspace + if err := json.Unmarshal(data, &workspaceList); err != nil { + return mo.Err[*Registry](fmt.Errorf("failed to parse workspaces JSON: %w", err)) + } + + for _, ws := range workspaceList { + registry.workspaces[ws.HostPath] = ws + } + + return 
mo.Ok(registry) +} + +// Save writes the registry to the JSON file +func (r *Registry) Save() mo.Result[struct{}] { + dir := filepath.Dir(r.filePath) + if err := os.MkdirAll(dir, 0755); err != nil { + return mo.Err[struct{}](fmt.Errorf("failed to create directory: %w", err)) + } + + workspaceList := make([]Workspace, 0, len(r.workspaces)) + for _, ws := range r.workspaces { + workspaceList = append(workspaceList, ws) + } + + data, err := json.MarshalIndent(workspaceList, "", " ") + if err != nil { + return mo.Err[struct{}](fmt.Errorf("failed to marshal JSON: %w", err)) + } + + if err := os.WriteFile(r.filePath, data, 0644); err != nil { + return mo.Err[struct{}](fmt.Errorf("failed to write workspaces file: %w", err)) + } + + return mo.Ok(struct{}{}) +} + +// Register adds a workspace to the registry if it doesn't already exist +func (r *Registry) Register(hostPath string) mo.Result[*Workspace] { + absPath, err := filepath.Abs(hostPath) + if err != nil { + return mo.Err[*Workspace](fmt.Errorf("failed to resolve absolute path: %w", err)) + } + + if existing, exists := r.workspaces[absPath]; exists { + return mo.Ok(&existing) + } + + hash := Hash(absPath) + ws := Workspace{ + HostPath: absPath, + Hash: hash, + MountTag: fmt.Sprintf("ws_%s", hash), + GuestPath: fmt.Sprintf("/workspace/%s", hash), + } + + r.workspaces[absPath] = ws + return mo.Ok(&ws) +} + +// List returns all registered workspaces +func (r *Registry) List() []Workspace { + result := make([]Workspace, 0, len(r.workspaces)) + for _, ws := range r.workspaces { + result = append(result, ws) + } + return result +} + +// Find looks up a workspace by host path +func (r *Registry) Find(hostPath string) mo.Option[Workspace] { + absPath, err := filepath.Abs(hostPath) + if err != nil { + if ws, exists := r.workspaces[hostPath]; exists { + return mo.Some(ws) + } + return mo.None[Workspace]() + } + + if ws, exists := r.workspaces[absPath]; exists { + return mo.Some(ws) + } + return mo.None[Workspace]() +} diff --git a/lib/common.sh b/lib/common.sh deleted file mode 100644 index 61b5525..0000000 --- a/lib/common.sh +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/env bash -# -# common.sh - Shared functions and configuration for QVM CLI tool -# -# This file defines XDG-compliant directory paths, constants, and utility -# functions used across all qvm-* commands. 
It should be sourced by each -# command script via: source "${QVM_LIB_DIR}/common.sh" -# - -set -euo pipefail - -# XDG-compliant directory paths -readonly QVM_DATA_DIR="${XDG_DATA_HOME:-$HOME/.local/share}/qvm" -readonly QVM_STATE_DIR="${XDG_STATE_HOME:-$HOME/.local/state}/qvm" -readonly QVM_CACHE_DIR="${XDG_CACHE_HOME:-$HOME/.cache}/qvm" -readonly QVM_CONFIG_DIR="${XDG_CONFIG_HOME:-$HOME/.config}/qvm" - -# Path constants for VM artifacts -readonly QVM_BASE_IMAGE="$QVM_DATA_DIR/base.qcow2" -readonly QVM_OVERLAY="$QVM_STATE_DIR/overlay.qcow2" -readonly QVM_PID_FILE="$QVM_STATE_DIR/vm.pid" -readonly QVM_SSH_PORT_FILE="$QVM_STATE_DIR/ssh.port" -readonly QVM_SERIAL_LOG="$QVM_STATE_DIR/serial.log" -readonly QVM_WORKSPACES_FILE="$QVM_STATE_DIR/workspaces.json" -readonly QVM_USER_FLAKE="$QVM_CONFIG_DIR/flake" -readonly QVM_VM_RUNNER="$QVM_DATA_DIR/run-vm" -readonly QVM_CONFIG_FILE="$QVM_CONFIG_DIR/qvm.conf" - -# Cache directories for 9p mounts (shared between host and VM) -readonly QVM_CARGO_HOME="$QVM_CACHE_DIR/cargo-home" -readonly QVM_CARGO_TARGET="$QVM_CACHE_DIR/cargo-target" -readonly QVM_PNPM_STORE="$QVM_CACHE_DIR/pnpm-store" -readonly QVM_SCCACHE="$QVM_CACHE_DIR/sccache" - -# Host config directories to mount in VM (read-write for tools that need it) -readonly QVM_HOST_OPENCODE_CONFIG="${XDG_CONFIG_HOME:-$HOME/.config}/opencode" - -# Color codes (only used if stdout is a TTY) -if [[ -t 1 ]]; then - readonly COLOR_INFO='\033[0;36m' # Cyan - readonly COLOR_WARN='\033[0;33m' # Yellow - readonly COLOR_ERROR='\033[0;31m' # Red - readonly COLOR_RESET='\033[0m' # Reset -else - readonly COLOR_INFO='' - readonly COLOR_WARN='' - readonly COLOR_ERROR='' - readonly COLOR_RESET='' -fi - -# -# log_info - Print informational message in cyan -# Usage: log_info "message" -# -log_info() { - echo -e "${COLOR_INFO}[INFO]${COLOR_RESET} $*" >&2 -} - -# -# log_warn - Print warning message in yellow -# Usage: log_warn "message" -# -log_warn() { - echo -e "${COLOR_WARN}[WARN]${COLOR_RESET} $*" >&2 -} - -# -# log_error - Print error message in red -# Usage: log_error "message" -# -log_error() { - echo -e "${COLOR_ERROR}[ERROR]${COLOR_RESET} $*" >&2 -} - -# -# die - Print error message and exit with status 1 -# Usage: die "error message" -# -die() { - log_error "$@" - exit 1 -} - -# -# ensure_dirs - Create all required QVM directories -# Usage: ensure_dirs -# -ensure_dirs() { - mkdir -p "$QVM_DATA_DIR" \ - "$QVM_STATE_DIR" \ - "$QVM_CACHE_DIR" \ - "$QVM_CONFIG_DIR" \ - "$QVM_CARGO_HOME" \ - "$QVM_CARGO_TARGET" \ - "$QVM_PNPM_STORE" \ - "$QVM_SCCACHE" -} - -# -# is_vm_running - Check if VM process is running -# Returns: 0 if running, 1 if not -# Usage: if is_vm_running; then ... fi -# -is_vm_running() { - if [[ ! -f "$QVM_PID_FILE" ]]; then - return 1 - fi - - local pid - pid=$(cat "$QVM_PID_FILE") - - # Check if process exists and is a QEMU process - if kill -0 "$pid" 2>/dev/null; then - return 0 - else - # Stale PID file, remove it - rm -f "$QVM_PID_FILE" - return 1 - fi -} - -# -# get_ssh_port - Read SSH port from state file -# Returns: SSH port number on stdout -# Usage: port=$(get_ssh_port) -# -get_ssh_port() { - if [[ ! -f "$QVM_SSH_PORT_FILE" ]]; then - die "SSH port file not found. Is the VM running?" 
- fi - cat "$QVM_SSH_PORT_FILE" -} - -# -# workspace_hash - Generate short hash from absolute path -# Args: $1 - absolute path to workspace -# Returns: 8-character hash on stdout -# Usage: hash=$(workspace_hash "/path/to/workspace") -# -workspace_hash() { - local path="$1" - echo -n "$path" | sha256sum | cut -c1-8 -} - -# -# wait_for_ssh - Wait for SSH to become available on VM -# Args: $1 - SSH port number -# $2 - timeout in seconds (default: 60) -# Returns: 0 if SSH is available, 1 on timeout -# Usage: wait_for_ssh "$port" 30 -# -wait_for_ssh() { - local port="${1:-}" - local timeout="${2:-60}" - local elapsed=0 - - if [[ -z "$port" ]]; then - die "wait_for_ssh requires port argument" - fi - - log_info "Waiting for SSH on port $port (timeout: ${timeout}s)..." - - while ((elapsed < timeout)); do - # Actually attempt SSH connection to verify sshd is responding - # nc -z only checks if port is open (QEMU opens it immediately) - # We need to verify sshd is actually ready to accept connections - if timeout 2 sshpass -p root ssh \ - -o StrictHostKeyChecking=no \ - -o UserKnownHostsFile=/dev/null \ - -o LogLevel=ERROR \ - -o PubkeyAuthentication=no \ - -o PasswordAuthentication=yes \ - -o ConnectTimeout=1 \ - -p "$port" \ - root@localhost "true" 2>/dev/null; then - log_info "SSH is ready" - return 0 - fi - sleep 1 - ((elapsed++)) - done - - log_error "SSH did not become available within ${timeout}s" - return 1 -}
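One behavioral contract worth pinning down with a test: workspace.Hash must produce the same 8 characters as the bash pipeline it replaces (`echo -n "$path" | sha256sum | cut -c1-8`), or existing `/workspace/{hash}` guest paths and `ws_{hash}` mount tags would silently change across the rewrite. Below is a minimal parity-test sketch, not part of this diff; the file name (`internal/workspace/registry_test.go`) and sample paths are illustrative, and it assumes a coreutils `sha256sum` on PATH:

```
package workspace

import (
	"os/exec"
	"strings"
	"testing"
)

// TestHashMatchesBash checks that Hash agrees with the old bash hashing:
//
//	echo -n "$path" | sha256sum | cut -c1-8
func TestHashMatchesBash(t *testing.T) {
	if _, err := exec.LookPath("sha256sum"); err != nil {
		t.Skip("sha256sum not on PATH")
	}

	for _, path := range []string{"/home/user/project", "/tmp/ws", "/"} {
		cmd := exec.Command("sha256sum")
		cmd.Stdin = strings.NewReader(path) // echo -n "$path" (no trailing newline)
		out, err := cmd.Output()
		if err != nil {
			t.Fatalf("sha256sum failed: %v", err)
		}
		want := string(out[:8]) // cut -c1-8 of the hex digest
		if got := Hash(path); got != want {
			t.Errorf("Hash(%q) = %q, want %q", path, got, want)
		}
	}
}
```

Running `go test ./internal/workspace` would then catch any drift between the Go and bash hashing before stale workspace paths show up in the VM.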