Replace Bash qvm scripts with Go CLI implementation

Joshua Bell 2026-01-26 20:48:32 -06:00
parent ffb456707f
commit 2a6a333721
27 changed files with 2551 additions and 1702 deletions

internal/config/config.go Normal file (+47)

@@ -0,0 +1,47 @@
package config
import (
"fmt"
"os"
"strconv"
"github.com/BurntSushi/toml"
)
type Config struct {
VM VMConfig `toml:"vm"`
}
type VMConfig struct {
Memory string `toml:"memory"`
CPUs int `toml:"cpus"`
}
func Load() (*Config, error) {
cfg := &Config{
VM: VMConfig{
Memory: "30G",
CPUs: 30,
},
}
if _, err := os.Stat(ConfigFile); err == nil {
if _, err := toml.DecodeFile(ConfigFile, cfg); err != nil {
return nil, fmt.Errorf("failed to parse config file %s: %w", ConfigFile, err)
}
}
if memEnv := os.Getenv("QVM_MEMORY"); memEnv != "" {
cfg.VM.Memory = memEnv
}
if cpusEnv := os.Getenv("QVM_CPUS"); cpusEnv != "" {
cpus, err := strconv.Atoi(cpusEnv)
if err != nil {
return nil, fmt.Errorf("QVM_CPUS must be a valid integer: %w", err)
}
cfg.VM.CPUs = cpus
}
return cfg, nil
}
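
A minimal usage sketch of the loader above (the main wiring and the QVM_CPUS value are illustrative, not part of this commit); precedence is built-in defaults, then qvm.toml, then environment variables:

// Hypothetical caller showing config precedence: defaults < qvm.toml < environment.
package main

import (
	"fmt"
	"log"
	"os"

	"qvm/internal/config"
)

func main() {
	// An env var overrides whatever qvm.toml (or the default) set.
	os.Setenv("QVM_CPUS", "8")

	cfg, err := config.Load()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("memory=%s cpus=%d\n", cfg.VM.Memory, cfg.VM.CPUs)
}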

internal/config/paths.go Normal file (+125)

@@ -0,0 +1,125 @@
// Package config handles QVM configuration loading and management.
package config
import (
"os"
"path/filepath"
)
// XDG-compliant directory paths matching bash lib/common.sh exactly
var (
// DataDir is the base directory for QVM data files (base image, runner)
// Defaults to $HOME/.local/share/qvm
DataDir = getXDGPath("XDG_DATA_HOME", ".local/share", "qvm")
// StateDir is the directory for QVM runtime state (overlay, PID, port files)
// Defaults to $HOME/.local/state/qvm
StateDir = getXDGPath("XDG_STATE_HOME", ".local/state", "qvm")
// CacheDir is the directory for shared build caches (cargo, pnpm, sccache)
// Defaults to $HOME/.cache/qvm
CacheDir = getXDGPath("XDG_CACHE_HOME", ".cache", "qvm")
// ConfigDir is the directory for QVM configuration (flake, qvm.toml)
// Defaults to $HOME/.config/qvm
ConfigDir = getXDGPath("XDG_CONFIG_HOME", ".config", "qvm")
)
// Path constants for VM artifacts
var (
// BaseImage is the path to the base VM image (read-only)
BaseImage = filepath.Join(DataDir, "base.qcow2")
// VMRunner is the path to the VM runner script
VMRunner = filepath.Join(DataDir, "run-vm")
// Overlay is the path to the VM overlay image (copy-on-write)
Overlay = filepath.Join(StateDir, "qvm-dev.qcow2")
// PIDFile is the path to the VM process ID file
PIDFile = filepath.Join(StateDir, "vm.pid")
// SSHPortFile is the path to the SSH port file
SSHPortFile = filepath.Join(StateDir, "ssh.port")
// SerialLog is the path to the VM serial console log
SerialLog = filepath.Join(StateDir, "serial.log")
// WorkspacesFile is the path to the workspaces registry JSON
WorkspacesFile = filepath.Join(StateDir, "workspaces.json")
// QMPSocket is the path to the QMP (QEMU Machine Protocol) socket
QMPSocket = filepath.Join(StateDir, "qmp.sock")
// UserFlake is the path to the user's customizable NixOS flake
UserFlake = filepath.Join(ConfigDir, "flake")
// ConfigFile is the path to the QVM TOML configuration file
ConfigFile = filepath.Join(ConfigDir, "qvm.toml")
)
// Cache directories for 9p mounts (shared between host and VM)
var (
// CargoHome is the shared Cargo registry/cache directory
CargoHome = filepath.Join(CacheDir, "cargo-home")
// CargoTarget is the shared Cargo build artifacts directory
CargoTarget = filepath.Join(CacheDir, "cargo-target")
// PnpmStore is the shared pnpm content-addressable store
PnpmStore = filepath.Join(CacheDir, "pnpm-store")
// Sccache is the shared sccache compilation cache
Sccache = filepath.Join(CacheDir, "sccache")
)
// Host config directories to mount in VM (read-write for tools that need it)
var (
// HostOpencodeConfig is the path to the host's opencode configuration
// Defaults to $HOME/.config/opencode
HostOpencodeConfig = getXDGPath("XDG_CONFIG_HOME", ".config", "opencode")
)
// getXDGPath returns an XDG-compliant path with fallback.
// Args:
// - xdgEnv: XDG environment variable to check (e.g., "XDG_DATA_HOME")
// - fallbackPath: relative path from $HOME if xdgEnv is not set (e.g., ".local/share")
// - suffix: additional path suffix to append (e.g., "qvm")
//
// Returns the resolved absolute path.
func getXDGPath(xdgEnv, fallbackPath, suffix string) string {
base := os.Getenv(xdgEnv)
if base == "" {
home := os.Getenv("HOME")
if home == "" {
// Fallback to current directory if HOME is not set
home = "."
}
base = filepath.Join(home, fallbackPath)
}
return filepath.Join(base, suffix)
}
// EnsureDirs creates all required QVM directories if they don't exist.
// Returns error if directory creation fails.
func EnsureDirs() error {
dirs := []string{
DataDir,
StateDir,
CacheDir,
ConfigDir,
CargoHome,
CargoTarget,
PnpmStore,
Sccache,
}
for _, dir := range dirs {
if err := os.MkdirAll(dir, 0755); err != nil {
return err
}
}
return nil
}
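
A short sketch of how these variables resolve and how a command would prepare the directories (the HOME value and the main function are illustrative; only DataDir, BaseImage, Overlay, and EnsureDirs come from this package):

package main

import (
	"fmt"
	"log"

	"qvm/internal/config"
)

func main() {
	// With XDG_DATA_HOME unset and HOME=/home/alice (illustrative values):
	//   config.DataDir   == /home/alice/.local/share/qvm
	//   config.BaseImage == /home/alice/.local/share/qvm/base.qcow2
	//   config.Overlay   == /home/alice/.local/state/qvm/qvm-dev.qcow2
	fmt.Println(config.DataDir, config.BaseImage, config.Overlay)

	// Create all data, state, cache, and config directories before any other step.
	if err := config.EnsureDirs(); err != nil {
		log.Fatal(err)
	}
}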


@@ -0,0 +1,27 @@
package logging
import (
"fmt"
"os"
)
// Simple logging package matching bash script format:
// [INFO] message
// [WARN] message
// [ERROR] message
// No timestamps or additional metadata
// Info prints an informational message
func Info(msg string) {
fmt.Printf("[INFO] %s\n", msg)
}
// Warn prints a warning message
func Warn(msg string) {
fmt.Printf("[WARN] %s\n", msg)
}
// Error prints an error message
func Error(msg string) {
fmt.Fprintf(os.Stderr, "[ERROR] %s\n", msg)
}

internal/qmp/client.go Normal file (+75)

@@ -0,0 +1,75 @@
package qmp
import (
"encoding/json"
"fmt"
"time"
"github.com/digitalocean/go-qemu/qmp"
"github.com/samber/mo"
)
type Client struct {
monitor qmp.Monitor
}
type VMStatus struct {
Running bool
Singlestep bool
Status string
}
func Connect(socketPath string) mo.Result[*Client] {
monitor, err := qmp.NewSocketMonitor("unix", socketPath, 2*time.Second)
if err != nil {
return mo.Err[*Client](fmt.Errorf("failed to create socket monitor: %w", err))
}
if err := monitor.Connect(); err != nil {
return mo.Err[*Client](fmt.Errorf("failed to connect to QMP socket: %w", err))
}
return mo.Ok(&Client{monitor: monitor})
}
func (c *Client) Status() mo.Result[VMStatus] {
type statusResult struct {
ID string `json:"id"`
Return struct {
Running bool `json:"running"`
Singlestep bool `json:"singlestep"`
Status string `json:"status"`
} `json:"return"`
}
cmd := []byte(`{"execute":"query-status"}`)
raw, err := c.monitor.Run(cmd)
if err != nil {
return mo.Err[VMStatus](fmt.Errorf("failed to execute query-status: %w", err))
}
var result statusResult
if err := json.Unmarshal(raw, &result); err != nil {
return mo.Err[VMStatus](fmt.Errorf("failed to parse status response: %w", err))
}
return mo.Ok(VMStatus{
Running: result.Return.Running,
Singlestep: result.Return.Singlestep,
Status: result.Return.Status,
})
}
func (c *Client) Shutdown() mo.Result[struct{}] {
cmd := []byte(`{"execute":"system_powerdown"}`)
_, err := c.monitor.Run(cmd)
if err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to execute system_powerdown: %w", err))
}
return mo.Ok(struct{}{})
}
func (c *Client) Close() error {
return c.monitor.Disconnect()
}
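
A minimal sketch of driving the QMP client (the error handling and the use of mo.Result's Get accessor are illustrative; Connect, Status, Shutdown, and Close are the functions defined above):

package main

import (
	"fmt"
	"log"

	"qvm/internal/config"
	"qvm/internal/qmp"
)

func main() {
	client, err := qmp.Connect(config.QMPSocket).Get() // unwrap mo.Result into (value, error)
	if err != nil {
		log.Fatalf("QMP connect failed: %v", err)
	}
	defer client.Close()

	status, err := client.Status().Get()
	if err != nil {
		log.Fatalf("query-status failed: %v", err)
	}
	fmt.Printf("running=%v status=%s\n", status.Running, status.Status)

	// Request a clean guest shutdown via system_powerdown.
	if _, err := client.Shutdown().Get(); err != nil {
		log.Fatalf("system_powerdown failed: %v", err)
	}
}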


@@ -0,0 +1,234 @@
package virtiofsd
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/samber/mo"
)
type Manager struct {
stateDir string
pids map[string]int
}
func NewManager(stateDir string) *Manager {
return &Manager{
stateDir: stateDir,
pids: make(map[string]int),
}
}
func findVirtiofsd() (string, error) {
// First try PATH
if path, err := exec.LookPath("virtiofsd"); err == nil {
return path, nil
}
// Fall back to nix
cmd := exec.Command("nix", "path-info", "nixpkgs#virtiofsd")
output, err := cmd.Output()
if err != nil {
return "", fmt.Errorf("virtiofsd not found in PATH and nix lookup failed: %w", err)
}
storePath := strings.TrimSpace(string(output))
virtiofsdPath := filepath.Join(storePath, "bin", "virtiofsd")
if _, err := os.Stat(virtiofsdPath); err != nil {
return "", fmt.Errorf("virtiofsd binary not found at %s", virtiofsdPath)
}
return virtiofsdPath, nil
}
func (m *Manager) StartMount(mount Mount) mo.Result[int] {
if err := m.CleanStale([]Mount{mount}); err != nil {
return mo.Err[int](fmt.Errorf("failed to clean stale socket for %s: %w", mount.Tag, err))
}
if err := os.MkdirAll(mount.HostPath, 0755); err != nil {
return mo.Err[int](fmt.Errorf("failed to create host directory %s: %w", mount.HostPath, err))
}
virtiofsd, err := findVirtiofsd()
if err != nil {
return mo.Err[int](err)
}
cmd := exec.Command(virtiofsd,
"--socket-path="+mount.SocketPath,
"--shared-dir="+mount.HostPath,
"--cache=auto",
)
if err := cmd.Start(); err != nil {
return mo.Err[int](fmt.Errorf("failed to start virtiofsd for %s: %w", mount.Tag, err))
}
pid := cmd.Process.Pid
m.pids[mount.Tag] = pid
pidFile := m.pidFilePath(mount.Tag)
if err := os.WriteFile(pidFile, []byte(strconv.Itoa(pid)), 0644); err != nil {
_ = cmd.Process.Kill()
return mo.Err[int](fmt.Errorf("failed to write PID file for %s: %w", mount.Tag, err))
}
for i := 0; i < 50; i++ {
if _, err := os.Stat(mount.SocketPath); err == nil {
return mo.Ok(pid)
}
time.Sleep(100 * time.Millisecond)
}
_ = m.StopMount(mount)
return mo.Err[int](fmt.Errorf("virtiofsd socket for %s did not appear within 5 seconds", mount.Tag))
}
func (m *Manager) StopMount(mount Mount) mo.Result[struct{}] {
pidFile := m.pidFilePath(mount.Tag)
pidBytes, err := os.ReadFile(pidFile)
if err != nil {
if os.IsNotExist(err) {
return mo.Ok(struct{}{})
}
return mo.Err[struct{}](fmt.Errorf("failed to read PID file for %s: %w", mount.Tag, err))
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
return mo.Err[struct{}](fmt.Errorf("invalid PID in file for %s: %w", mount.Tag, err))
}
process, err := os.FindProcess(pid)
if err != nil {
_ = os.Remove(pidFile)
_ = os.Remove(mount.SocketPath)
return mo.Ok(struct{}{})
}
if err := process.Signal(syscall.SIGTERM); err != nil {
_ = os.Remove(pidFile)
_ = os.Remove(mount.SocketPath)
return mo.Ok(struct{}{})
}
done := make(chan bool, 1)
go func() {
_, _ = process.Wait()
done <- true
}()
select {
case <-done:
case <-time.After(5 * time.Second):
_ = process.Signal(syscall.SIGKILL)
<-done
}
_ = os.Remove(pidFile)
_ = os.Remove(mount.SocketPath)
delete(m.pids, mount.Tag)
return mo.Ok(struct{}{})
}
func (m *Manager) StartAll(mounts []Mount) mo.Result[struct{}] {
started := []Mount{}
for _, mount := range mounts {
result := m.StartMount(mount)
if result.IsError() {
for i := len(started) - 1; i >= 0; i-- {
_ = m.StopMount(started[i])
}
return mo.Err[struct{}](fmt.Errorf("failed to start mount %s: %w", mount.Tag, result.Error()))
}
started = append(started, mount)
}
return mo.Ok(struct{}{})
}
func (m *Manager) StopAll() mo.Result[struct{}] {
files, err := filepath.Glob(filepath.Join(m.stateDir, "virtiofsd-*.pid"))
if err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to list PID files: %w", err))
}
for _, pidFile := range files {
pidBytes, err := os.ReadFile(pidFile)
if err != nil {
continue
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
continue
}
if process, err := os.FindProcess(pid); err == nil {
_ = process.Signal(syscall.SIGTERM)
time.Sleep(100 * time.Millisecond)
_ = process.Signal(syscall.SIGKILL)
}
_ = os.Remove(pidFile)
}
sockFiles, err := filepath.Glob(filepath.Join(m.stateDir, "*.sock"))
if err == nil {
for _, sockFile := range sockFiles {
_ = os.Remove(sockFile)
}
}
m.pids = make(map[string]int)
return mo.Ok(struct{}{})
}
func (m *Manager) CleanStale(mounts []Mount) error {
for _, mount := range mounts {
if _, err := os.Stat(mount.SocketPath); err == nil {
pidFile := m.pidFilePath(mount.Tag)
pidBytes, err := os.ReadFile(pidFile)
if err != nil {
_ = os.Remove(mount.SocketPath)
continue
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
_ = os.Remove(mount.SocketPath)
_ = os.Remove(pidFile)
continue
}
process, err := os.FindProcess(pid)
if err != nil {
_ = os.Remove(mount.SocketPath)
_ = os.Remove(pidFile)
continue
}
if err := process.Signal(syscall.Signal(0)); err != nil {
_ = os.Remove(mount.SocketPath)
_ = os.Remove(pidFile)
}
}
}
return nil
}
func (m *Manager) pidFilePath(tag string) string {
return filepath.Join(m.stateDir, "virtiofsd-"+tag+".pid")
}


@@ -0,0 +1,51 @@
package virtiofsd
import (
"path/filepath"
"qvm/internal/config"
)
// Mount represents a single virtiofsd mount configuration
type Mount struct {
Tag string // Mount tag (e.g., "cargo_home", "ws_abc12345")
HostPath string // Path on host to share
SocketPath string // Path to virtiofsd socket
}
// DefaultCacheMounts returns the standard cache mounts for cargo, pnpm, and sccache.
// These are shared across all projects and mounted at VM start.
func DefaultCacheMounts() []Mount {
return []Mount{
{
Tag: "cargo_home",
HostPath: config.CargoHome,
SocketPath: filepath.Join(config.StateDir, "cargo_home.sock"),
},
{
Tag: "cargo_target",
HostPath: config.CargoTarget,
SocketPath: filepath.Join(config.StateDir, "cargo_target.sock"),
},
{
Tag: "pnpm_store",
HostPath: config.PnpmStore,
SocketPath: filepath.Join(config.StateDir, "pnpm_store.sock"),
},
{
Tag: "sccache",
HostPath: config.Sccache,
SocketPath: filepath.Join(config.StateDir, "sccache.sock"),
},
}
}
// WorkspaceMount creates a Mount configuration for a single workspace.
// mountTag should be the workspace's mount tag (e.g., "ws_abc12345")
// hostPath is the absolute path on the host to share
func WorkspaceMount(mountTag, hostPath string) Mount {
return Mount{
Tag: mountTag,
HostPath: hostPath,
SocketPath: filepath.Join(config.StateDir, mountTag+".sock"),
}
}
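
A sketch of combining these mount helpers with the Manager from the previous file (the workspace tag and host path are made up for illustration):

package main

import (
	"log"

	"qvm/internal/config"
	"qvm/internal/virtiofsd"
)

func main() {
	mgr := virtiofsd.NewManager(config.StateDir)

	// Shared caches plus one workspace mount (tag and path are illustrative).
	mounts := append(
		virtiofsd.DefaultCacheMounts(),
		virtiofsd.WorkspaceMount("ws_abc12345", "/home/alice/projects/demo"),
	)

	// StartAll stops the daemons it already started if any single mount fails.
	if result := mgr.StartAll(mounts); result.IsError() {
		log.Fatalf("starting virtiofsd daemons: %v", result.Error())
	}
	defer mgr.StopAll()

	// ... start QEMU with matching vhost-user-fs devices here ...
}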

internal/vm/lifecycle.go Normal file (+379)

@@ -0,0 +1,379 @@
package vm
import (
"fmt"
"os"
"os/exec"
"qvm/internal/config"
"qvm/internal/logging"
"qvm/internal/workspace"
"strconv"
"strings"
"syscall"
"time"
"github.com/samber/mo"
)
// VMStatus represents the current state of the VM
type VMStatus struct {
Running bool
PID int
SSHPort int
}
// Mount represents a 9p filesystem mount
type Mount struct {
Tag string
HostPath string
}
// Start launches the VM with all configured mounts.
// Sequence:
// 1. Check if VM is already running (via PID file and process check)
// 2. Ensure all required directories exist
// 3. Build mount list (cache mounts + workspace mounts from registry)
// 4. Find available SSH port
// 5. Build QEMU arguments and start QEMU directly (daemonized) with 9p virtfs mounts
// 6. Read the PID file written by QEMU and record the SSH port state file
// 7. Wait for SSH to become available (120 second timeout)
//
// Returns error if any step fails.
func Start(cfg *config.Config, reg *workspace.Registry) mo.Result[struct{}] {
// 1. Check if already running
if IsRunning() {
return mo.Err[struct{}](fmt.Errorf("VM is already running"))
}
// 2. Ensure directories exist
if err := config.EnsureDirs(); err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to create directories: %w", err))
}
// 2a. Check if base image exists
if _, err := os.Stat(config.BaseImage); os.IsNotExist(err) {
return mo.Err[struct{}](fmt.Errorf("base image not found at %s - run 'qvm rebuild' first", config.BaseImage))
}
// 2b. Create overlay if it doesn't exist (backed by base image)
if _, err := os.Stat(config.Overlay); os.IsNotExist(err) {
logging.Info("Creating overlay image backed by base image...")
cmd := exec.Command("qemu-img", "create", "-f", "qcow2",
"-F", "qcow2", "-b", config.BaseImage, config.Overlay)
if output, err := cmd.CombinedOutput(); err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to create overlay: %s: %w", string(output), err))
}
}
// 3. Build mount list (for 9p virtfs)
mounts := []Mount{
{Tag: "cargo_home", HostPath: config.CargoHome},
{Tag: "cargo_target", HostPath: config.CargoTarget},
{Tag: "pnpm_store", HostPath: config.PnpmStore},
{Tag: "sccache", HostPath: config.Sccache},
}
// Add opencode config mount if directory exists
if _, err := os.Stat(config.HostOpencodeConfig); err == nil {
mounts = append(mounts, Mount{
Tag: "opencode_config",
HostPath: config.HostOpencodeConfig,
})
}
// Add workspace mounts from registry
for _, ws := range reg.List() {
mounts = append(mounts, Mount{
Tag: ws.MountTag,
HostPath: ws.HostPath,
})
}
// 4. Find available SSH port
sshPort, err := findAvailablePort(2222)
if err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to find available SSH port: %w", err))
}
// 5. Build QEMU command and start VM directly
args := buildQEMUArgs(cfg, sshPort, mounts)
cmd := exec.Command("qemu-system-x86_64", args...)
cmd.Stdout = nil
cmd.Stderr = nil
cmd.Stdin = nil
if err := cmd.Run(); err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to start QEMU: %w", err))
}
logging.Info("Waiting for VM to daemonize...")
pidFileReady := false
for i := 0; i < 10; i++ {
time.Sleep(500 * time.Millisecond)
if _, err := os.Stat(config.PIDFile); err == nil {
pidFileReady = true
break
}
}
if !pidFileReady {
return mo.Err[struct{}](fmt.Errorf("QEMU did not create PID file after 5 seconds"))
}
pidBytes, err := os.ReadFile(config.PIDFile)
if err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to read PID file: %w", err))
}
pid := strings.TrimSpace(string(pidBytes))
logging.Info("VM started with PID " + pid)
if err := os.WriteFile(config.SSHPortFile, []byte(strconv.Itoa(sshPort)), 0644); err != nil {
if pidBytes, err := os.ReadFile(config.PIDFile); err == nil {
if pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))); err == nil {
if process, err := os.FindProcess(pid); err == nil {
_ = process.Kill()
}
}
}
_ = os.Remove(config.PIDFile)
return mo.Err[struct{}](fmt.Errorf("failed to write SSH port file: %w", err))
}
// 7. Wait for SSH
if err := waitForSSH(sshPort, 120*time.Second); err != nil {
// QEMU has daemonized, so cmd.Process has already exited; kill the VM via the PID it wrote
if vmPid, perr := strconv.Atoi(pid); perr == nil {
if process, perr := os.FindProcess(vmPid); perr == nil {
_ = process.Kill()
}
}
_ = os.Remove(config.PIDFile)
_ = os.Remove(config.SSHPortFile)
return mo.Err[struct{}](fmt.Errorf("VM started but SSH not available: %w", err))
}
return mo.Ok(struct{}{})
}
// Stop gracefully shuts down the VM.
// Sequence:
// 1. Read PID from file
// 2. Send SIGTERM to the process
// 3. Wait up to 30 seconds for graceful shutdown (poll every second)
// 4. If still running, send SIGKILL
// 5. Clean up PID and port files
//
// Returns success even if VM is not running (idempotent).
func Stop() mo.Result[struct{}] {
// 1. Read PID file
pidBytes, err := os.ReadFile(config.PIDFile)
if err != nil {
if os.IsNotExist(err) {
// Not running
return mo.Ok(struct{}{})
}
return mo.Err[struct{}](fmt.Errorf("failed to read PID file: %w", err))
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
return mo.Err[struct{}](fmt.Errorf("invalid PID in file: %w", err))
}
// Check if process exists
process, err := os.FindProcess(pid)
if err != nil {
// Process doesn't exist, clean up
cleanupStateFiles()
return mo.Ok(struct{}{})
}
// 2. Send SIGTERM for graceful shutdown
if err := process.Signal(syscall.SIGTERM); err != nil {
// Process already gone
cleanupStateFiles()
return mo.Ok(struct{}{})
}
// 3. Wait up to 30 seconds for process to exit (poll every second)
for i := 0; i < 30; i++ {
time.Sleep(1 * time.Second)
// Check if process still exists by sending signal 0
if err := process.Signal(syscall.Signal(0)); err != nil {
// Process no longer exists
cleanupStateFiles()
return mo.Ok(struct{}{})
}
}
// 4. Timeout, force kill
_ = process.Signal(syscall.SIGKILL)
// Wait a moment for SIGKILL to take effect
time.Sleep(1 * time.Second)
// 5. Clean up state files
cleanupStateFiles()
return mo.Ok(struct{}{})
}
// cleanupStateFiles removes all VM state files
func cleanupStateFiles() {
_ = os.Remove(config.PIDFile)
_ = os.Remove(config.SSHPortFile)
_ = os.Remove(config.QMPSocket)
}
// Status returns the current VM status (running, PID, SSH port).
func Status() mo.Result[VMStatus] {
status := VMStatus{
Running: false,
PID: 0,
SSHPort: 0,
}
if !IsRunning() {
return mo.Ok(status)
}
// Read PID
pidBytes, err := os.ReadFile(config.PIDFile)
if err != nil {
return mo.Err[VMStatus](fmt.Errorf("failed to read PID file: %w", err))
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
return mo.Err[VMStatus](fmt.Errorf("invalid PID in file: %w", err))
}
// Read SSH port
portBytes, err := os.ReadFile(config.SSHPortFile)
if err != nil {
return mo.Err[VMStatus](fmt.Errorf("failed to read SSH port file: %w", err))
}
sshPort, err := strconv.Atoi(strings.TrimSpace(string(portBytes)))
if err != nil {
return mo.Err[VMStatus](fmt.Errorf("invalid SSH port in file: %w", err))
}
status.Running = true
status.PID = pid
status.SSHPort = sshPort
return mo.Ok(status)
}
// Reset stops the VM and deletes the overlay image.
// This returns the VM to a fresh state based on the base image.
func Reset() mo.Result[struct{}] {
// Stop VM if running
stopResult := Stop()
if stopResult.IsError() {
return mo.Err[struct{}](fmt.Errorf("failed to stop VM: %w", stopResult.Error()))
}
// Delete overlay image
if err := os.Remove(config.Overlay); err != nil && !os.IsNotExist(err) {
return mo.Err[struct{}](fmt.Errorf("failed to delete overlay: %w", err))
}
return mo.Ok(struct{}{})
}
// IsRunning reports whether the VM is running, by reading the PID file
// and verifying that the process still exists.
func IsRunning() bool {
pidBytes, err := os.ReadFile(config.PIDFile)
if err != nil {
return false
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
return false
}
// Check if process exists by sending signal 0
process, err := os.FindProcess(pid)
if err != nil {
return false
}
err = process.Signal(syscall.Signal(0))
return err == nil
}
func buildQEMUArgs(cfg *config.Config, sshPort int, mounts []Mount) []string {
// Boot directly from the qcow2 disk image (it has GRUB installed).
// Do NOT use -kernel/-initrd - that is for the NixOS VM runner, which requires special 9p mounts.
args := []string{
"-machine", "q35",
"-accel", "kvm",
"-cpu", "host",
"-m", cfg.VM.Memory,
"-smp", strconv.Itoa(cfg.VM.CPUs),
"-display", "none",
"-daemonize",
"-pidfile", config.PIDFile,
"-drive", fmt.Sprintf("file=%s,if=virtio,format=qcow2", config.Overlay),
"-netdev", fmt.Sprintf("user,id=n0,hostfwd=tcp::%d-:22", sshPort),
"-device", "virtio-net-pci,netdev=n0",
"-serial", fmt.Sprintf("file:%s", config.SerialLog),
}
// Add 9p mounts for cache directories and workspaces
for _, mount := range mounts {
args = append(args,
"-virtfs", fmt.Sprintf("local,path=%s,mount_tag=%s,security_model=mapped-xattr,id=%s",
mount.HostPath, mount.Tag, mount.Tag),
)
}
return args
}
// findAvailablePort finds an available TCP port starting from the given base port.
func findAvailablePort(basePort int) (int, error) {
const maxAttempts = 100
for i := 0; i < maxAttempts; i++ {
port := basePort + i
cmd := exec.Command("nc", "-z", "localhost", strconv.Itoa(port))
if err := cmd.Run(); err != nil {
return port, nil
}
}
return 0, fmt.Errorf("could not find available port after %d attempts", maxAttempts)
}
// waitForSSH waits for SSH to become available on the given port.
// Uses sshpass with password 'root' to test connection.
func waitForSSH(port int, timeout time.Duration) error {
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
cmd := exec.Command("sshpass", "-p", "root",
"ssh",
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
"-o", "ConnectTimeout=1",
"-p", strconv.Itoa(port),
"root@localhost",
"exit 0")
if err := cmd.Run(); err == nil {
return nil
}
time.Sleep(1 * time.Second)
}
return fmt.Errorf("SSH did not become available within %v", timeout)
}
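
A hypothetical sketch of how a CLI entry point could wire these lifecycle functions together (the subcommand names and argument handling are placeholders, not the commands added by this commit):

package main

import (
	"fmt"
	"log"
	"os"

	"qvm/internal/config"
	"qvm/internal/vm"
	"qvm/internal/workspace"
)

func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: qvm <up|down|status>") // placeholder subcommands
	}

	cfg, err := config.Load()
	if err != nil {
		log.Fatal(err)
	}
	reg, err := workspace.Load(config.WorkspacesFile).Get()
	if err != nil {
		log.Fatal(err)
	}

	switch os.Args[1] {
	case "up":
		if r := vm.Start(cfg, reg); r.IsError() {
			log.Fatal(r.Error())
		}
	case "down":
		if r := vm.Stop(); r.IsError() {
			log.Fatal(r.Error())
		}
	case "status":
		st, err := vm.Status().Get()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("running=%v pid=%d ssh_port=%d\n", st.Running, st.PID, st.SSHPort)
	}
}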

internal/vm/qemu.go Normal file (+54)

@@ -0,0 +1,54 @@
package vm
import (
"fmt"
"qvm/internal/config"
"qvm/internal/virtiofsd"
"strconv"
)
func buildQEMUCommand(cfg *config.Config, sshPort int, mounts []virtiofsd.Mount) []string {
args := []string{
"qemu-system-x86_64",
"-enable-kvm",
}
memSize := cfg.VM.Memory
args = append(args,
"-object", fmt.Sprintf("memory-backend-memfd,id=mem,size=%s,share=on", memSize),
"-numa", "node,memdev=mem",
)
args = append(args,
"-smp", strconv.Itoa(cfg.VM.CPUs),
)
args = append(args,
"-drive", fmt.Sprintf("if=virtio,file=%s,format=qcow2", config.Overlay),
)
args = append(args,
"-nic", fmt.Sprintf("user,model=virtio-net-pci,hostfwd=tcp::%d-:22", sshPort),
)
args = append(args,
"-serial", fmt.Sprintf("file:%s", config.SerialLog),
)
args = append(args,
"-qmp", fmt.Sprintf("unix:%s,server,nowait", config.QMPSocket),
)
args = append(args,
"-display", "none",
)
for _, mount := range mounts {
args = append(args,
"-chardev", fmt.Sprintf("socket,id=%s,path=%s", mount.Tag, mount.SocketPath),
"-device", fmt.Sprintf("vhost-user-fs-pci,queue-size=1024,chardev=%s,tag=%s", mount.Tag, mount.Tag),
)
}
return args
}


@@ -0,0 +1,139 @@
package workspace
import (
"crypto/sha256"
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/samber/mo"
)
type Workspace struct {
HostPath string `json:"host_path"`
Hash string `json:"hash"`
MountTag string `json:"mount_tag"`
GuestPath string `json:"guest_path"`
}
type Registry struct {
filePath string
workspaces map[string]Workspace
}
// Hash generates an 8-character hash from a path, matching bash behavior:
// echo -n "$path" | sha256sum | cut -c1-8
func Hash(path string) string {
h := sha256.Sum256([]byte(path))
return fmt.Sprintf("%x", h)[:8]
}
// NewRegistry creates a new empty registry
func NewRegistry(filePath string) *Registry {
return &Registry{
filePath: filePath,
workspaces: make(map[string]Workspace),
}
}
// Load reads the registry from a JSON file
func Load(filePath string) mo.Result[*Registry] {
registry := NewRegistry(filePath)
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return mo.Ok(registry)
}
data, err := os.ReadFile(filePath)
if err != nil {
return mo.Err[*Registry](fmt.Errorf("failed to read workspaces file: %w", err))
}
if len(data) == 0 {
return mo.Ok(registry)
}
var workspaceList []Workspace
if err := json.Unmarshal(data, &workspaceList); err != nil {
return mo.Err[*Registry](fmt.Errorf("failed to parse workspaces JSON: %w", err))
}
for _, ws := range workspaceList {
registry.workspaces[ws.HostPath] = ws
}
return mo.Ok(registry)
}
// Save writes the registry to the JSON file
func (r *Registry) Save() mo.Result[struct{}] {
dir := filepath.Dir(r.filePath)
if err := os.MkdirAll(dir, 0755); err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to create directory: %w", err))
}
workspaceList := make([]Workspace, 0, len(r.workspaces))
for _, ws := range r.workspaces {
workspaceList = append(workspaceList, ws)
}
data, err := json.MarshalIndent(workspaceList, "", " ")
if err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to marshal JSON: %w", err))
}
if err := os.WriteFile(r.filePath, data, 0644); err != nil {
return mo.Err[struct{}](fmt.Errorf("failed to write workspaces file: %w", err))
}
return mo.Ok(struct{}{})
}
// Register adds a workspace to the registry if it doesn't already exist
func (r *Registry) Register(hostPath string) mo.Result[*Workspace] {
absPath, err := filepath.Abs(hostPath)
if err != nil {
return mo.Err[*Workspace](fmt.Errorf("failed to resolve absolute path: %w", err))
}
if existing, exists := r.workspaces[absPath]; exists {
return mo.Ok(&existing)
}
hash := Hash(absPath)
ws := Workspace{
HostPath: absPath,
Hash: hash,
MountTag: fmt.Sprintf("ws_%s", hash),
GuestPath: fmt.Sprintf("/workspace/%s", hash),
}
r.workspaces[absPath] = ws
return mo.Ok(&ws)
}
// List returns all registered workspaces
func (r *Registry) List() []Workspace {
result := make([]Workspace, 0, len(r.workspaces))
for _, ws := range r.workspaces {
result = append(result, ws)
}
return result
}
// Find looks up a workspace by host path
func (r *Registry) Find(hostPath string) mo.Option[Workspace] {
absPath, err := filepath.Abs(hostPath)
if err != nil {
if ws, exists := r.workspaces[hostPath]; exists {
return mo.Some(ws)
}
return mo.None[Workspace]()
}
if ws, exists := r.workspaces[absPath]; exists {
return mo.Some(ws)
}
return mo.None[Workspace]()
}
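
A brief sketch of the registry round trip (the project path is illustrative; Load, Register, and Save are the functions defined above):

package main

import (
	"fmt"
	"log"

	"qvm/internal/config"
	"qvm/internal/workspace"
)

func main() {
	reg, err := workspace.Load(config.WorkspacesFile).Get()
	if err != nil {
		log.Fatal(err)
	}

	// Registering an already-known path returns the existing entry unchanged.
	ws, err := reg.Register("/home/alice/projects/demo").Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("tag=%s guest=%s\n", ws.MountTag, ws.GuestPath)

	if r := reg.Save(); r.IsError() {
		log.Fatal(r.Error())
	}
}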