qvm/flake/default-vm/flake.nix

{
  description = "Default NixOS VM template for QVM development environments";
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    home-manager = {
      url = "github:nix-community/home-manager";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    opencode.url = "github:anomalyco/opencode";
    common.url = "git+https://git.joshuabell.xyz/ringofstorms/dotfiles?dir=flakes/common";
    ros_neovim.url = "git+https://git.joshuabell.xyz/ringofstorms/nvim";
  };
  outputs =
    {
      self,
      nixpkgs,
      ...
    }@inputs:
    let
      system = "x86_64-linux";
      stateVersion = "26.05";
      vmModule =
        {
          config,
          pkgs,
          lib,
          ...
        }:
        {
          imports = [
            inputs.home-manager.nixosModules.home-manager
            # inputs.ros_neovim.nixosModules.default
            inputs.common.nixosModules.essentials
            inputs.common.nixosModules.git
            inputs.common.nixosModules.zsh
            inputs.common.nixosModules.tmux
          ];
          nixpkgs.config = {
            allowUnfree = true;
            allowUnfreePredicate = (_: true);
          };
          # Root filesystem configuration for the disk image.
          # Use /dev/vda1 directly instead of by-label to avoid initrd label detection issues.
          fileSystems."/" = {
            device = "/dev/vda1";
            autoResize = true;
            fsType = "ext4";
          };
          # Boot loader configuration for the disk image
          boot.loader.grub.device = lib.mkDefault "/dev/vda";
          # Explicitly load virtio block device modules in the initrd
          boot.initrd.availableKernelModules = [ "virtio_blk" "virtio_pci" "virtio" ];
          # Serial console for headless operation with QEMU -nographic
          boot.kernelParams = [ "console=ttyS0,115200n8" ];
          # GRUB serial console configuration
          boot.loader.grub.extraConfig = ''
            serial --unit=0 --speed=115200
            terminal_input serial
            terminal_output serial
          '';
          # Getty on the serial console for a login prompt
          systemd.services."serial-getty@ttyS0".enable = true;
          # Distinctive hostname for easy identification
          networking.hostName = "qvm-dev";
          # SSH enabled with password auth for root
          services.openssh = {
            enable = true;
            settings.PasswordAuthentication = true;
            settings.PermitRootLogin = "yes";
          };
          # Root user with password and zsh
          users.users.root = {
            password = "root";
            shell = pkgs.zsh;
          };
          programs.zsh.enable = true;
          # Home Manager configuration for a nice shell
          home-manager = {
            useUserPackages = true;
            useGlobalPkgs = true;
            backupFileExtension = "bak";
            sharedModules = [
              inputs.common.homeManagerModules.git
              inputs.common.homeManagerModules.postgres_cli_options
              inputs.common.homeManagerModules.starship
              inputs.common.homeManagerModules.zoxide
              inputs.common.homeManagerModules.zsh
              inputs.common.homeManagerModules.tmux
              inputs.common.homeManagerModules.direnv
              ({ programs.direnv.config.whitelist.prefix = [ "/" ]; })
            ];
            users.root = {
              home.stateVersion = stateVersion;
              programs.home-manager.enable = true;
            };
          };
          # Avoid slow boots due to wait-online services
          systemd.network.wait-online.enable = false;
          systemd.services.NetworkManager-wait-online.enable = lib.mkForce false;
          systemd.services.systemd-networkd-wait-online.enable = lib.mkForce false;
          networking.firewall.allowedTCPPorts = [ 22 ];
          # Enable flakes
          nix.settings.experimental-features = [
            "nix-command"
            "flakes"
          ];
          # Josh's timezone
          time.timeZone = "America/Chicago";
          # Git safe.directory for virtiofs ownership issues
          environment.etc."gitconfig".text = ''
            [safe]
            directory = *
          '';
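          # Without this, git inside /workspace (virtiofs, files owned by the host
          # user) refuses to operate, failing with "detected dubious ownership in
          # repository".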
          # virtiofs mount points for caches (must match qvm virtiofsd mount tags).
          # Using virtiofs instead of 9p for better performance and hot-mount support.
          fileSystems."/cache/cargo" = {
            device = "cargo_home";
            fsType = "virtiofs";
            options = [ "nofail" ];
          };
          fileSystems."/cache/target" = {
            device = "cargo_target";
            fsType = "virtiofs";
            options = [ "nofail" ];
          };
          fileSystems."/cache/pnpm" = {
            device = "pnpm_store";
            fsType = "virtiofs";
            options = [ "nofail" ];
          };
          fileSystems."/cache/sccache" = {
            device = "sccache";
            fsType = "virtiofs";
            options = [ "nofail" ];
          };
          fileSystems."/root/.config/opencode" = {
            device = "opencode_config";
            fsType = "virtiofs";
            options = [ "nofail" ];
          };
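          # Host side, for reference (illustrative; paths and IDs assumed, and in
          # practice the qvm CLI manages this - see the note near the end of this
          # module). Each tag needs a virtiofsd daemon plus a matching
          # vhost-user-fs-pci device and shared-memory backend, roughly:
          #   virtiofsd --socket-path=/tmp/cargo_home.sock --shared-dir=$HOME/.cargo
          #   qemu-system-x86_64 ... \
          #     -chardev socket,id=cargo,path=/tmp/cargo_home.sock \
          #     -device vhost-user-fs-pci,chardev=cargo,tag=cargo_home \
          #     -object memory-backend-memfd,id=mem,share=on,size=4G \
          #     -numa node,memdev=mem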
          # Environment variables for cache directories
          environment.variables = {
            CARGO_HOME = "/cache/cargo";
            CARGO_TARGET_DIR = "/cache/target";
            PNPM_HOME = "/cache/pnpm";
            SCCACHE_DIR = "/cache/sccache";
          };
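          # With these set, cargo, pnpm, and sccache inside the VM read and write
          # the host-backed caches above, so build artifacts survive VM rebuilds.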
          # Ensure workspace and cache directories exist
          systemd.tmpfiles.rules = [
            "d /workspace 0755 root root -"
            "d /cache 0755 root root -"
            "d /cache/cargo 0755 root root -"
            "d /cache/target 0755 root root -"
            "d /cache/pnpm 0755 root root -"
            "d /cache/sccache 0755 root root -"
          ];
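          # tmpfiles.d field order: type path mode user group age ("-" = no cleanup)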
          # Systemd mount units for cache directories using virtiofs.
          # virtiofs provides better performance than 9p and supports hot-mounting.
          systemd.mounts = [
            {
              what = "cargo_home";
              where = "/cache/cargo";
              type = "virtiofs";
              options = "nofail";
              wantedBy = [ "multi-user.target" ];
              after = [ "systemd-modules-load.service" ];
            }
            {
              what = "cargo_target";
              where = "/cache/target";
              type = "virtiofs";
              options = "nofail";
              wantedBy = [ "multi-user.target" ];
              after = [ "systemd-modules-load.service" ];
            }
            {
              what = "pnpm_store";
              where = "/cache/pnpm";
              type = "virtiofs";
              options = "nofail";
              wantedBy = [ "multi-user.target" ];
              after = [ "systemd-modules-load.service" ];
            }
            {
              what = "sccache";
              where = "/cache/sccache";
              type = "virtiofs";
              options = "nofail";
              wantedBy = [ "multi-user.target" ];
              after = [ "systemd-modules-load.service" ];
            }
            {
              what = "opencode_config";
              where = "/root/.config/opencode";
              type = "virtiofs";
              options = "nofail";
              wantedBy = [ "multi-user.target" ];
              after = [ "systemd-modules-load.service" ];
            }
          ];
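          # "nofail" keeps boot from blocking on tags the host has not exported
          # yet; a missing tag just leaves that mount unit failed/inactive until
          # qvm hot-mounts the share.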
          # Essential packages for development
          environment.systemPackages = with pkgs; [
            git
            vim
            tmux
            htop
            curl
            jq
            ripgrep
            fd
            inputs.opencode.packages.${system}.default
          ];
          # Opencode aliases without proxy interference
          environment.shellAliases = {
            "oc" = "all_proxy='' http_proxy='' https_proxy='' opencode";
            "occ" = "oc -c";
            "ocd" = "all_proxy='' http_proxy='' https_proxy='' opencode --dangerously-skip-permissions";
            "ocdc" = "ocd -c";
          };
          # MOTD to clearly show this is qvm-dev
          users.motd = ''
            QVM Development VM
            Hostname: qvm-dev
            Caches: /cache/{cargo,target,...}
            Workspace: /workspace
          '';
          # Disk size in MiB (40 * 1024 = 40 GiB)
          virtualisation.diskSize = 40 * 1024;
          # NOTE: Using virtiofs for filesystem sharing (via virtiofsd + vhost-user-fs-pci).
          # This provides better performance than 9p and supports hot-mounting workspaces
          # without a VM restart. The qvm CLI manages virtiofsd daemons for each mount.
          system.stateVersion = stateVersion;
        };
    in
    let
      pkgs = nixpkgs.legacyPackages.${system};
      # Use the standard NixOS VM builder instead of nixos-generators: the
      # nixos-generators qcow format builds inside a 100MB-RAM VM, which OOMs
      # with large closures.
      baseVm = nixpkgs.lib.nixosSystem {
        inherit system;
        modules = [ vmModule ];
      };
    in
    {
      nixosConfigurations.base = baseVm;
      # Packages: a qcow2 disk image (default) plus a runnable VM script
      # (./result/bin/run-qvm-dev-vm) kept for debugging.
      packages.${system} = {
        # QCOW2 disk image for the base VM.
        # Uses make-disk-image.nix with a memSize large enough to avoid OOM during the build.
        default = import "${nixpkgs}/nixos/lib/make-disk-image.nix" {
          inherit pkgs;
          lib = nixpkgs.lib;
          config = baseVm.config;
          # Disk image settings
          format = "qcow2";
          diskSize = "auto";
          additionalSpace = "2G"; # Extra space beyond closure size (default 512M)
          partitionTableType = "legacy"; # Use simple MBR instead of hybrid
          label = "nixos"; # Explicit label matching the fileSystems."/" device
          # CRITICAL: increase build VM memory to 16GB for large closures.
          # The closure includes NixOS + home-manager + opencode + dev tools (~2GB+);
          # the default 512MB, and even 2GB, were insufficient, causing OOM during cptofs.
          memSize = 16384;
        };
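        # Building and booting the image (illustrative; the output filename
        # follows make-disk-image.nix defaults for the qcow2 format):
        #   nix build .#default
        #   qemu-system-x86_64 -m 4G -nographic -drive file=./result/nixos.qcow2,if=virtio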
        # Keep the runner script as an alternative for debugging
        vm = baseVm.config.system.build.vm;
      };
      apps.${system}.default = {
        type = "app";
        program = "${baseVm.config.system.build.vm}/bin/run-qvm-dev-vm";
      };
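      # "nix run ." boots the dev VM via the runner script above.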
      devShells.${system}.default = pkgs.mkShellNoCC {
        QEMU_NET_OPTS = "hostfwd=tcp::2222-:22";
      };
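      # The run-*-vm script appends $QEMU_NET_OPTS to its user-mode netdev, so
      # running the VM from this shell forwards host port 2222 to guest SSH:
      #   ssh -p 2222 root@localhost   # password "root"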
      nixosModules.default = vmModule;
    };
}