removing docker from lio, and other cleanup

parent c007bb72d2
commit 405576ee63

10 changed files with 45 additions and 423 deletions

@@ -1,166 +0,0 @@
{
  config,
  lib,
  ...
}:

let
  name = "inventory";
  app = "pg-${name}";

  hostDataDir = "/var/lib/${name}";

  localAddress = "192.168.100.110";
  pg_port = 54433;
  pg_dataDir = "/var/lib/postgres";
  # pgadmin_port = 5050;
  # pgadmin_dataDir = "/var/lib/pgadmin";

  binds = [
    {
      host = "${hostDataDir}/postgres";
      container = pg_dataDir;
      user = "postgres";
      uid = config.ids.uids.postgres;
    }
    # {
    #   host = "${hostDataDir}/pgadmin";
    #   container = pgadmin_dataDir;
    #   user = "pgadmin";
    #   uid = 1020;
    # }
  ];
in
{

  users = lib.foldl (
    acc: bind:
    {
      users.${bind.user} = {
        isSystemUser = true;
        home = bind.host;
        createHome = true;
        uid = bind.uid;
        group = bind.user;
      };
      groups.${bind.user}.gid = bind.uid;
    }
    // acc
  ) { } binds;

  containers.${app} = {
    ephemeral = true;
    autoStart = true;
    privateNetwork = true;
    hostAddress = "192.168.100.2";
    localAddress = localAddress;
    bindMounts = lib.foldl (
      acc: bind:
      {
        "${bind.container}" = {
          hostPath = bind.host;
          isReadOnly = false;
        };
      }
      // acc
    ) { } binds;
    config =
      { config, pkgs, ... }:
      {
        system.stateVersion = "24.11";

        users = lib.foldl (
          acc: bind:
          {
            users.${bind.user} = {
              isSystemUser = true;
              home = bind.container;
              uid = bind.uid;
              group = bind.user;
            };
            groups.${bind.user}.gid = bind.uid;
          }
          // acc
        ) { } binds;

        services.postgresql = {
          enable = true;
          package = pkgs.postgresql_17.withJIT;
          enableJIT = true;
          extensions = with pkgs.postgresql17Packages; [
            # NOTE add extensions here
            pgvector
            postgis
          ];
          settings.port = pg_port;
          enableTCPIP = true;
          authentication = ''
            local all all trust
            host all all 127.0.0.1/8 trust
            host all all ::1/128 trust
            host all all 192.168.100.0/24 trust
          '';
          identMap = ''
            # ArbitraryMapName systemUser dbUser
            superuser_map root ${name}

            # Let other names login as themselves
            superuser_map /^(.*)$ \1
          '';
          ensureDatabases = [ name ];
          ensureUsers = [
            {
              name = name;
              ensureDBOwnership = true;
              ensureClauses = {
                login = true;
                superuser = true;
              };
            }
          ];
          dataDir =
            (lib.findFirst (bind: bind.user == "postgres") (throw "No postgres bind found") binds).container;
        };

        # services.pgadmin = {
        #   enable = true;
        #   port = pgadmin_port;
        #   openFirewall = true;
        #   initialEmail = "admin@test.com";
        #   initialPasswordFile = (builtins.toFile "password" "password");
        # };

        # TODO set this up, had issues since it shares users with postgres service and my bind mounts relys on createhome in that exact directory.
        # services.postgresqlBackup = {
        #   enable = true;
        #   compression = "gzip";
        #   compressionLevel = 9;
        #   databases = [ cfg.database ];
        #   location = "${cfg.dataDir}/backup";
        #   startAt = "02:30"; # Adjust the backup time as needed
        # };

        networking.firewall = {
          enable = true;
          allowedTCPPorts = [ pg_port ];
        };

        # Health check to ensure database is ready
        systemd.services.postgresql-healthcheck = {
          description = "PostgreSQL Health Check";
          after = [ "postgresql.service" ];
          wantedBy = [ "multi-user.target" ];
          serviceConfig = {
            Type = "oneshot";
            ExecStart = ''
              ${pkgs.postgresql_17}/bin/pg_isready \
                -U ${name} \
                -d ${name} \
                -h localhost \
                -p ${toString pg_port}
            '';
          };
        };
      };
  };
}

@@ -1,154 +0,0 @@
{ name }:
{
  config,
  lib,
  ...
}:
let
  # name = "UNIQUE_NAME_ON_HOST";

  hostDataDir = "/var/lib/${name}";
  hostAddress = "192.168.100.2";
  containerAddress = "192.168.100.10";

  binds = [
    # Postgres data, must use postgres user in container and host
    {
      host = "${hostDataDir}/postgres";
      # Adjust based on container postgres data dir
      container = "/var/lib/postgresql/17";
      user = "postgres";
      uid = config.ids.uids.postgres;
      gid = config.ids.gids.postgres;
    }
    # Postgres backups
    {
      host = "${hostDataDir}/backups/postgres";
      container = "/var/backup/postgresql";
      user = "postgres";
      uid = config.ids.uids.postgres;
      gid = config.ids.gids.postgres;
    }
    # App data, uses custom user
    # {
    #   host = "${hostDataDir}/data";
    #   container = "/var/lib/forgejo";
    #   user = "forgejo";
    #   uid = 115;
    #   gid = 115;
    # }
  ];
  uniqueUsers = lib.foldl' (
    acc: bind: if lib.lists.any (item: item.user == bind.user) acc then acc else acc ++ [ bind ]
  ) [ ] binds;
  users = {
    users = lib.listToAttrs (
      lib.map (u: {
        name = u.user;
        value = {
          isSystemUser = true;
          name = u.user;
          uid = u.uid;
          group = u.user;
        };
      }) uniqueUsers
    );

    groups = lib.listToAttrs (
      lib.map (g: {
        name = g.user;
        value.gid = g.gid;
      }) uniqueUsers
    );
  };
in
{
  # Ensure users exists on host machine with same IDs as container
  inherit users;

  # Ensure directories exist on host machine
  system.activationScripts.createMediaServerDirs = ''
    ${lib.concatStringsSep "\n" (
      lib.map (bind: ''
        mkdir -p ${bind.host}
        chown -R ${toString bind.user}:${toString bind.gid} ${bind.host}
        chmod -R 750 ${bind.host}
      '') binds
    )}
  '';

  containers.${name} = {
    ephemeral = true;
    autoStart = true;
    privateNetwork = true;
    hostAddress = hostAddress;
    localAddress = containerAddress;
    bindMounts = lib.foldl (
      acc: bind:
      {
        "${bind.container}" = {
          hostPath = bind.host;
          isReadOnly = false;
        };
      }
      // acc
    ) { } binds;
    config =
      { config, pkgs, ... }:
      {
        system.stateVersion = "24.11";

        # Ensure users exist on container
        inherit users;

        services.postgresql = {
          enable = true;
          package = pkgs.postgresql_17.withJIT;
          enableJIT = true;
          extensions = with pkgs.postgresql17Packages; [
            # NOTE add extensions here
            pgvector
            postgis
          ];
          enableTCPIP = true;
          authentication = ''
            local all all trust
            host all all 127.0.0.1/8 trust
            host all all ::1/128 trust
            host all all 192.168.100.0/24 trust
          '';
          # identMap = ''
          #   # ArbitraryMapName systemUser dbUser
          #   superuser_map root ${name}
          #
          #   # Let other names login as themselves
          #   superuser_map /^(.*)$ \1
          # '';
          # ensureDatabases = [ name ];
          # ensureUsers = [
          #   {
          #     name = name;
          #     ensureDBOwnership = true;
          #     ensureClauses = {
          #       login = true;
          #       superuser = true;
          #     };
          #   }
          # ];
        };

        # Backup database
        services.postgresqlBackup = {
          enable = true;
        };

        # APP TODO REPLACE THIS WITH SOMETHING
        services.pgadmin = {
          enable = true;
          openFirewall = true;
          initialEmail = "admin@test.com";
          initialPasswordFile = (builtins.toFile "password" "password");
        };
      };
  };
}

@@ -1,39 +0,0 @@
{
  ...
}:
{
  options = { };

  config = {
    # Random test, visit http://192.168.100.11/
    containers.wasabi = {
      ephemeral = true;
      autoStart = true;
      privateNetwork = true;
      hostAddress = "192.168.100.2";
      localAddress = "192.168.100.11";
      config =
        { config, pkgs, ... }:
        {
          system.stateVersion = "24.11";
          services.httpd.enable = true;
          services.httpd.adminAddr = "foo@example.org";
          networking.firewall = {
            enable = true;
            allowedTCPPorts = [ 80 ];
          };
        };
    };

    virtualisation.oci-containers.containers = {
      # Example of defining a container, visit http://localhost:8085/
      "nginx_simple" = {
        # autoStart = true; this is default true
        image = "nginx:latest";
        ports = [
          "127.0.0.1:8085:80"
        ];
      };
    };
  };
}

@@ -25,29 +25,13 @@ in
   ];

   settings = lib.attrsets.recursiveUpdate {
-    # exec-once = [
-    #   "waybar"
-    # ];
-
     # Default monitor configuration
     monitor = "monitor = , preferred, auto, 1";

-    # Make workspaces 7-10 always on MONITOR-2 (replace DP-2 if your secondary isn't DP-2)
-    # You can get the name of your monitor via `hyprctl monitors`
-    workspace = [
-      "7, monitor:DP-2, persistent:true"
-      "8, monitor:DP-2, persistent:true"
-      "9, monitor:DP-2, persistent:true"
-      "10, monitor:DP-2, persistent:true"
-    ];
-
     windowrulev2 = [
       "float, class:^(?i)chrome-nngceckbapebfimnlniiiahkandclblb-Default$, initialtitle:^_crx_nngceckbapebfimnlniiiahkandclblb$"
       "center, class:^(?i)chrome-nngceckbapebfimnlniiiahkandclblb-Default$, initialtitle:^_crx_nngceckbapebfimnlniiiahkandclblb$"
       "size 720 600, class:^(?i)chrome-nngceckbapebfimnlniiiahkandclblb-Default$, initialtitle:^_crx_nngceckbapebfimnlniiiahkandclblb$"
-      "float, class:.*blueman-manager.*"
-      "size 700 500, class:.*blueman-manager.*"
-      "center, class:.*blueman-manager.*"
     ];

     # Input configuration

@@ -43,7 +43,7 @@
     };
     containers = {
       forgejo = import ./_containers/forgejo.nix;
-      obsidian_sync = import ./_containers/obsidian_sync.nix;
+      # obsidian_sync = import ./_containers/obsidian_sync.nix;
     };
   };
   homeManagerModules = {

@@ -117,9 +117,9 @@ in
   "headscale_auth.age" = {
     publicKeys = trustedKeys;
   };
-  "obsidian_sync_env.age" = {
-    publicKeys = trustedKeys;
-  };
+  # "obsidian_sync_env.age" = {
+  #   publicKeys = trustedKeys;
+  # };
   "us_chi_wg.age" = {
     publicKeys = trustedKeys;
   };

@@ -13,15 +13,15 @@ in
   options = { };

   imports = [
-    common.nixosModules.containers.obsidian_sync
+    # common.nixosModules.containers.obsidian_sync
   ];

   config = {
     # Obsidian Sync settings
-    services.obsidian_sync = {
-      serverUrl = "https://obsidiansync.joshuabell.xyz";
-      dockerEnvFiles = [ config.age.secrets.obsidian_sync_env.path ];
-    };
+    # services.obsidian_sync = {
+    #   serverUrl = "https://obsidiansync.joshuabell.xyz";
+    #   dockerEnvFiles = [ config.age.secrets.obsidian_sync_env.path ];
+    # };

     ## Give internet access
     networking = {

@@ -62,7 +62,7 @@ in
     #   };
     # };

-    virtualisation.oci-containers.backend = "docker";
+    # virtualisation.oci-containers.backend = "docker";

     services.nginx = {
       enable = true;

@@ -74,7 +74,7 @@ in
         "_" = {
           default = true;
           locations."/" = {
-            return = "444"; # or 444 for drop
+            return = "404"; # or 444 for drop
           };
         };
       };

@@ -54,7 +54,6 @@
 }:
 {
   programs = {
-    steam.enable = true;
     nix-ld = {
       enable = true;
       libraries = with pkgs; [

@@ -66,6 +65,10 @@
       ];
     };
   };
+  environment.shellAliases = {
+    "oc" =
+      "all_proxy='' http_proxy='' https_proxy='' /home/josh/other/opencode/node_modules/opencode-linux-x64/bin/opencode";
+  };

   environment.systemPackages = with pkgs; [
     lua

@@ -75,12 +78,6 @@
     appimage-run
     nodejs_24
   ];
-
-  environment.shellAliases = {
-    "oc" =
-      "all_proxy='' http_proxy='' https_proxy='' /home/josh/other/opencode/node_modules/opencode-linux-x64/bin/opencode";
-  };
-
   # Also allow this key to work for root user, this will let us use this as a remote builder easier
   users.users.root.openssh.authorizedKeys.keys = [
     "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJN2nsLmAlF6zj5dEBkNSJaqcCya+aB6I0imY8Q5Ew0S nix2lio"

@@ -113,7 +110,7 @@
     tailnet.enable = true;
     tailnet.enableExitNode = true;
     ssh.enable = true;
-    docker.enable = true;
+    # docker.enable = true;
     virt-manager.enable = true;
     flatpaks = {
       enable = true;

@@ -12,13 +12,13 @@ let
       "${mainMonitor},3840x2160@97.98,0x0,1,transform,0"
       "${secondaryMonitor},3440x1440@99.98,-1440x-640,1,transform,1"
     ];
-    workspace =
-      let
-        inherit (builtins) map toString;
-        inherit (lib) range;
-        mkWs = monitor: i: "${toString i},monitor:${monitor},persistent:true";
-      in
-      (map (mkWs mainMonitor) (range 1 6)) ++ (map (mkWs secondaryMonitor) (range 7 10));
+    # workspace =
+    #   let
+    #     inherit (builtins) map toString;
+    #     inherit (lib) range;
+    #     mkWs = monitor: i: "${toString i},persistent:true";
+    #   in
+    #   (map (mkWs mainMonitor) (range 1 6)) ++ (map (mkWs secondaryMonitor) (range 7 10));
   };

   moveScript = pkgs.writeShellScriptBin "hyprland-move-workspaces" ''

@@ -91,7 +91,7 @@ let
     # Subscribe to Hyprland events and react to monitor changes
     ''${SOCAT} - "UNIX-CONNECT:${"$"}sock" | while IFS= read -r line; do
       case "${"$"}line" in
-        monitoradded*|monitorremoved*|activemonitor*|layoutchange*)
+        monitoradded*|monitorremoved*|activemonitor*|layoutchange*|createworkspace*)
           place_workspaces
           ;;
       esac

@@ -172,20 +172,20 @@
         '';
       };
     };
-    "obsidiansync.joshuabell.xyz" = {
-      enableACME = true;
-      forceSSL = true;
-      locations."/" = {
-        proxyPass = "http://100.64.0.1:5984";
-      };
-      extraConfig = ''
-        client_max_body_size 100M;
-        proxy_redirect off;
-        proxy_buffering off;
-        proxy_set_header Host $host;
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-      '';
-    };
+    # "obsidiansync.joshuabell.xyz" = {
+    #   enableACME = true;
+    #   forceSSL = true;
+    #   locations."/" = {
+    #     proxyPass = "http://100.64.0.1:5984";
+    #   };
+    #   extraConfig = ''
+    #     client_max_body_size 100M;
+    #     proxy_redirect off;
+    #     proxy_buffering off;
+    #     proxy_set_header Host $host;
+    #     proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+    #   '';
+    # };
     "jellyfin.joshuabell.xyz" = {
       enableACME = true;
       forceSSL = true;