chore: move caddy to common settings

This commit is contained in:
xinyangli 2025-05-10 00:27:51 +08:00
parent e78f1fe200
commit 9b3e4038a9
No known key found for this signature in database
21 changed files with 69 additions and 6709 deletions

View file

@@ -10,6 +10,7 @@
commonSettings = {
auth.enable = true;
network.localdns.enable = true;
serverComponents.enable = true;
};
services.openssh.enable = true;

View file

@@ -120,8 +120,4 @@ in
reverse_proxy /_synapse/client/* 127.0.0.1:${toString port-synapse}
'';
};
networking.firewall.allowedTCPPorts = [
443
];
}

View file

@@ -316,10 +316,6 @@ in
];
};
custom.prometheus = {
exporters.node.enable = true;
};
services.ollama = {
enable = true;
acceleration = "cuda";

View file

@@ -36,6 +36,7 @@
commonSettings = {
auth.enable = true;
comin.enable = true;
serverComponents.enable = true;
proxyServer = {
enable = true;
users = [

View file

@@ -1,25 +0,0 @@
{
lib,
...
}:
{
imports = [
./hardware-configuration.nix
];
boot.initrd.availableKernelModules =
[
];
swapDevices = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.ens3.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}

View file

@@ -23,6 +23,8 @@
})
];
networking.firewall.allowedTCPPorts = [ 8443 ];
environment.systemPackages = with pkgs; [
git
libraspberrypi

View file

@@ -223,10 +223,7 @@
};
};
networking.firewall.allowedTCPPorts = [ 8443 ];
services.caddy = {
enable = true;
virtualHosts = {
"raspite.coho-tet.ts.net".extraConfig = ''
reverse_proxy ${config.services.home-assistant.config.http.server_host}:${toString config.services.home-assistant.config.http.server_port}

View file

@@ -26,16 +26,10 @@
address = [ "23.165.200.99/24" ];
};
networking.firewall.allowedTCPPorts = [
80
443
];
services.caddy.enable = true;
commonSettings = {
auth.enable = true;
comin.enable = true;
serverComponents.enable = true;
};
nixpkgs.system = "x86_64-linux";

View file

@@ -32,6 +32,7 @@
};
comin.enable = true;
network.localdns.enable = true;
serverComponents.enable = true;
};
boot = {

View file

@@ -35,13 +35,6 @@
}";
in
{
enable = true;
package = pkgs.caddy.withPlugins {
plugins = [
"github.com/caddy-dns/cloudflare@v0.2.1"
];
hash = "sha256-saKJatiBZ4775IV2C5JLOmZ4BwHKFtRZan94aS5pO90=";
};
virtualHosts."derper00.namely.icu:8443".extraConfig = ''
${acmeCF}
reverse_proxy 127.0.0.1:${toString config.services.tailscale.derper.port}
@@ -52,10 +45,7 @@
'';
};
networking.firewall.allowedTCPPorts = [
8000
8443
];
networking.firewall.allowedTCPPorts = [ 8443 ];
systemd.services.caddy = {
serviceConfig = {

View file

@@ -19,7 +19,7 @@ in
services.caddy.virtualHosts."https://weilite.coho-tet.ts.net:8920".extraConfig = ''
reverse_proxy 127.0.0.1:8096
'';
networking.firewall.allowedTCPPorts = [ 8920 ]; # allow on lan
users.users.jellyfin.extraGroups = [ "render" ];
users.groups.media.members = [ cfg.user ];
}

View file

@@ -27,7 +27,6 @@
# environmentFile = config.sops.secrets."ocis/env".path;
};
networking.firewall.allowedTCPPorts = [ 8443 ];
services.caddy.virtualHosts."${config.services.ocis.url}".extraConfig = ''
reverse_proxy ${config.services.ocis.address}:${toString config.services.ocis.port}
'';

View file

@@ -38,8 +38,6 @@ in
(mkPrune "xin" "thorite")
];
networking.firewall.allowedTCPPorts = [ 8443 ];
services.caddy.virtualHosts."https://backup.xinyang.life:8443".extraConfig = ''
tls {
dns dnspod {env.DNSPOD_API_TOKEN}

View file

@@ -73,6 +73,8 @@ in
watch-dir-enabled = false;
};
};
networking.firewall.allowedTCPPorts = [ 9091 ];
services.caddy.virtualHosts."https://weilite.coho-tet.ts.net:9091".extraConfig = ''
reverse_proxy 127.0.0.1:${toString cfg.settings.rpc-port}
'';
@@ -93,6 +95,5 @@ in
};
};
networking.firewall.allowedTCPPorts = [ 9091 ]; # allow on lan
users.groups.media.members = [ cfg.user ];
}