Refactoring
parent 3fda43968c
commit c24fbdf955

10 changed files with 283 additions and 257 deletions

flake.nix (42 changes)

@@ -1,28 +1,40 @@
 {
   inputs = {
-    disko.inputs.nixpkgs.follows = "nixpkgs";
-    disko.url = "github:nix-community/disko";
+    disko = {
+      url = "github:nix-community/disko";
+      inputs.nixpkgs.follows = "nixpkgs";
+    };
 
     flake-utils.url = "github:numtide/flake-utils";
 
-    nix-pre-commit-hooks.url = "github:cachix/pre-commit-hooks.nix/master";
-    nix-pre-commit-hooks.inputs.nixpkgs.follows = "nixpkgs";
+    nix-pre-commit-hooks = {
+      url = "github:cachix/pre-commit-hooks.nix/master";
+      inputs.nixpkgs.follows = "nixpkgs";
+    };
 
     nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11";
 
     nixos-hardware.url = "github:nixos/nixos-hardware/master";
 
-    krops.url = "github:Mic92/krops";
-    krops.inputs.flake-utils.follows = "flake-utils";
-    krops.inputs.nixpkgs.follows = "nixpkgs";
+    krops = {
+      url = "github:Mic92/krops";
+      inputs.flake-utils.follows = "flake-utils";
+      inputs.nixpkgs.follows = "nixpkgs";
+    };
 
-    sops-nix.url = "github:Mic92/sops-nix";
-    sops-nix.inputs.nixpkgs.follows = "nixpkgs";
+    sops-nix = {
+      url = "github:Mic92/sops-nix";
+      inputs.nixpkgs.follows = "nixpkgs";
+    };
 
-    sbruder-overlay.url = "github:sbruder/nixpkgs-overlay";
-    sbruder-overlay.inputs.flake-utils.follows = "flake-utils";
-    sbruder-overlay.inputs.nix-pre-commit-hooks.follows = "nix-pre-commit-hooks";
-    sbruder-overlay.inputs.nixpkgs.follows = "nixpkgs";
+    sbruder-overlay = {
+      url = "github:sbruder/nixpkgs-overlay";
+      inputs = {
+        flake-utils.follows = "flake-utils";
+        nix-pre-commit-hooks.follows = "nix-pre-commit-hooks";
+        nixpkgs.follows = "nixpkgs";
+      };
+    };
   };
 
   outputs =
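
Note: the flat and nested input declarations above are equivalent, since dotted attribute paths in Nix desugar to nested attrsets. A minimal sketch, evaluable in nix repl, with values copied from the hunk:

# Dotted paths and explicit nesting build the same attrset.
let
  flat = {
    disko.url = "github:nix-community/disko";
    disko.inputs.nixpkgs.follows = "nixpkgs";
  };
  nested = {
    disko = {
      url = "github:nix-community/disko";
      inputs.nixpkgs.follows = "nixpkgs";
    };
  };
in
flat == nested  # evaluates to true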

@@ -60,13 +72,13 @@
       devShells.default = pkgs.mkShell {
         name = "fablab-nixos-config";
 
-        buildInputs = (with pkgs; [
+        buildInputs = with pkgs; [
           black
           nixpkgs-fmt
           shellcheck
           sops
           ssh-to-pgp
-        ]);
+        ];
 
         shellHook = ''
           find ${./keys} -type f -print0 | xargs -0 ${pkgs.gnupg}/bin/gpg --quiet --import
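
Note: the parentheses dropped around the buildInputs list were redundant; a with-expression used as an attribute value parses the same either way. A tiny sketch, assuming a stand-in attrset in place of the real pkgs:

# Both spellings yield the same list.
let
  pkgs = { black = "black"; sops = "sops"; };  # stand-in, not real nixpkgs
  parenthesized = (with pkgs; [ black sops ]);
  bare = with pkgs; [ black sops ];
in
parenthesized == bare  # evaluates to true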

@@ -1,4 +1,4 @@
-{ ... }@inputs:
+inputs:
 let
   hardware = inputs.nixos-hardware.nixosModules;
 in
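
Note: since the function only uses its argument as a whole, the set pattern with an @-binding and the plain lambda behave identically for attrset arguments; the old form merely also asserted that the argument is an attrset. A sketch with an illustrative stand-in value:

# Both forms bind the whole argument to inputs.
let
  f = { ... }@inputs: inputs.nixos-hardware;  # old form
  g = inputs: inputs.nixos-hardware;          # new form
  arg = { nixos-hardware = "stand-in value"; };
in
f arg == g arg  # evaluates to true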

@@ -9,13 +9,15 @@
   nixpkgs.config = { allowAliases = false; };
 
   console.keyMap = "de";
-  services.xserver.layout = "de";
 
-  services.xserver.enable = true;
-  services.xserver.desktopManager.gnome.enable = true;
-  services.xserver.displayManager.gdm = {
+  services.xserver = {
     enable = true;
-    autoSuspend = false;
+    layout = "de";
+    desktopManager.gnome.enable = true;
+    displayManager.gdm = {
+      enable = true;
+      autoSuspend = false;
+    };
   };
 
   security.sudo.wheelNeedsPassword = false;

@@ -7,11 +7,11 @@
     ./services
   ];
 
-  networking.hostName = "raven";
 
   time.timeZone = "Etc/UTC";
 
   networking = {
+    hostName = "raven";
     useDHCP = false;
     vlans = {
       labprod = {

@@ -51,6 +51,7 @@
         "voip"
       ];
     };
+    firewall.allowedTCPPorts = [ 80 443 ];
   };
 
   i18n.defaultLocale = "en_US.UTF-8";

@@ -84,8 +85,6 @@
 
   services.nginx.enable = true;
 
-  networking.firewall.allowedTCPPorts = [ 80 443 ];
-
   # FIXME
   networking.hosts = {
     "192.168.94.1" = [ "raven.lab.fablab-nea.de" "labsync.lab.fablab-nea.de" ];

@@ -111,12 +111,12 @@ in
       ln -s ${cfg.package}/var/lib/asterisk/static-http/core-en_US.xml /var/lib/asterisk/documentation/core-en_US.xml
     '';
 
-  sops.secrets = (lib.listToAttrs (map
+  sops.secrets = lib.listToAttrs (map
     (name: lib.nameValuePair "asterisk-${name}" {
       sopsFile = ../secrets.yaml;
       owner = config.users.users.asterisk.name;
     })
-    secretConfigFiles));
+    secretConfigFiles);
   environment.etc = lib.mapAttrs'
     (name: _: lib.nameValuePair
       "asterisk/${name}.conf"
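
Note: the extra parentheses removed here were purely syntactic; function application already groups as written. The listToAttrs/nameValuePair pattern itself turns a list of names into an attrset of secrets. A sketch with hypothetical file names, assuming nixpkgs is available on the NIX_PATH:

# Builds { asterisk-pjsip = { owner = "asterisk"; }; asterisk-voicemail = { owner = "asterisk"; }; }
let
  inherit (import <nixpkgs> { }) lib;
  secretConfigFiles = [ "pjsip" "voicemail" ];  # hypothetical names
in
lib.listToAttrs (map
  (name: lib.nameValuePair "asterisk-${name}" { owner = "asterisk"; })
  secretConfigFiles)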

@@ -6,66 +6,71 @@ let
   port = 9001;
 in
 {
-  sops.secrets."authelia/jwtSecret" = {
-    sopsFile = ../secrets.yaml;
-    owner = cfg.user;
-  };
-  sops.secrets."authelia/storageEncryptionKey" = {
-    sopsFile = ../secrets.yaml;
-    owner = cfg.user;
-  };
-
-  services.authelia.instances.default = {
-    enable = true;
-    settings = {
-      server.address = "tcp://127.0.0.1:${toString port}/";
-      access_control = {
-        default_policy = "one_factor";
-      };
-      notifier.filesystem = {
-        filename = "/var/lib/authelia-${cfg.name}/notif.txt";
-      };
-      storage.postgres = {
-        address = "unix:///run/postgresql";
-        database = "authelia-${cfg.name}";
-        username = "authelia-${cfg.name}";
-        password = "authelia-${cfg.name}";
-      };
-      authentication_backend = {
-        file.path = "/var/lib/authelia-${cfg.name}/user.yml";
-      };
-      session = {
-        cookies = [
-          {
-            domain = domain;
-            authelia_url = "https://${domain}";
-            name = "authelia_session";
-          }
-        ];
-      };
-    };
-    secrets = {
-      jwtSecretFile = config.sops.secrets."authelia/jwtSecret".path;
-      storageEncryptionKeyFile = config.sops.secrets."authelia/storageEncryptionKey".path;
-    };
-  };
-
-  services.postgresql = {
-    ensureUsers = [{
-      name = "authelia-${cfg.name}";
-      ensureDBOwnership = true;
-    }];
-    ensureDatabases = [ "authelia-${cfg.name}" ];
-  };
-
-  services.nginx.virtualHosts."${domain}" = {
-    enableACME = true;
-    forceSSL = true;
-    extraConfig = ''
-      add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
-    '';
-    locations."/" = {
-      proxyPass = "http://127.0.0.1:${toString port}";
-      recommendedProxySettings = true;
-    };
-  };
+  sops.secrets = {
+    "authelia/jwtSecret" = {
+      sopsFile = ../secrets.yaml;
+      owner = cfg.user;
+    };
+    "authelia/storageEncryptionKey" = {
+      sopsFile = ../secrets.yaml;
+      owner = cfg.user;
+    };
+  };
+
+  services = {
+    authelia.instances.default = {
+      enable = true;
+      settings = {
+        server.address = "tcp://127.0.0.1:${toString port}/";
+        access_control = {
+          default_policy = "one_factor";
+        };
+        notifier.filesystem = {
+          filename = "/var/lib/authelia-${cfg.name}/notif.txt";
+        };
+        storage.postgres = {
+          address = "unix:///run/postgresql";
+          database = "authelia-${cfg.name}";
+          username = "authelia-${cfg.name}";
+          password = "authelia-${cfg.name}";
+        };
+        authentication_backend = {
+          file.path = "/var/lib/authelia-${cfg.name}/user.yml";
+        };
+        session = {
+          cookies = [
+            {
+              inherit domain;
+              authelia_url = "https://${domain}";
+              name = "authelia_session";
+            }
+          ];
+        };
+      };
+      secrets = {
+        jwtSecretFile = config.sops.secrets."authelia/jwtSecret".path;
+        storageEncryptionKeyFile = config.sops.secrets."authelia/storageEncryptionKey".path;
+      };
+    };
+
+    postgresql = {
+      ensureUsers = [{
+        name = "authelia-${cfg.name}";
+        ensureDBOwnership = true;
+      }];
+      ensureDatabases = [ "authelia-${cfg.name}" ];
+    };
+
+    nginx.virtualHosts."${domain}" = {
+      enableACME = true;
+      forceSSL = true;
+      extraConfig = ''
+        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
+      '';
+      locations."/" = {
+        proxyPass = "http://127.0.0.1:${toString port}";
+        recommendedProxySettings = true;
+      };
+    };
+  };
 }
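
Note: besides regrouping the sops.secrets and services attrsets, this hunk replaces domain = domain; with inherit domain;, which is pure shorthand. A one-line sketch with an illustrative value:

# inherit picks the binding up from the enclosing scope.
let domain = "auth.example.org"; in  # illustrative value
{ inherit domain; } == { domain = domain; }  # evaluates to true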

@@ -5,39 +5,43 @@ let
   generator_port = 8695;
 in
 {
-  services.opentracker.enable = true;
-
-  services.nginx.virtualHosts."labsync.fablab-nea.de" = {
-    addSSL = true;
-    enableACME = true;
-    locations = {
-      "/generator/".proxyPass = "http://127.0.0.1:${toString generator_port}/";
-    };
-  };
-  services.nginx.virtualHosts."labsync.lab.fablab-nea.de" = {
-    locations = {
-      "/" = {
-        root = "/opt/docker/tftpgen/data";
-        extraConfig = ''
-          autoindex on;
-        '';
-      };
-      "/generator/".proxyPass = "http://127.0.0.1:${toString generator_port}/";
-    };
-  };
-
-  services.atftpd = {
-    enable = true;
-    root = pkgs.runCommand "pxelinux-tftproot" { } ''
-      mkdir -p $out/pxelinux.cfg
-      cp ${pkgs.syslinux}/share/syslinux/{ldlinux.c32,libcom32.c32,libutil.c32,lpxelinux.0,vesamenu.c32} $out
-      cp ${./splash.png} $out/splash.png
-      cp ${./pxelinux.cfg} $out/pxelinux.cfg/default
-      # required to serve labsync/labsync.cfg, which is generated dynamically by a docker container
-      ln -s /opt/docker/tftpgen/data $out/labsync
-    '';
-  };
+  services = {
+    opentracker.enable = true;
+
+    atftpd = {
+      enable = true;
+      root = pkgs.runCommand "pxelinux-tftproot" { } ''
+        mkdir -p $out/pxelinux.cfg
+        cp ${pkgs.syslinux}/share/syslinux/{ldlinux.c32,libcom32.c32,libutil.c32,lpxelinux.0,vesamenu.c32} $out
+        cp ${./splash.png} $out/splash.png
+        cp ${./pxelinux.cfg} $out/pxelinux.cfg/default
+        # required to serve labsync/labsync.cfg, which is generated dynamically by a docker container
+        ln -s /opt/docker/tftpgen/data $out/labsync
+      '';
+    };
+
+    nginx.virtualHosts = {
+      "labsync.fablab-nea.de" = {
+        addSSL = true;
+        enableACME = true;
+        locations = {
+          "/generator/".proxyPass = "http://127.0.0.1:${toString generator_port}/";
+        };
+      };
+      "labsync.lab.fablab-nea.de" = {
+        locations = {
+          "/" = {
+            root = "/opt/docker/tftpgen/data";
+            extraConfig = ''
+              autoindex on;
+            '';
+          };
+          "/generator/".proxyPass = "http://127.0.0.1:${toString generator_port}/";
+        };
+      };
+    };
+  };
 
   networking.firewall.allowedTCPPorts = [
     6881 # aria2
     6969 # opentracker

@@ -7,137 +7,139 @@ let
   mkStaticTarget = target: mkStaticTargets (lib.singleton target);
 in
 {
-  services.prometheus.exporters.node.enable = true;
-
-  services.prometheus = {
-    enable = true;
-    listenAddress = "127.0.0.1";
-    webExternalUrl = "https://${domain}";
-    globalConfig = {
-      scrape_interval = "15s";
-      evaluation_interval = "15s";
-    };
-    extraFlags = [
-      "--storage.tsdb.retention.time=90d"
-      "--web.enable-admin-api"
-    ];
-    alertmanagers = [
-      {
-        static_configs = mkStaticTarget "${cfg.alertmanager.listenAddress}:${toString cfg.alertmanager.port}";
-        path_prefix = "/alertmanager/";
-      }
-    ];
-    alertmanager = {
-      enable = true;
-      listenAddress = "127.0.0.1";
-      webExternalUrl = "https://${domain}/alertmanager";
-      configuration = {
-        global.resolve_timeout = "2m";
-
-        route = {
-          receiver = "matrix";
-          group_by = [ "alertname" ];
-          group_wait = "3m";
-        };
-
-        receivers = [
-          {
-            name = "matrix";
-            webhook_configs = lib.singleton {
-              url = "http://localhost/webhook";
-            };
-          }
-        ];
-      };
-    };
-    scrapeConfigs = [
-      {
-        job_name = "prometheus";
-        static_configs = mkStaticTargets [
-          "localhost:${toString cfg.port}"
-          "kleinturmbuehne-router:9100"
-        ];
-      }
-      {
-        job_name = "node";
-        static_configs = mkStaticTargets [
-          "127.0.0.1:9100"
-        ];
-      }
-      {
-        job_name = "asterisk";
-        metrics_path = "/";
-        static_configs = mkStaticTargets [
-          "127.0.0.1:8088"
-        ];
-      }
-      {
-        job_name = "mikrotik";
-        static_configs = mkStaticTargets [
-          "${cfg.exporters.mikrotik.listenAddress}:${toString cfg.exporters.mikrotik.port}"
-        ];
-      }
-      {
-        job_name = "unifi";
-        static_configs = mkStaticTargets [
-          "${cfg.exporters.unpoller.listenAddress}:${toString cfg.exporters.unpoller.port}"
-        ];
-      }
-    ];
-    rules =
-      let
-        mkAlert = { name, expr, for ? "1m", description ? null }: {
-          alert = name;
-          inherit expr for;
-          annotations = lib.optionalAttrs (description != null) { inherit description; };
-        };
-      in
-      [
-        (lib.generators.toYAML { } {
-          groups = lib.singleton {
-            name = "alert.rules";
-            rules = map mkAlert [
-              {
-                name = "InstanceDown";
-                expr = ''up == 0'';
-                description = "Instance {{ $labels.instance }} of job {{ $labels.job }} has been down for
-                  more than 1 minutes.";
-              }
-            ];
-          };
-        })
-      ];
-  };
-
   sops.secrets.prometheus-htpasswd = {
     owner = "nginx";
     sopsFile = ../secrets.yaml;
   };
 
-  services.nginx.virtualHosts."${domain}" = {
-    enableACME = true;
-    forceSSL = true;
-
-    basicAuthFile = config.sops.secrets.prometheus-htpasswd.path;
-
-    locations = {
-      "/".proxyPass = "http://${cfg.listenAddress}:${toString cfg.port}";
-
-      "/alertmanager/".proxyPass = "http://${cfg.alertmanager.listenAddress}:${toString cfg.alertmanager.port}";
-    };
-  };
-
-  services.prometheus.exporters.mikrotik = {
-    enable = true;
-    listenAddress = "127.0.0.1";
-    configuration = {
-      devices = [
-      ];
-      features = {
-        bgp = true;
-        dhcp = true;
-        routes = true;
-        optics = true;
-      };
-    };
-  };
+  services = {
+    prometheus = {
+      exporters.node.enable = true;
+      enable = true;
+      listenAddress = "127.0.0.1";
+      webExternalUrl = "https://${domain}";
+      globalConfig = {
+        scrape_interval = "15s";
+        evaluation_interval = "15s";
+      };
+      extraFlags = [
+        "--storage.tsdb.retention.time=90d"
+        "--web.enable-admin-api"
+      ];
+      alertmanagers = [
+        {
+          static_configs = mkStaticTarget "${cfg.alertmanager.listenAddress}:${toString cfg.alertmanager.port}";
+          path_prefix = "/alertmanager/";
+        }
+      ];
+      alertmanager = {
+        enable = true;
+        listenAddress = "127.0.0.1";
+        webExternalUrl = "https://${domain}/alertmanager";
+        configuration = {
+          global.resolve_timeout = "2m";
+
+          route = {
+            receiver = "matrix";
+            group_by = [ "alertname" ];
+            group_wait = "3m";
+          };
+
+          receivers = [
+            {
+              name = "matrix";
+              webhook_configs = lib.singleton {
+                url = "http://localhost/webhook";
+              };
+            }
+          ];
+        };
+      };
+      scrapeConfigs = [
+        {
+          job_name = "prometheus";
+          static_configs = mkStaticTargets [
+            "localhost:${toString cfg.port}"
+            "kleinturmbuehne-router:9100"
+          ];
+        }
+        {
+          job_name = "node";
+          static_configs = mkStaticTargets [
+            "127.0.0.1:9100"
+          ];
+        }
+        {
+          job_name = "asterisk";
+          metrics_path = "/";
+          static_configs = mkStaticTargets [
+            "127.0.0.1:8088"
+          ];
+        }
+        {
+          job_name = "mikrotik";
+          static_configs = mkStaticTargets [
+            "${cfg.exporters.mikrotik.listenAddress}:${toString cfg.exporters.mikrotik.port}"
+          ];
+        }
+        {
+          job_name = "unifi";
+          static_configs = mkStaticTargets [
+            "${cfg.exporters.unpoller.listenAddress}:${toString cfg.exporters.unpoller.port}"
+          ];
+        }
+      ];
+      rules =
+        let
+          mkAlert = { name, expr, for ? "1m", description ? null }: {
+            alert = name;
+            inherit expr for;
+            annotations = lib.optionalAttrs (description != null) { inherit description; };
+          };
+        in
+        [
+          (lib.generators.toYAML { } {
+            groups = lib.singleton {
+              name = "alert.rules";
+              rules = map mkAlert [
+                {
+                  name = "InstanceDown";
+                  expr = ''up == 0'';
+                  description = "Instance {{ $labels.instance }} of job {{ $labels.job }} has been down for
+                    more than 1 minutes.";
+                }
+              ];
+            };
+          })
+        ];
+    };
+
+    prometheus.exporters.mikrotik = {
+      enable = true;
+      listenAddress = "127.0.0.1";
+      configuration = {
+        devices = [
+        ];
+        features = {
+          bgp = true;
+          dhcp = true;
+          routes = true;
+          optics = true;
+        };
+      };
+    };
+
+    nginx.virtualHosts."${domain}" = {
+      enableACME = true;
+      forceSSL = true;
+
+      basicAuthFile = config.sops.secrets.prometheus-htpasswd.path;
+
+      locations = {
+        "/".proxyPass = "http://${cfg.listenAddress}:${toString cfg.port}";
+
+        "/alertmanager/".proxyPass = "http://${cfg.alertmanager.listenAddress}:${toString cfg.alertmanager.port}";
+      };
+    };
+  };
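
Note: the mkAlert helper carried over unchanged into the nested services block can be evaluated on its own; a sketch with illustrative arguments, assuming nixpkgs on the NIX_PATH:

# With no description, annotations stays empty and for falls back to "1m".
let
  inherit (import <nixpkgs> { }) lib;
  mkAlert = { name, expr, for ? "1m", description ? null }: {
    alert = name;
    inherit expr for;
    annotations = lib.optionalAttrs (description != null) { inherit description; };
  };
in
mkAlert { name = "InstanceDown"; expr = "up == 0"; }
# => { alert = "InstanceDown"; annotations = { }; expr = "up == 0"; for = "1m"; }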

@@ -62,31 +62,33 @@ in
   };
 
   # Create the netowrk
-  systemd.services.init-filerun-network-and-files = {
-    description = "Create the network bridge ${networkName} for WeKan.";
-    after = [ "network.target" ];
-    wantedBy = [ "multi-user.target" ];
-
-    serviceConfig.Type = "oneshot";
-    script =
-      let podmancli = "${pkgs.podman}/bin/podman";
-      in ''
-        if ! ${podmancli} network ls --format '{{ .Name }}' | grep -qFx -- "${networkName}"; then
-          ${podmancli} network create "${networkName}"
-        else
-          echo "network already exists"
-        fi
-      '';
-  };
-
-  systemd.services.wekan-restart = {
-    description = "Restart Wekan services.";
-    serviceConfig = {
-      Type = "oneshot";
-    };
-    script = ''
-      ${pkgs.systemd}/bin/systemctl restart "podman-${databaseName}.service" "podman-${serviceName}.service"
-    '';
-  };
+  systemd.services = {
+    init-filerun-network-and-files = {
+      description = "Create the network bridge ${networkName} for WeKan.";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig.Type = "oneshot";
+      script =
+        let podmancli = "${pkgs.podman}/bin/podman";
+        in ''
+          if ! ${podmancli} network ls --format '{{ .Name }}' | grep -qFx -- "${networkName}"; then
+            ${podmancli} network create "${networkName}"
+          else
+            echo "network already exists"
+          fi
+        '';
+    };
+
+    wekan-restart = {
+      description = "Restart Wekan services.";
+      serviceConfig = {
+        Type = "oneshot";
+      };
+      script = ''
+        ${pkgs.systemd}/bin/systemctl restart "podman-${databaseName}.service" "podman-${serviceName}.service"
+      '';
+    };
+  };
 
   systemd.timers.wekan-restart = {

@@ -1,8 +1,8 @@
 { lib, ... }:
 
 {
-  nixpkgs.config.allowUnfreePredicate = (pkg: lib.elem (lib.getName pkg) [
+  nixpkgs.config.allowUnfreePredicate = pkg: lib.elem (lib.getName pkg) [
     "unifi-controller"
     "mongodb"
-  ]);
+  ];
 }
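
Note: as with the buildInputs change in flake.nix, the parentheses around this predicate were redundant, since a lambda body extends to the end of the enclosing expression. The predicate can be exercised with a mock package attrset, assuming nixpkgs on the NIX_PATH:

# lib.getName reads pname (falling back to parsing name), so a minimal mock suffices.
let
  inherit (import <nixpkgs> { }) lib;
  predicate = pkg: lib.elem (lib.getName pkg) [ "unifi-controller" "mongodb" ];
in
predicate { pname = "mongodb"; version = "7.0.0"; }  # evaluates to true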