Compare commits

...
Sign in to create a new pull request.

58 commits

Author SHA1 Message Date
Jakob Lechner
cac031dd8f flake.lock: Update
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/624fd86460e482017ed9c3c3c55a3758c06a4e7f' (2024-09-19)
  → 'github:nix-community/disko/6d42596a35d34918a905e8539a44d3fc91f42b5b' (2024-09-24)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/10d5e0ecc32984c1bf1a9a46586be3451c42fd94' (2024-09-19)
  → 'github:nixos/nixos-hardware/d0cb432a9d28218df11cbd77d984a2a46caeb5ac' (2024-09-22)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/086b448a5d54fd117f4dc2dee55c9f0ff461bdc1' (2024-09-16)
  → 'github:nixos/nixpkgs/23cbb250f3bf4f516a2d0bf03c51a30900848075' (2024-09-22)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/99dc8785f6a0adac95f5e2ab05cc2e1bf666d172' (2024-09-16)
  → 'github:nixos/nixpkgs/9357f4f23713673f310988025d9dc261c20e70c6' (2024-09-21)
2024-09-24 21:40:28 +02:00
Jakob Lechner
26f62b0ed5 Update unifi controller to version 8 2024-09-24 21:40:28 +02:00
Jakob Lechner
09d3b5b22f flake.lock: Update
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/e1174d991944a01eaaa04bc59c6281edca4c0e6e' (2024-06-20)
  → 'github:nix-community/disko/624fd86460e482017ed9c3c3c55a3758c06a4e7f' (2024-09-19)
• Updated input 'flake-utils':
    'github:numtide/flake-utils/b1d9ab70662946ef0850d488da1c9019f3a9752a' (2024-03-11)
  → 'github:numtide/flake-utils/c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a' (2024-09-17)
• Updated input 'nix-pre-commit-hooks':
    'github:cachix/pre-commit-hooks.nix/8cd35b9496d21a6c55164d8547d9d5280162b07a' (2024-06-20)
  → 'github:cachix/pre-commit-hooks.nix/4e743a6920eab45e8ba0fbe49dc459f1423a4b74' (2024-09-19)
• Updated input 'nix-pre-commit-hooks/nixpkgs-stable':
    'github:NixOS/nixpkgs/842253bf992c3a7157b67600c2857193f126563a' (2024-06-15)
  → 'github:NixOS/nixpkgs/194846768975b7ad2c4988bdb82572c00222c0d7' (2024-07-07)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/083823b7904e43a4fc1c7229781417e875359a42' (2024-06-20)
  → 'github:nixos/nixos-hardware/10d5e0ecc32984c1bf1a9a46586be3451c42fd94' (2024-09-19)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/938aa157bbd6e3c6fd7dcb77998b1f92c2ad1631' (2024-06-18)
  → 'github:nixos/nixpkgs/086b448a5d54fd117f4dc2dee55c9f0ff461bdc1' (2024-09-16)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/c00d587b1a1afbf200b1d8f0b0e4ba9deb1c7f0e' (2024-06-18)
  → 'github:nixos/nixpkgs/99dc8785f6a0adac95f5e2ab05cc2e1bf666d172' (2024-09-16)
• Updated input 'sbruder-overlay':
    'github:sbruder/nixpkgs-overlay/2bcb2b6c7b0e04f4ef8e51e00fd93a5e5cb00bf8' (2024-04-12)
  → 'github:sbruder/nixpkgs-overlay/3487b8ce24d40cc898f3dba0a9af5e028e1d5844' (2024-07-02)
• Updated input 'sbruder-overlay/poetry2nix':
    'github:nix-community/poetry2nix/7acb78166a659d6afe9b043bb6fe5cb5e86bb75e' (2023-12-01)
  → 'github:nix-community/poetry2nix/184960be60652ca7f865123e8394ece988afb566' (2024-04-30)
• Updated input 'sbruder-overlay/poetry2nix/nix-github-actions':
    'github:nix-community/nix-github-actions/4bb5e752616262457bc7ca5882192a564c0472d2' (2023-11-03)
  → 'github:nix-community/nix-github-actions/5163432afc817cf8bd1f031418d1869e4c9d5547' (2023-12-29)
• Updated input 'sbruder-overlay/poetry2nix/treefmt-nix':
    'github:numtide/treefmt-nix/e82f32aa7f06bbbd56d7b12186d555223dc399d1' (2023-11-12)
  → 'github:numtide/treefmt-nix/c6aaf729f34a36c445618580a9f95a48f5e4e03f' (2024-04-25)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/797ce4c1f45a85df6dd3d9abdc53f2691bea9251' (2024-06-16)
  → 'github:Mic92/sops-nix/e2d404a7ea599a013189aa42947f66cede0645c8' (2024-09-16)
• Updated input 'sops-nix/nixpkgs-stable':
    'github:NixOS/nixpkgs/c884223af91820615a6146af1ae1fea25c107005' (2024-06-15)
  → 'github:NixOS/nixpkgs/dc454045f5b5d814e5862a6d057e7bb5c29edc05' (2024-09-08)
2024-09-24 21:40:28 +02:00
Jakob Lechner
43db0de26a Update my SSH keys 2024-09-24 21:40:28 +02:00
Jakob Lechner
ad3b9ab43f Reinstall raven 2024-09-24 21:40:28 +02:00
Jakob Lechner
001ebc9b1f Add disko 2024-06-21 14:04:30 +02:00
Jakob Lechner
af1c8a76ba flake.lock: Update
Flake lock file updates:

• Updated input 'flake-utils':
    'github:numtide/flake-utils/d465f4819400de7c8d874d50b982301f28a84605' (2024-02-28)
  → 'github:numtide/flake-utils/b1d9ab70662946ef0850d488da1c9019f3a9752a' (2024-03-11)
• Updated input 'nix-pre-commit-hooks':
    'github:cachix/pre-commit-hooks.nix/5df5a70ad7575f6601d91f0efec95dd9bc619431' (2024-02-15)
  → 'github:cachix/pre-commit-hooks.nix/8cd35b9496d21a6c55164d8547d9d5280162b07a' (2024-06-20)
• Removed input 'nix-pre-commit-hooks/flake-utils'
• Updated input 'nix-pre-commit-hooks/gitignore':
    'github:hercules-ci/gitignore.nix/43e1aa1308018f37118e34d3a9cb4f5e75dc11d5' (2023-12-29)
  → 'github:hercules-ci/gitignore.nix/637db329424fd7e46cf4185293b9cc8c88c95394' (2024-02-28)
• Updated input 'nix-pre-commit-hooks/nixpkgs-stable':
    'github:NixOS/nixpkgs/3dc440faeee9e889fe2d1b4d25ad0f430d449356' (2024-01-10)
  → 'github:NixOS/nixpkgs/842253bf992c3a7157b67600c2857193f126563a' (2024-06-15)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/59e37017b9ed31dee303dbbd4531c594df95cfbc' (2024-03-02)
  → 'github:nixos/nixos-hardware/083823b7904e43a4fc1c7229781417e875359a42' (2024-06-20)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/805a384895c696f802a9bf5bf4720f37385df547' (2024-05-31)
  → 'github:nixos/nixpkgs/938aa157bbd6e3c6fd7dcb77998b1f92c2ad1631' (2024-06-18)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/b8697e57f10292a6165a20f03d2f42920dfaf973' (2024-03-03)
  → 'github:nixos/nixpkgs/c00d587b1a1afbf200b1d8f0b0e4ba9deb1c7f0e' (2024-06-18)
• Updated input 'sbruder-overlay':
    'github:sbruder/nixpkgs-overlay/32ef4fd545a29cdcb2613934525b97470818b42e' (2024-01-01)
  → 'github:sbruder/nixpkgs-overlay/2bcb2b6c7b0e04f4ef8e51e00fd93a5e5cb00bf8' (2024-04-12)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/25dd60fdd08fcacee2567a26ba6b91fe098941dc' (2024-03-06)
  → 'github:Mic92/sops-nix/797ce4c1f45a85df6dd3d9abdc53f2691bea9251' (2024-06-16)
• Updated input 'sops-nix/nixpkgs-stable':
    'github:NixOS/nixpkgs/66d65cb00b82ffa04ee03347595aa20e41fe3555' (2024-03-03)
  → 'github:NixOS/nixpkgs/c884223af91820615a6146af1ae1fea25c107005' (2024-06-15)
2024-06-20 23:45:30 +02:00
Jakob Lechner
684da44657 Update to 24.05 2024-06-04 00:24:09 +02:00
Jakob Lechner
0615870cee Update system.stateVersion 2024-03-06 23:27:16 +00:00
Jakob Lechner
f51cb0e8dd Fix overlay 2024-03-06 23:27:05 +00:00
Jakob Lechner
48c96851d1 Remove exa as it has been removed 2024-03-06 23:25:28 +00:00
Jakob Lechner
a2bf925873 Add luks passfile 2024-03-06 23:19:27 +00:00
Jakob Lechner
af9856537e Fix deprecation
types.string is deprecated
2024-03-06 23:19:09 +00:00
Jakob Lechner
3cb035de5e flake.lock: Update
Flake lock file updates:

• Updated input 'flake-utils':
    'github:numtide/flake-utils/919d646de7be200f3bf08cb76ae1f09402b6f9b4' (2023-07-11)
  → 'github:numtide/flake-utils/d465f4819400de7c8d874d50b982301f28a84605' (2024-02-28)
• Updated input 'nix-pre-commit-hooks':
    'github:cachix/pre-commit-hooks.nix/52bf404674068e7f1ad8ee08bb95648be5a4fb19' (2023-08-03)
  → 'github:cachix/pre-commit-hooks.nix/5df5a70ad7575f6601d91f0efec95dd9bc619431' (2024-02-15)
• Updated input 'nix-pre-commit-hooks/flake-compat':
    'github:edolstra/flake-compat/35bb57c0c8d8b62bbfd284272c928ceb64ddbde9' (2023-01-17)
  → 'github:edolstra/flake-compat/0f9255e01c2351cc7d116c072cb317785dd33b33' (2023-10-04)
• Updated input 'nix-pre-commit-hooks/gitignore':
    'github:hercules-ci/gitignore.nix/a20de23b925fd8264fd7fad6454652e142fd7f73' (2022-08-14)
  → 'github:hercules-ci/gitignore.nix/43e1aa1308018f37118e34d3a9cb4f5e75dc11d5' (2023-12-29)
• Updated input 'nix-pre-commit-hooks/nixpkgs-stable':
    'github:NixOS/nixpkgs/c37ca420157f4abc31e26f436c1145f8951ff373' (2023-06-03)
  → 'github:NixOS/nixpkgs/3dc440faeee9e889fe2d1b4d25ad0f430d449356' (2024-01-10)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/24f9162b26f0debd163f6d94752aa2acb9db395a' (2023-08-02)
  → 'github:nixos/nixos-hardware/59e37017b9ed31dee303dbbd4531c594df95cfbc' (2024-03-02)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/66aedfd010204949cb225cf749be08cb13ce1813' (2023-08-02)
  → 'github:nixos/nixpkgs/b8697e57f10292a6165a20f03d2f42920dfaf973' (2024-03-03)
• Updated input 'sbruder-overlay':
    'github:sbruder/nixpkgs-overlay/fcd0dc1d7532403fead90e7aad4595133cc994e7' (2023-06-17)
  → 'github:sbruder/nixpkgs-overlay/32ef4fd545a29cdcb2613934525b97470818b42e' (2024-01-01)
• Updated input 'sbruder-overlay/poetry2nix':
    'github:nix-community/poetry2nix/e2d2c7a31485aeb801fa85da2d0fa103dd5112ef' (2023-04-22)
  → 'github:nix-community/poetry2nix/7acb78166a659d6afe9b043bb6fe5cb5e86bb75e' (2023-12-01)
• Added input 'sbruder-overlay/poetry2nix/nix-github-actions':
    'github:nix-community/nix-github-actions/4bb5e752616262457bc7ca5882192a564c0472d2' (2023-11-03)
• Added input 'sbruder-overlay/poetry2nix/nix-github-actions/nixpkgs':
    follows 'sbruder-overlay/poetry2nix/nixpkgs'
• Added input 'sbruder-overlay/poetry2nix/systems':
    'github:nix-systems/default/da67096a3b9bf56a91d16901293e51ba5b49a27e' (2023-04-09)
• Added input 'sbruder-overlay/poetry2nix/treefmt-nix':
    'github:numtide/treefmt-nix/e82f32aa7f06bbbd56d7b12186d555223dc399d1' (2023-11-12)
• Added input 'sbruder-overlay/poetry2nix/treefmt-nix/nixpkgs':
    follows 'sbruder-overlay/poetry2nix/nixpkgs'
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/c36df4fe4bf4bb87759b1891cab21e7a05219500' (2023-07-24)
  → 'github:Mic92/sops-nix/25dd60fdd08fcacee2567a26ba6b91fe098941dc' (2024-03-06)
• Updated input 'sops-nix/nixpkgs-stable':
    'github:NixOS/nixpkgs/ce45b591975d070044ca24e3003c830d26fea1c8' (2023-07-22)
  → 'github:NixOS/nixpkgs/66d65cb00b82ffa04ee03347595aa20e41fe3555' (2024-03-03)
2024-03-06 22:38:19 +00:00
Jakob Lechner
1dd960d23f Update to 23.11 2024-03-06 22:34:00 +00:00
Jakob Lechner
93c1fa27e9 Removed input nixpkgs-asterisk 2024-03-06 22:32:32 +00:00
Jakob Lechner
f08fd1ad0c Update my GPG key 2024-03-06 22:17:20 +00:00
Jakob Lechner
fe93c7c863 Fix dyndns
- remove IPv6 configuration
- use `freedns.afraid.org` to retrieve IP address
2024-03-06 22:09:24 +00:00
Jakob Lechner
ba2d32e624
Add mailhog
Let's use it for now until we have a proper mailing setup.
2023-08-05 10:44:49 +00:00
Jakob Lechner
53fec820de
Add Prometheus 2023-08-05 10:44:46 +00:00
Jakob Lechner
bae054fc55
Remove targetHost setting 2023-08-04 10:54:44 +00:00
Jakob Lechner
a69ff29470
Add grafana 2023-08-04 10:09:33 +00:00
Jakob Lechner
20b3f1ef42
flake.lock: Update
Flake lock file updates:

• Updated input 'nix-pre-commit-hooks':
    'github:cachix/pre-commit-hooks.nix/9289996dcac62fd45836db7c07b87d2521eb526d' (2023-07-27)
  → 'github:cachix/pre-commit-hooks.nix/52bf404674068e7f1ad8ee08bb95648be5a4fb19' (2023-08-03)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/ba9650b14e83b365fb9e731f7d7c803f22d2aecf' (2023-07-24)
  → 'github:nixos/nixos-hardware/24f9162b26f0debd163f6d94752aa2acb9db395a' (2023-08-02)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/f3fbbc36b4e179a5985b9ab12624e9dfe7989341' (2023-07-26)
  → 'github:nixos/nixpkgs/bd836ac5e5a7358dea73cb74a013ca32864ccb86' (2023-08-01)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/ef99fa5c5ed624460217c31ac4271cfb5cb2502c' (2023-07-25)
  → 'github:nixos/nixpkgs/66aedfd010204949cb225cf749be08cb13ce1813' (2023-08-02)
• Updated input 'sbruder-overlay':
    'github:sbruder/nixpkgs-overlay/b095898a01dd3bf434488a18f887e718e2f5e528' (2023-03-06)
  → 'github:sbruder/nixpkgs-overlay/fcd0dc1d7532403fead90e7aad4595133cc994e7' (2023-06-17)
• Added input 'sbruder-overlay/poetry2nix':
    'github:nix-community/poetry2nix/e2d2c7a31485aeb801fa85da2d0fa103dd5112ef' (2023-04-22)
• Added input 'sbruder-overlay/poetry2nix/flake-utils':
    follows 'sbruder-overlay/flake-utils'
• Added input 'sbruder-overlay/poetry2nix/nixpkgs':
    follows 'sbruder-overlay/nixpkgs'
2023-08-04 10:09:33 +00:00
Jakob Lechner
9a60592474
Remove eventphone registration 2023-08-04 10:09:32 +00:00
Jakob Lechner
79231df64d
Add SIP-DECT 2023-08-04 10:09:32 +00:00
Jakob Lechner
1c498bd3b9
Add weinturm hosts 2023-08-04 10:09:29 +00:00
Jakob Lechner
f66d88b45e
Add tags 2023-07-28 04:17:36 +00:00
Jakob Lechner
bab350fae3
Fix asterisk not loading res_geolocation
See https://github.com/NixOS/nixpkgs/issues/208165
2023-07-28 04:15:36 +00:00
Jakob Lechner
32b8480264
flake.lock: Update
Flake lock file updates:

• Updated input 'nix-pre-commit-hooks':
    'github:cachix/pre-commit-hooks.nix/eb433bff05b285258be76513add6f6c57b441775' (2023-07-18)
  → 'github:cachix/pre-commit-hooks.nix/9289996dcac62fd45836db7c07b87d2521eb526d' (2023-07-27)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/d4ea64f2063820120c05f6ba93ee02e6d4671d6b' (2023-07-14)
  → 'github:nixos/nixos-hardware/ba9650b14e83b365fb9e731f7d7c803f22d2aecf' (2023-07-24)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/08700de174bc6235043cb4263b643b721d936bdb' (2023-07-18)
  → 'github:nixos/nixpkgs/f3fbbc36b4e179a5985b9ab12624e9dfe7989341' (2023-07-26)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/684c17c429c42515bafb3ad775d2a710947f3d67' (2023-07-18)
  → 'github:nixos/nixpkgs/ef99fa5c5ed624460217c31ac4271cfb5cb2502c' (2023-07-25)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/bd695cc4d0a5e1bead703cc1bec5fa3094820a81' (2023-07-16)
  → 'github:Mic92/sops-nix/c36df4fe4bf4bb87759b1891cab21e7a05219500' (2023-07-24)
• Updated input 'sops-nix/nixpkgs-stable':
    'github:NixOS/nixpkgs/13231eccfa1da771afa5c0807fdd73e05a1ec4e6' (2023-07-16)
  → 'github:NixOS/nixpkgs/ce45b591975d070044ca24e3003c830d26fea1c8' (2023-07-22)
2023-07-27 21:08:03 +00:00
Jakob Lechner
b58558db65
Replace hard-coded path to freeradius 2023-07-20 19:49:51 +00:00
Jakob Lechner
f960367e32
flake.lock: Update
Flake lock file updates:

• Updated input 'nix-pre-commit-hooks':
    'github:cachix/pre-commit-hooks.nix/c8d18ba345730019c3faf412c96a045ade171895' (2023-07-05)
  → 'github:cachix/pre-commit-hooks.nix/eb433bff05b285258be76513add6f6c57b441775' (2023-07-18)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/44bc025007e5fcc10dbc3d9f96dcbf06fc0e8c1c' (2023-07-11)
  → 'github:nixos/nixos-hardware/d4ea64f2063820120c05f6ba93ee02e6d4671d6b' (2023-07-14)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/fcc147b1e9358a8386b2c4368bd928e1f63a7df2' (2023-07-13)
  → 'github:nixos/nixpkgs/08700de174bc6235043cb4263b643b721d936bdb' (2023-07-18)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/2de8efefb6ce7f5e4e75bdf57376a96555986841' (2023-07-12)
  → 'github:nixos/nixpkgs/684c17c429c42515bafb3ad775d2a710947f3d67' (2023-07-18)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/88b964df6981e4844c07be8c192aa6bdca768a10' (2023-07-12)
  → 'github:Mic92/sops-nix/bd695cc4d0a5e1bead703cc1bec5fa3094820a81' (2023-07-16)
• Updated input 'sops-nix/nixpkgs-stable':
    'github:NixOS/nixpkgs/510d721ce097150ae3b80f84b04b13b039186571' (2023-07-09)
  → 'github:NixOS/nixpkgs/13231eccfa1da771afa5c0807fdd73e05a1ec4e6' (2023-07-16)
2023-07-20 19:49:48 +00:00
Jakob Lechner
27ebfe4fae
Add restart timer to update container image 2023-07-14 13:39:14 +00:00
Jakob Lechner
5425a5fac6
Add pull policy 2023-07-14 13:38:55 +00:00
Jakob Lechner
768d7ac559
Add variable networkName 2023-07-14 13:38:53 +00:00
Jakob Lechner
a023ff01f0
Add variable serviceName 2023-07-14 13:36:38 +00:00
Jakob Lechner
700b505de4
Add Wekan 2023-07-14 12:18:27 +00:00
Jakob Lechner
fbca9cf7e0
Update state version 2023-07-13 23:32:18 +00:00
Jakob Lechner
27460d3682
Fix deprecations 2023-07-13 23:20:41 +00:00
Jakob Lechner
3cefc7f9dd
flake.lock: Update
Flake lock file updates:

• Updated input 'flake-utils':
    'github:numtide/flake-utils/cfacdce06f30d2b68473a46042957675eebb3401' (2023-04-11)
  → 'github:numtide/flake-utils/919d646de7be200f3bf08cb76ae1f09402b6f9b4' (2023-07-11)
• Updated input 'nix-pre-commit-hooks':
    'github:cachix/pre-commit-hooks.nix/61e567d6497bc9556f391faebe5e410e6623217f' (2023-05-23)
  → 'github:cachix/pre-commit-hooks.nix/c8d18ba345730019c3faf412c96a045ade171895' (2023-07-05)
• Updated input 'nix-pre-commit-hooks/nixpkgs-stable':
    'github:NixOS/nixpkgs/9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8' (2023-03-15)
  → 'github:NixOS/nixpkgs/c37ca420157f4abc31e26f436c1145f8951ff373' (2023-06-03)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/4cc688ee711159b9bcb5a367be44007934e1a49d' (2023-05-24)
  → 'github:nixos/nixos-hardware/44bc025007e5fcc10dbc3d9f96dcbf06fc0e8c1c' (2023-07-11)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/3e01645c40b92d29f3ae76344a6d654986a91a91' (2023-05-25)
  → 'github:nixos/nixpkgs/fcc147b1e9358a8386b2c4368bd928e1f63a7df2' (2023-07-13)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/f91ee3065de91a3531329a674a45ddcb3467a650' (2023-05-24)
  → 'github:nixos/nixpkgs/2de8efefb6ce7f5e4e75bdf57376a96555986841' (2023-07-12)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/4ccdfb573f323a108a44c13bb7730e42baf962a9' (2023-05-21)
  → 'github:Mic92/sops-nix/88b964df6981e4844c07be8c192aa6bdca768a10' (2023-07-12)
• Updated input 'sops-nix/nixpkgs-stable':
    'github:NixOS/nixpkgs/d0dade110dc7072d67ce27826cfe9ab2ab0cf247' (2023-05-21)
  → 'github:NixOS/nixpkgs/510d721ce097150ae3b80f84b04b13b039186571' (2023-07-09)
2023-07-13 16:33:25 +00:00
Jakob Lechner
3e6fdc74f8
Allow unfree package: mongodb
mongodb is a requirement for Ubiquity controller.
2023-05-26 18:40:11 +00:00
Jakob Lechner
6c4bf599bc
flake.lock: Update
Flake lock file updates:

• Updated input 'nix-pre-commit-hooks':
    'github:cachix/pre-commit-hooks.nix/fb58866e20af98779017134319b5663b8215d912' (2023-04-27)
  → 'github:cachix/pre-commit-hooks.nix/61e567d6497bc9556f391faebe5e410e6623217f' (2023-05-23)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/fb1317948339713afa82a775a8274a91334f6182' (2023-05-11)
  → 'github:nixos/nixos-hardware/4cc688ee711159b9bcb5a367be44007934e1a49d' (2023-05-24)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/04aaf8511678a0d0f347fdf1e8072fe01e4a509e' (2023-05-24)
  → 'github:nixos/nixpkgs/3e01645c40b92d29f3ae76344a6d654986a91a91' (2023-05-25)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/897876e4c484f1e8f92009fd11b7d988a121a4e7' (2023-05-06)
  → 'github:nixos/nixpkgs/f91ee3065de91a3531329a674a45ddcb3467a650' (2023-05-24)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/36b062a2c85a0efb37de1300c79c54602a094fab' (2023-05-08)
  → 'github:Mic92/sops-nix/4ccdfb573f323a108a44c13bb7730e42baf962a9' (2023-05-21)
• Updated input 'sops-nix/nixpkgs-stable':
    'github:NixOS/nixpkgs/ba0086c178d4ed60a7899f739caea553eca2e046' (2023-05-08)
  → 'github:NixOS/nixpkgs/d0dade110dc7072d67ce27826cfe9ab2ab0cf247' (2023-05-21)
2023-05-26 15:40:26 +00:00
Jakob Lechner
1d4d931dd2
Bump nixos version to 23.05 2023-05-25 18:36:25 +00:00
Jakob Lechner
1b8846cb22
flake.lock: Update
Flake lock file updates:

• Updated input 'flake-utils':
    'github:numtide/flake-utils/5aed5285a952e0b949eb3ba02c12fa4fcfef535f' (2022-11-02)
  → 'github:numtide/flake-utils/cfacdce06f30d2b68473a46042957675eebb3401' (2023-04-11)
• Added input 'flake-utils/systems':
    'github:nix-systems/default/da67096a3b9bf56a91d16901293e51ba5b49a27e' (2023-04-09)
• Updated input 'nix-pre-commit-hooks':
    'github:cachix/pre-commit-hooks.nix/67d98f02443b9928bc77f1267741dcfdd3d7b65c' (2022-12-26)
  → 'github:cachix/pre-commit-hooks.nix/fb58866e20af98779017134319b5663b8215d912' (2023-04-27)
• Updated input 'nix-pre-commit-hooks/flake-compat':
    'github:edolstra/flake-compat/009399224d5e398d03b22badca40a37ac85412a1' (2022-11-17)
  → 'github:edolstra/flake-compat/35bb57c0c8d8b62bbfd284272c928ceb64ddbde9' (2023-01-17)
• Updated input 'nix-pre-commit-hooks/nixpkgs-stable':
    'github:NixOS/nixpkgs/d513b448cc2a6da2c8803e3c197c9fc7e67b19e3' (2022-12-17)
  → 'github:NixOS/nixpkgs/9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8' (2023-03-15)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/0517e81e8ce24a0f4f9eebedbd7bbefcac97c058' (2023-01-01)
  → 'github:nixos/nixos-hardware/fb1317948339713afa82a775a8274a91334f6182' (2023-05-11)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/913a47cd064cc06440ea84e5e0452039a85781f0' (2022-12-29)
  → 'github:nixos/nixpkgs/a08e061a4ee8329747d54ddf1566d34c55c895eb' (2023-05-09)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/677ed08a50931e38382dbef01cba08a8f7eac8f6' (2022-12-29)
  → 'github:nixos/nixpkgs/897876e4c484f1e8f92009fd11b7d988a121a4e7' (2023-05-06)
• Updated input 'sbruder-overlay':
    'github:sbruder/nixpkgs-overlay/ff4ce742bffb71fc983cb13a3634ec0d243d869c' (2022-11-04)
  → 'github:sbruder/nixpkgs-overlay/b095898a01dd3bf434488a18f887e718e2f5e528' (2023-03-06)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/b35586cc5abacd4eba9ead138b53e2a60920f781' (2023-01-01)
  → 'github:Mic92/sops-nix/36b062a2c85a0efb37de1300c79c54602a094fab' (2023-05-08)
• Updated input 'sops-nix/nixpkgs-stable':
    'github:NixOS/nixpkgs/feda52be1d59f13b9aa02f064b4f14784b9a06c8' (2022-12-31)
  → 'github:NixOS/nixpkgs/ba0086c178d4ed60a7899f739caea553eca2e046' (2023-05-08)
2023-05-12 19:10:10 +00:00
Jakob Lechner
9e8e2baba1
Re-enable opentracker
libowfat issue is fixed
2023-01-01 16:53:26 +00:00
Jakob Lechner
120074449b
flake.lock: Update
Flake lock file updates:

• Updated input 'nix-pre-commit-hooks':
    'github:cachix/pre-commit-hooks.nix/3eb97d920682777005930ebe01797dc54b1ccb32' (2022-11-04)
  → 'github:cachix/pre-commit-hooks.nix/67d98f02443b9928bc77f1267741dcfdd3d7b65c' (2022-12-26)
• Added input 'nix-pre-commit-hooks/flake-compat':
    'github:edolstra/flake-compat/009399224d5e398d03b22badca40a37ac85412a1' (2022-11-17)
• Added input 'nix-pre-commit-hooks/gitignore':
    'github:hercules-ci/gitignore.nix/a20de23b925fd8264fd7fad6454652e142fd7f73' (2022-08-14)
• Added input 'nix-pre-commit-hooks/gitignore/nixpkgs':
    follows 'nix-pre-commit-hooks/nixpkgs'
• Added input 'nix-pre-commit-hooks/nixpkgs-stable':
    'github:NixOS/nixpkgs/d513b448cc2a6da2c8803e3c197c9fc7e67b19e3' (2022-12-17)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/6b35a59c19ddbbeb229fcd1d3dcd422dcc0fa927' (2022-11-04)
  → 'github:nixos/nixos-hardware/0517e81e8ce24a0f4f9eebedbd7bbefcac97c058' (2023-01-01)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/a2a777538d971c6b01c6e54af89ddd6567c055e8' (2022-11-03)
  → 'github:nixos/nixpkgs/677ed08a50931e38382dbef01cba08a8f7eac8f6' (2022-12-29)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/486b4455da16272c1ed31bc82adcdbe7af829465' (2022-11-02)
  → 'github:Mic92/sops-nix/b35586cc5abacd4eba9ead138b53e2a60920f781' (2023-01-01)
• Removed input 'sops-nix/nixpkgs-22_05'
• Added input 'sops-nix/nixpkgs-stable':
    'github:NixOS/nixpkgs/feda52be1d59f13b9aa02f064b4f14784b9a06c8' (2022-12-31)
2023-01-01 16:52:50 +00:00
Jakob Lechner
4d4e4d6bb5
Bump nixos version to 22.11 2023-01-01 16:52:15 +00:00
Jakob Lechner
c6dd03732d
Add settings for NAT with dynamic IPs 2022-11-04 23:05:33 +00:00
Jakob Lechner
b551451627
Change ddns provider to Duck DNS 2022-11-04 22:59:24 +00:00
Jakob Lechner
a3ce6223b1
Add colorchord service to raven 2022-11-04 22:43:40 +00:00
Jakob Lechner
7f2e0ea8e9
Reinstall party 2022-11-04 22:43:39 +00:00
Jakob Lechner
da5a194655
Add pipewire module 2022-11-04 22:43:37 +00:00
Jakob Lechner
9bf8b48a84
flake.lock: Update
Flake lock file updates:

• Updated input 'sbruder-overlay':
    'github:sbruder/nixpkgs-overlay/72d323ca0410a08abc2d981b812c5cd0fd3338bf' (2021-12-01)
  → 'github:sbruder/nixpkgs-overlay/ff4ce742bffb71fc983cb13a3634ec0d243d869c' (2022-11-04)
2022-11-04 22:37:03 +00:00
Jakob Lechner
886d3e0c37
Use stable nix package
unstable is no longer required as flakes are now part of nix
2022-11-04 19:03:03 +00:00
Jakob Lechner
83375b2ef6
flake.lock: Update
Flake lock file updates:

• Updated input 'flake-utils':
    'github:numtide/flake-utils/7e2a3b3dfd9af950a856d66b0a7d01e3c18aa249' (2022-07-04)
  → 'github:numtide/flake-utils/5aed5285a952e0b949eb3ba02c12fa4fcfef535f' (2022-11-02)
• Updated input 'nix-pre-commit-hooks':
    'github:cachix/pre-commit-hooks.nix/f436e6dbc10bb3500775785072a40eefe057b18e' (2022-07-23)
  → 'github:cachix/pre-commit-hooks.nix/3eb97d920682777005930ebe01797dc54b1ccb32' (2022-11-04)
• Updated input 'nixos-hardware':
    'github:nixos/nixos-hardware/83009edccc2e24afe3d0165ed98b60ff7471a5f8' (2022-07-21)
  → 'github:nixos/nixos-hardware/6b35a59c19ddbbeb229fcd1d3dcd422dcc0fa927' (2022-11-04)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/9370544d849be8a07193e7611d02e6f6f1b10768' (2022-07-28)
  → 'github:nixos/nixpkgs/4f09cfce9c1d54fb56b65125061a632849de1a49' (2022-11-02)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/2a93ea177c3d7700b934bf95adfe00c435f696b8' (2022-07-29)
  → 'github:nixos/nixpkgs/a2a777538d971c6b01c6e54af89ddd6567c055e8' (2022-11-03)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/d7f8cf1b77ebe5f287884f17b1ee4cc4f48bad1d' (2022-07-24)
  → 'github:Mic92/sops-nix/486b4455da16272c1ed31bc82adcdbe7af829465' (2022-11-02)
• Updated input 'sops-nix/nixpkgs-22_05':
    'github:NixOS/nixpkgs/2e14bc76ab41c60ba57fd57ff52badaa29d349f5' (2022-07-24)
  → 'github:NixOS/nixpkgs/6440d13df2327d2db13d3b17e419784020b71d22' (2022-10-30)
2022-11-04 18:24:35 +00:00
Jakob Lechner
b78e67c4f9
Add weinturm extensions 2022-11-04 18:08:20 +00:00
Jakob Lechner
dc7dcf104a
Remove unused mod-config 2022-08-04 03:05:52 +00:00
Jakob Lechner
115ae30929
Remove unused sites 2022-08-04 03:03:55 +00:00
Jakob Lechner
2c13e0d224
Remove unused modules 2022-08-04 03:02:35 +00:00
198 changed files with 929 additions and 19796 deletions

View file

@ -1,17 +1,19 @@
keys:
- &jalr 66FB54F6081375106EEBF651A222365EB448F934
- &jalr 3044E71E3DEFF49B586CF5809BF4FCCB90854DA9
- &simon 47E7559E037A35652DBBF8AA8D3C82F9F309F8EC
- &raven 10E468768E3BCD6459F9F11AC8F765CF8AD1F892
- &raven age1fleny85nvjh6g4arn2tkpju0smq2s4hawwpmnyvgcf0sy65wd3ks4lcvfa
creation_rules:
- path_regex: secrets\.yaml$
key_groups:
- pgp:
- *jalr
- *simon
age:
- *raven
- path_regex: machines/raven/secrets\.yaml$
key_groups:
- pgp:
- *jalr
- *simon
age:
- *raven

248
flake.lock generated
View file

@ -1,12 +1,51 @@
{
"nodes": {
"flake-utils": {
"disko": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1656928814,
"narHash": "sha256-RIFfgBuKz6Hp89yRr7+NR5tzIAbn52h8vT6vXkYjZoM=",
"lastModified": 1727196810,
"narHash": "sha256-xQzgXRlczZoFfrUdA4nD5qojCQVqpiIk82aYINQZd+U=",
"owner": "nix-community",
"repo": "disko",
"rev": "6d42596a35d34918a905e8539a44d3fc91f42b5b",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1726560853,
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "7e2a3b3dfd9af950a856d66b0a7d01e3c18aa249",
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
"type": "github"
},
"original": {
@ -15,6 +54,27 @@
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"nix-pre-commit-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"krops": {
"inputs": {
"flake-utils": [
@ -38,21 +98,43 @@
"type": "github"
}
},
"nix-pre-commit-hooks": {
"nix-github-actions": {
"inputs": {
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs-unstable"
"sbruder-overlay",
"poetry2nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1658611562,
"narHash": "sha256-jktQ3mRrFAiFzzmVxQXh+8IxZOEE4hfr7St3ncXeVy4=",
"lastModified": 1703863825,
"narHash": "sha256-rXwqjtwiGKJheXB43ybM8NwWB8rO2dSRrEqes0S7F5Y=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "5163432afc817cf8bd1f031418d1869e4c9d5547",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nix-pre-commit-hooks": {
"inputs": {
"flake-compat": "flake-compat",
"gitignore": "gitignore",
"nixpkgs": [
"nixpkgs-unstable"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1726745158,
"narHash": "sha256-D5AegvGoEjt4rkKedmxlSEmC+nNLMBPWFxvmYnVLhjk=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "f436e6dbc10bb3500775785072a40eefe057b18e",
"rev": "4e743a6920eab45e8ba0fbe49dc459f1423a4b74",
"type": "github"
},
"original": {
@ -64,11 +146,11 @@
},
"nixos-hardware": {
"locked": {
"lastModified": 1658401027,
"narHash": "sha256-z/sDfzsFOoWNO9nZGfxDCNjHqXvSVZLDBDSgzr9qDXE=",
"lastModified": 1727040444,
"narHash": "sha256-19FNN5QT9Z11ZUMfftRplyNN+2PgcHKb3oq8KMW/hDA=",
"owner": "nixos",
"repo": "nixos-hardware",
"rev": "83009edccc2e24afe3d0165ed98b60ff7471a5f8",
"rev": "d0cb432a9d28218df11cbd77d984a2a46caeb5ac",
"type": "github"
},
"original": {
@ -80,59 +162,59 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1659052185,
"narHash": "sha256-TUbwbzCbprtWB9EtXPM52cWuKETuCV3H+cMXjLRbwTw=",
"lastModified": 1726969270,
"narHash": "sha256-8fnFlXBgM/uSvBlLWjZ0Z0sOdRBesyNdH0+esxqizGc=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "9370544d849be8a07193e7611d02e6f6f1b10768",
"rev": "23cbb250f3bf4f516a2d0bf03c51a30900848075",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-22.05",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-22_05": {
"nixpkgs-stable": {
"locked": {
"lastModified": 1658634393,
"narHash": "sha256-VW7edeFzA9VU8gZPxPFGpoPsM2AQLYHKhA9H5+OYtno=",
"lastModified": 1720386169,
"narHash": "sha256-NGKVY4PjzwAa4upkGtAMz1npHGoRzWotlSnVlqI40mo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2e14bc76ab41c60ba57fd57ff52badaa29d349f5",
"rev": "194846768975b7ad2c4988bdb82572c00222c0d7",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "release-22.05",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-asterisk": {
"nixpkgs-stable_2": {
"locked": {
"lastModified": 1639416782,
"narHash": "sha256-ULr0km91xD8g+UR/Br8PD+H0kMjT0lHVc12KRag7ue4=",
"owner": "yayayayaka",
"lastModified": 1725762081,
"narHash": "sha256-vNv+aJUW5/YurRy1ocfvs4q/48yVESwlC/yHzjkZSP8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ce220610f741ba209a02d7655fb3425f3e5a3358",
"rev": "dc454045f5b5d814e5862a6d057e7bb5c29edc05",
"type": "github"
},
"original": {
"owner": "yayayayaka",
"ref": "asterisk-secrets-handling",
"owner": "NixOS",
"ref": "release-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-unstable": {
"locked": {
"lastModified": 1659077768,
"narHash": "sha256-P0XIHBVty6WIuIrk2DZNvLcYev9956y1prT4zL212H8=",
"lastModified": 1726937504,
"narHash": "sha256-bvGoiQBvponpZh8ClUcmJ6QnsNKw0EMrCQJARK3bI1c=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "2a93ea177c3d7700b934bf95adfe00c435f696b8",
"rev": "9357f4f23713673f310988025d9dc261c20e70c6",
"type": "github"
},
"original": {
@ -142,14 +224,42 @@
"type": "github"
}
},
"poetry2nix": {
"inputs": {
"flake-utils": [
"sbruder-overlay",
"flake-utils"
],
"nix-github-actions": "nix-github-actions",
"nixpkgs": [
"sbruder-overlay",
"nixpkgs"
],
"systems": "systems_2",
"treefmt-nix": "treefmt-nix"
},
"locked": {
"lastModified": 1714509427,
"narHash": "sha256-YTcd6n7BeAVxBNhzOgUHMmsgBkfQ2Cz9ZcFotXrpEg8=",
"owner": "nix-community",
"repo": "poetry2nix",
"rev": "184960be60652ca7f865123e8394ece988afb566",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "poetry2nix",
"type": "github"
}
},
"root": {
"inputs": {
"disko": "disko",
"flake-utils": "flake-utils",
"krops": "krops",
"nix-pre-commit-hooks": "nix-pre-commit-hooks",
"nixos-hardware": "nixos-hardware",
"nixpkgs": "nixpkgs",
"nixpkgs-asterisk": "nixpkgs-asterisk",
"nixpkgs-unstable": "nixpkgs-unstable",
"sbruder-overlay": "sbruder-overlay",
"sops-nix": "sops-nix"
@ -165,14 +275,15 @@
],
"nixpkgs": [
"nixpkgs"
]
],
"poetry2nix": "poetry2nix"
},
"locked": {
"lastModified": 1638388788,
"narHash": "sha256-4t+iDoZO9X8fM1cWfbCbsIagRN0PRkpGcJKaMLJE7yc=",
"lastModified": 1719952130,
"narHash": "sha256-j38XlExNwK4ycmoNEdH/dHUd1QGdNvD3gx/UuLY+04Q=",
"owner": "sbruder",
"repo": "nixpkgs-overlay",
"rev": "72d323ca0410a08abc2d981b812c5cd0fd3338bf",
"rev": "3487b8ce24d40cc898f3dba0a9af5e028e1d5844",
"type": "github"
},
"original": {
@ -186,14 +297,14 @@
"nixpkgs": [
"nixpkgs"
],
"nixpkgs-22_05": "nixpkgs-22_05"
"nixpkgs-stable": "nixpkgs-stable_2"
},
"locked": {
"lastModified": 1658635258,
"narHash": "sha256-EC8y3Rg+l9IzIUdOaFSA0LMdDipTRoweg1Y2EL8XhMc=",
"lastModified": 1726524647,
"narHash": "sha256-qis6BtOOBBEAfUl7FMHqqTwRLB61OL5OFzIsOmRz2J4=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "d7f8cf1b77ebe5f287884f17b1ee4cc4f48bad1d",
"rev": "e2d404a7ea599a013189aa42947f66cede0645c8",
"type": "github"
},
"original": {
@ -201,6 +312,57 @@
"repo": "sops-nix",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"id": "systems",
"type": "indirect"
}
},
"treefmt-nix": {
"inputs": {
"nixpkgs": [
"sbruder-overlay",
"poetry2nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1714058656,
"narHash": "sha256-Qv4RBm4LKuO4fNOfx9wl40W2rBbv5u5m+whxRYUMiaA=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "c6aaf729f34a36c445618580a9f95a48f5e4e03f",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"type": "github"
}
}
},
"root": "root",

View file

@ -1,16 +1,17 @@
{
inputs = {
disko.inputs.nixpkgs.follows = "nixpkgs";
disko.url = "github:nix-community/disko";
flake-utils.url = "github:numtide/flake-utils";
nix-pre-commit-hooks.url = "github:cachix/pre-commit-hooks.nix/master";
nix-pre-commit-hooks.inputs.flake-utils.follows = "flake-utils";
nix-pre-commit-hooks.inputs.nixpkgs.follows = "nixpkgs-unstable";
nixpkgs.url = "github:nixos/nixpkgs/nixos-22.05";
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.05";
nixpkgs-unstable.url = "github:nixos/nixpkgs/nixos-unstable";
# TODO: Remove when https://github.com/NixOS/nixpkgs/pull/149323 is merged
nixpkgs-asterisk.url = "github:yayayayaka/nixpkgs/asterisk-secrets-handling";
nixos-hardware.url = "github:nixos/nixos-hardware/master";
@ -39,7 +40,7 @@
let
pkgs = import nixpkgs {
inherit system;
overlays = [ self.overlay ];
overlays = [ self.overlays.default ];
};
inherit (pkgs) lib;
in
@ -55,7 +56,7 @@
};
};
devShell = pkgs.mkShell {
devShells.default = pkgs.mkShell {
name = "fablab-nixos-config";
buildInputs = (with pkgs; [
@ -112,7 +113,7 @@
fablab;
});
}) // {
overlay = import ./pkgs;
overlays.default = import ./pkgs;
nixosConfigurations = nixpkgs.lib.mapAttrs
(hostname: { system
@ -145,6 +146,7 @@
})
] ++ (with inputs; [
sops-nix.nixosModules.sops
disko.nixosModules.disko
]) ++ extraModules;
})
(import ./machines inputs);

View file

@ -1,23 +1,23 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mDMEYdCpCxYJKwYBBAHaRw8BAQdAL5OkhCMv9ekGaHmLALjDyINBhcR3gmuMZiE/
FzEjNLq0HEpha29iIExlY2huZXIgPG1haWxAamFsci5kZT6IlgQTFgoAPhYhBGb7
VPYIE3UQbuv2UaIiNl60SPk0BQJh0KkLAhsBBQleC+EABQsJCAcDBRUKCQgLBRYC
AwEAAh4BAheAAAoJEKIiNl60SPk0wrsBAKmdNnQza/qt6kMSt4/v/VLAwO9CkIYd
LQIbnDhZcmHxAQDdwWYnSNS357bz8YeUpUKeUfOZ6xAjyRmYuQQ2Mu4tDLgzBGHQ
qkkWCSsGAQQB2kcPAQEHQI0iSVnqIurvk2KV1vpvy4T678NWLqXgXooGTAD1Bq2E
iPUEGBYKACYWIQRm+1T2CBN1EG7r9lGiIjZetEj5NAUCYdCqSQIbAgUJAeEzgACB
CRCiIjZetEj5NHYgBBkWCgAdFiEECgvMdrJ/xQJ9TjAFmWCC77WQbBAFAmHQqkkA
CgkQmWCC77WQbBApJwD9HkYwBQDpNueYPTalsOrMDVUK2+jhNFrTVOLLeppevysA
/2aVnZxLJWh16T7gcQXW73Eifyq0DSzSRKfSKOeOn4kH8poA/jh+ubA9PO6PrwoB
MGRA/sZmPV7bR/Sm/6KWxzCUDSRpAP9EpOHwe0yb55yEyvJHD8vXB9jgeQu+im0y
UDCF+tX9Brg4BGHQqocSCisGAQQBl1UBBQEBB0BpQ5RvkE8dxQpSJKndxOXh6bIA
DOQu5VovlDinXLfYEAMBCAeIfgQYFgoAJhYhBGb7VPYIE3UQbuv2UaIiNl60SPk0
BQJh0KqHAhsMBQkB4TOAAAoJEKIiNl60SPk0uQoA/ibS+RGMq3jPQRy0019mi6yM
hLBjZFEhzh1TgzUvix89AP9dDgHnbS27tBUmqYDR9vFdS3Pww3YI8josvT7m2rJJ
CrgzBGHQqsUWCSsGAQQB2kcPAQEHQM2x+uWFR4z9MzwZnlFMgJrFXxpruZ58WukK
yWrCjURjiH4EGBYKACYWIQRm+1T2CBN1EG7r9lGiIjZetEj5NAUCYdCqxQIbIAUJ
AeEzgAAKCRCiIjZetEj5NJt/AQDB/+oiJ/+WUGViRfPt2xm0MFL+Qzsu+of22Y2I
Ho4ZAgD+IHfBGJyTsMEZktPW/j4wQG8BlFitcos1iRcRdDf6twY=
=trJC
mDMEZbmOERYJKwYBBAHaRw8BAQdAarCLR2RvxBnRODJY8WM98gCRbsHzXFTYTIoR
ZlmbOQe0HEpha29iIExlY2huZXIgPGphbHJAamFsci5kZT6IjgQTFgoANhYhBDBE
5x497/SbWGz1gJv0/MuQhU2pBQJluY4RAhsBBAsJCAcEFQoJCAUWAgMBAAIeBQIX
gAAKCRCb9PzLkIVNqbmFAQDG8xNgbZsZx6N2ssVC9k98IUvuKuMZQ6Gju86EsnNY
dgD/eSVRfAKCtIPSGtoLvE5zL80hk117R4f8rbMEvrmt9gm4MwRluY53FgkrBgEE
AdpHDwEBB0DRonRUQIQSfkqX7yHFHewbEYnc/spaPufL6EnSPVLvZ4j1BBgWCgAm
FiEEMETnHj3v9JtYbPWAm/T8y5CFTakFAmW5jncCGwIFCQHhM4AAgQkQm/T8y5CF
Tal2IAQZFgoAHRYhBDp0/wfiMHs2RqSZ6EYNR7hAgU8/BQJluY53AAoJEEYNR7hA
gU8/HikBAPOziBknk+WcsKODsdViFedagVgtnjW8J6mJZRKNcD2fAP4/42g9wU2i
KHKHypLlGdmgOVOpSGNcubkcPFcOOHH7AZevAQDUU/UNpIHe7R3rYq4sFT2iYa9T
ZKpmOostoAzyYOViZwD/RA2suqGyrSe96JLnxwzy3LccYgV3VwEbHDWeUTvOCAy4
OARluY6pEgorBgEEAZdVAQUBAQdAAXZvPoXdFpBhYS8KgCeXweUMlSwsCnXmgiDh
neSFMwsDAQgHiH4EGBYKACYWIQQwROcePe/0m1hs9YCb9PzLkIVNqQUCZbmOqQIb
DAUJAeEzgAAKCRCb9PzLkIVNqbmEAQDSBggKtjGkLuYtIHBBCfBF4Dx7odOapasa
tYqZTU7twwD/VhDvRGPbTl7X7DYQ36bmyjTe6cZAj3/M0ueQhlTrJAW4MwRluY7E
FgkrBgEEAdpHDwEBB0B95fmIsa7I4c3ttAko71CuEI/wTam0zYrYJNtL7sz3o4h+
BBgWCgAmFiEEMETnHj3v9JtYbPWAm/T8y5CFTakFAmW5jsQCGyAFCQHhM4AACgkQ
m/T8y5CFTamxRwD6A9TAs2Ac2VUQDCGgIEgUeULB2fZ1i0s0zydXctKJf7wBAL64
utFE0ryrkFHMGY4xHMwZfvWosYH/qfLlKadnb3cK
=WgEZ
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -4,7 +4,7 @@ let
in
{
raven = {
targetHost = "192.168.94.1";
targetHost = "raven.fablab-nea.de";
system = "x86_64-linux";
extraModules = [
hardware.common-cpu-intel

View file

@ -6,19 +6,27 @@
./services
];
nixpkgs.config = { allowAliases = false; };
console.keyMap = "de";
services.xserver.layout = "de";
services.xserver.enable = true;
services.xserver.desktopManager.gnome.enable = true;
services.xserver.displayManager.gdm.enable = true;
services.xserver.displayManager.gdm = {
enable = true;
autoSuspend = false;
};
security.sudo.wheelNeedsPassword = false;
users.users.party = {
isNormalUser = true;
password = "foobar";
extraGroups = [ "wheel" ];
extraGroups = [
"wheel"
"audio"
];
};
environment.systemPackages = with pkgs; [

View file

@ -27,12 +27,38 @@
fileSystems = {
"/" = {
device = "/dev/sda3";
device = "/dev/disk/by-uuid/740450af-f376-48d1-9a0c-25a035964700";
fsType = "btrfs";
options = [ "discard=async" "noatime" "compress=zstd" ];
options = [
"subvol=root"
"discard=async"
"compress=zstd"
];
};
"/home" = {
device = "/dev/disk/by-uuid/740450af-f376-48d1-9a0c-25a035964700";
fsType = "btrfs";
options = [
"subvol=home"
"discard=async"
"compress=zstd"
];
};
"/nix" = {
device = "/dev/disk/by-uuid/740450af-f376-48d1-9a0c-25a035964700";
fsType = "btrfs";
options = [
"subvol=nix"
"discard=async"
"compress=zstd"
"noatime"
];
};
"/boot" = {
device = "/dev/sda2";
device = "/dev/disk/by-uuid/3e24b5cf-e59f-41b1-9eef-107f808b9242";
fsType = "ext2";
};
};

View file

@ -1,31 +1,10 @@
{ inputs, lib, pkgs, ... }:
let
ledDevices = {
traverse = {
leds = 116;
host = "wled-Traverse";
};
nhecke = {
leds = 75;
host = "wled-Nhecke";
};
printerbench = {
leds = 80;
host = "wled-Printerbench";
};
resedaraum = {
leds = 285;
host = "wled-Resedaraum";
loop = true;
};
kanister = {
leds = 43;
host = "wled-Kanister";
};
dj-table-floor-02 = {
leds = 300;
host = "wled-DJ-Table-Floor-02";
};
bar = {
leds = 300;
host = "wled-Bar";
@ -107,8 +86,4 @@ in
wants = map (ledDevice: "colorchord-${soundDevice}@${ledDevice}.service") (lib.attrNames ledDevices);
})
(lib.attrNames soundDevices));
nixpkgs.overlays = with inputs; [
sbruder-overlay.overlay
];
}

View file

@ -3,6 +3,7 @@
{
imports = [
./hardware-configuration.nix
./disko.nix
./services
];
@ -72,7 +73,7 @@
};
jalr = {
isNormalUser = true;
extraGroups = [ "wheel" "docker" ];
extraGroups = [ "wheel" "docker" "audio" ];
openssh.authorizedKeys.keys = config.fablab.pubkeys.users.jalr;
};
};
@ -90,5 +91,5 @@
"192.168.94.1" = [ "raven.lab.fablab-nea.de" "labsync.lab.fablab-nea.de" ];
};
system.stateVersion = "21.05";
system.stateVersion = "24.05";
}

54
machines/raven/disko.nix Normal file
View file

@ -0,0 +1,54 @@
{
disko.devices = {
disk = {
nvme = {
type = "disk";
device = "/dev/disk/by-id/ata-WD_Green_2.5_240GB_232497451701";
content = {
type = "gpt";
partitions = {
esp = {
type = "EF00";
size = "1024M";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "uid=0" "gid=0" "fmask=0077" "dmask=0077" "nodev" "nosuid" "noexec" ];
};
};
luks = {
size = "100%";
content = {
type = "luks";
name = "raven-crypt";
settings = {
allowDiscards = true;
};
extraFormatArgs = [ "--hash sha512 --use-random --pbkdf argon2id --iter-time 5000 --pbkdf-memory ${builtins.toString (4*1024*1024)} --pbkdf-parallel 4" ];
content = {
type = "btrfs";
extraArgs = [ "-f" ];
subvolumes = {
"/root" = {
mountpoint = "/";
mountOptions = [ "compress=zstd" "noatime" ];
};
"/home" = {
mountpoint = "/home";
mountOptions = [ "compress=zstd" "noatime" "nodev" "nosuid" ];
};
"/nix" = {
mountpoint = "/nix";
mountOptions = [ "compress=zstd" "noatime" "noatime" "nodev" ];
};
};
};
};
};
};
};
};
};
};
}

View file

@ -20,31 +20,13 @@
"cryptd"
];
kernelModules = [ "dm-snapshot" ];
luks.devices.root = {
name = "root";
device = "/dev/disk/by-uuid/ee78659c-52a5-4e81-8028-b43de08b6a55";
preLVM = true;
allowDiscards = true;
};
};
loader = {
systemd-boot.enable = true;
systemd-boot.configurationLimit = 20;
efi.efiSysMountPoint = "/boot";
efi.canTouchEfiVariables = true;
};
};
fileSystems = {
"/" = {
device = "/dev/disk/by-uuid/80209d1b-27c6-423d-93e8-cd39e1893873";
fsType = "btrfs";
options = [ "discard=async" "noatime" "compress=zstd" ];
};
"/boot" = {
device = "/dev/disk/by-uuid/20A0-5FD8";
fsType = "vfat";
};
};
}

Binary file not shown.

File diff suppressed because one or more lines are too long

View file

@ -1,5 +1,6 @@
{ config, lib, ... }:
let
cfg = config.services.asterisk;
secretConfigFiles = [
"ari"
"pjsip"
@ -21,11 +22,6 @@ in
same = n,VoiceMail(7929876@fablab,su)
same => n,Hangup()
[eventphone-in]
exten => _5257,1,Noop(Processing an incoming call)
same => n,Dial(PJSIP/101,60,tT)
same => n,Hangup()
exten => _3529,1,Noop(Processing an incoming call)
same => n,Dial(PJSIP/100,60,tT)
same => n,Hangup()
@ -44,14 +40,10 @@ in
exten = _1XX,1,Dial(PJSIP/''${EXTEN},30,tT)
same = n,Hangup()
; Kassen
exten = _4XX,1,Dial(PJSIP/''${EXTEN},30,tT)
same = n,Hangup()
; eventphone
exten => _XXXX,1,Noop(Processing an outgoing eventphone call)
same = n,Set(destination=''${EXTEN})
same = n,Goto(eventphone-out,''${CALLERID(num)},1)
; weinturm
exten = 410,1,Dial(PJSIP/100&PJSIP/410,30,tT)
same = n,Hangup()
@ -61,13 +53,6 @@ in
same => n,Dial(PJSIP/''${EXTEN}@sipgate,tT)
same => n,Hangup()
[eventphone-out]
exten => 100,1,Dial(PJSIP/''${destination}@eventphone_lab,30,tT)
same = n,Hangup()
exten => 101,1,Dial(PJSIP/''${destination}@eventphone_jalr,30,tT)
same = n,Hangup()
[cisco]
exten = _1XX,1,Dial(PJSIP/''${EXTEN},30,tT)
same = n,Hangup()
@ -81,6 +66,11 @@ in
; weinturm
exten = 410,1,Dial(PJSIP/100&PJSIP/410,30,tT)
same = n,Hangup()
; Kleinturm
exten = _58X,1,Dial(PJSIP/''${EXTEN},30,tT)
same = n,Hangup()
; /weinturm
'';
"http.conf" = ''
@ -102,10 +92,25 @@ in
rtpstart=${toString rtp.start}
rtpend=${toString rtp.end}
'';
"dnsmgr.conf" = ''
[general]
enable=yes
refreshinterval=60
'';
"prometheus.conf" = ''
[general]
enabled = yes
'';
};
useTheseDefaultConfFiles = [ ];
};
system.activationScripts.copyAsteriskFiles = lib.stringAfter [ "var" ] ''
rm -f /var/lib/asterisk/documentation/core-en_US.xml
mkdir -p /var/lib/asterisk/documentation
ln -s ${cfg.package}/var/lib/asterisk/static-http/core-en_US.xml /var/lib/asterisk/documentation/core-en_US.xml
'';
sops.secrets = (lib.listToAttrs (map
(name: lib.nameValuePair "asterisk-${name}" {
sopsFile = ../secrets.yaml;

View file

@ -0,0 +1,109 @@
{ inputs, lib, pkgs, ... }:
let
ledDevices = {
workbench-1 = {
leds = 87 * 2;
host = "wled-Workbench-1";
};
workbench-2 = {
leds = 87 * 2;
host = "wled-Workbench-2";
};
elektrodecke = {
leds = 87 * 2;
host = "wled-Elektrodecke";
};
traverse = {
leds = 235;
host = "wled-Traverse";
};
nhecke = {
leds = 75;
host = "wled-Nhecke";
};
printerbench = {
leds = 80;
host = "wled-Printerbench";
};
resedaraum = {
leds = 285;
host = "wled-Resedaraum";
loop = true;
};
};
soundDevices = {
sink = "alsa_output.usb-Burr-Brown_from_TI_USB_Audio_DAC-00.analog-stereo";
};
devicesProduct = lib.fold
(soundDevice: acc: acc // lib.mapAttrs'
(ledDevice: value: lib.nameValuePair "${ledDevice}-${soundDevice.name}" (value // {
source = soundDevice.id;
}))
ledDevices)
{ }
(lib.attrValues (lib.mapAttrs (n: v: { name = n; id = v; }) soundDevices));
in
{
environment.systemPackages = with pkgs; [
colorchord2
];
environment.etc = lib.mapAttrs'
(name: config: lib.nameValuePair
"colorchord/${name}.conf"
{
text = ''
# Basic
outdrivers = DisplayNetwork, OutputLinear
headless = 1
# Audio input
amplify = 10
samplerate = 48000
devrecord = ${config.source}
# Visualiser
cpu_autolimit = 1
satamp = 1
# LED config
leds = ${toString config.leds}
is_loop = ${if config ? loop && config.loop then "1" else "0"}
light_siding = 1.5
led_floor = 0.1
steady_bright = 1
fliprg = 0
# WLED
wled_realtime = 1
port = 19446
address = ${config.host}
wled_timeout = 2
skipfirst = 0
'';
})
devicesProduct;
systemd.user.services = builtins.listToAttrs (map
(soundDevice: lib.nameValuePair
"colorchord-${soundDevice}@"
{
partOf = [ "colorchord-${soundDevice}.target" ];
serviceConfig = {
ExecStart = ''
${pkgs.colorchord2}/bin/colorchord /etc/colorchord/%i-${soundDevice}.conf
'';
Restart = "always";
};
})
(lib.attrNames soundDevices));
systemd.user.targets = builtins.listToAttrs (map
(soundDevice: lib.nameValuePair
"colorchord-${soundDevice}"
{
wants = map (ledDevice: "colorchord-${soundDevice}@${ledDevice}.service") (lib.attrNames ledDevices);
})
(lib.attrNames soundDevices));
}

View file

@ -1,10 +1,15 @@
{
imports = [
./asterisk.nix
./colorchord.nix
./dnsmasq.nix
./dyndns.nix
./freeradius.nix
./grafana.nix
./labsync
./mailhog.nix
./prometheus.nix
./unifi-controller.nix
./wekan.nix
];
}

View file

@ -20,40 +20,53 @@ in
{
services.dnsmasq = {
enable = true;
extraConfig = ''
bind-dynamic
listen-address=192.168.93.1
listen-address=192.168.94.1
interface=lo
expand-hosts
domain=lab.fablab-nea.de
dhcp-range=192.168.93.20,192.168.93.254,4h
dhcp-range=192.168.94.20,192.168.94.254,4h
dhcp-boot=lpxelinux.0,raven,192.168.94.1
cache-size=10000
dns-forward-max=1000
auth-zone=lab.fablab-nea.de,192.168.94.0/24
auth-server=lab.fablab-nea.de,78.47.224.251
no-hosts
addn-hosts=${pkgs.writeText "hosts.dnsmasq" ''
settings = {
server = [
"142.250.185.78" # dns.as250.net
"2001:470:20::2" # ordns.he.net
"74.82.42.42" # ordns.he.net
];
bind-dynamic = true;
listen-address = [
"192.168.93.1"
"192.168.94.1"
];
interface = "lo";
expand-hosts = true;
domain = "lab.fablab-nea.de";
dhcp-range = [
"set:voice,192.168.93.20,192.168.93.254,4h"
"set:lab,192.168.94.20,192.168.94.254,4h"
];
dhcp-host = [
"00:30:42:1b:23:ed,192.168.93.21,rfp-01"
"00:30:42:1b:21:c1,192.168.93.22,rfp-02"
"00:30:42:1b:26:f6,192.168.93.23,rfp-03"
"00:30:42:1b:22:3b,192.168.93.24,rfp-04"
"00:30:42:1b:22:7c,192.168.93.25,rfp-05"
];
dhcp-option = [
"vendor:OpenMobility,10,192.168.93.21"
"vendor:OpenMobility,224,OpenMobilitySIP-DECT"
];
dhcp-boot = "lpxelinux.0,raven,192.168.94.1";
cache-size = 10000;
dns-forward-max = 1000;
auth-zone = "lab.fablab-nea.de,192.168.94.0/24";
auth-server = "lab.fablab-nea.de,78.47.224.251";
no-hosts = true;
addn-hosts = "${pkgs.writeText "hosts.dnsmasq" ''
192.168.94.1 raven labsync unifi
192.168.94.2 switch
192.168.94.3 schneiderscheune-weinturm-ap
192.168.94.4 schneiderscheune-weinturm-sta
''}
'';
servers = [
"142.250.185.78" # dns.as250.net
"2001:470:20::2" # ordns.he.net
"74.82.42.42" # ordns.he.net
];
192.168.94.5 wechselbruecke-router
192.168.94.6 wechselbruecke-ap
192.168.94.7 helferbereich-sta
192.168.94.8 helferbereich-switch
192.168.94.9 kleinturmbuehne-router
''}";
};
};
systemd.services."dnsmasq-events" = {

View file

@ -6,12 +6,11 @@
services.ddclient = {
enable = true;
interval = "1min";
use = "web, web=checkip.dynu.com/, web-skip='IP Address'";
server = "api.dynu.com";
protocol = "dyndns2";
username = "fablabnea";
server = "www.duckdns.org";
protocol = "duckdns";
username = "nouser";
passwordFile = config.sops.secrets.dyndns-password.path;
domains = [ "fablab-nea.freeddns.org" ];
ipv6 = false;
domains = [ "fablab-nea" ];
use = "web, web=freedns.afraid.org/dynamic/check.php";
};
}

View file

@ -0,0 +1,28 @@
{ config, lib, pkgs, ... }:
let
domain = "grafana.fablab-nea.de";
srv = config.services.grafana.settings.server;
in
{
services.grafana = {
enable = true;
settings.server.domain = domain;
};
services.nginx.virtualHosts."${domain}" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://${srv.http_addr}:${toString srv.http_port}";
recommendedProxySettings = true;
};
extraConfig = ''
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
add_header X-Frame-Options "SAMEORIGIN";
add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options "nosniff";
'';
};
}

View file

@ -5,9 +5,7 @@ let
generator_port = 8695;
in
{
# FIXME: opentracker is disabled, because it depends on libowfat-0.32,
#which currently is marked as broken.
#services.opentracker.enable = true;
services.opentracker.enable = true;
services.nginx.virtualHosts."labsync.fablab-nea.de" = {
addSSL = true;

View file

@ -0,0 +1,4 @@
{ config, ... }:
{
services.mailhog.enable = true;
}

View file

@ -0,0 +1,144 @@
{ config, lib, pkgs, ... }:
let
domain = "prometheus.fablab-nea.de";
cfg = config.services.prometheus;
mkStaticTargets = targets: lib.singleton { inherit targets; };
mkStaticTarget = target: mkStaticTargets (lib.singleton target);
in
{
services.prometheus.exporters.node.enable = true;
services.prometheus = {
enable = true;
listenAddress = "127.0.0.1";
webExternalUrl = "https://${domain}";
globalConfig = {
scrape_interval = "15s";
evaluation_interval = "15s";
};
extraFlags = [
"--storage.tsdb.retention.time=90d"
"--web.enable-admin-api"
];
alertmanagers = [
{
static_configs = mkStaticTarget "${cfg.alertmanager.listenAddress}:${toString cfg.alertmanager.port}";
path_prefix = "/alertmanager/";
}
];
alertmanager = {
enable = true;
listenAddress = "127.0.0.1";
webExternalUrl = "https://${domain}/alertmanager";
configuration = {
global.resolve_timeout = "2m";
route = {
receiver = "matrix";
group_by = [ "alertname" ];
group_wait = "3m";
};
receivers = [
{
name = "matrix";
webhook_configs = lib.singleton {
url = "http://localhost/webhook";
};
}
];
};
};
scrapeConfigs = [
{
job_name = "prometheus";
static_configs = mkStaticTargets [
"localhost:${toString cfg.port}"
"kleinturmbuehne-router:9100"
];
}
{
job_name = "node";
static_configs = mkStaticTargets [
"127.0.0.1:9100"
];
}
{
job_name = "asterisk";
metrics_path = "/";
static_configs = mkStaticTargets [
"127.0.0.1:8088"
];
}
{
job_name = "mikrotik";
static_configs = mkStaticTargets [
"${cfg.exporters.mikrotik.listenAddress}:${toString cfg.exporters.mikrotik.port}"
];
}
{
job_name = "unifi";
static_configs = mkStaticTargets [
"${cfg.exporters.unpoller.listenAddress}:${toString cfg.exporters.unpoller.port}"
];
}
];
rules =
let
mkAlert = { name, expr, for ? "1m", description ? null }: {
alert = name;
inherit expr for;
annotations = lib.optionalAttrs (description != null) { inherit description; };
};
in
[
(lib.generators.toYAML { } {
groups = lib.singleton {
name = "alert.rules";
rules = map mkAlert [
{
name = "InstanceDown";
expr = ''up == 0'';
description = "Instance {{ $labels.instance }} of job {{ $labels.job }} has been down for
more than 1 minutes.";
}
];
};
})
];
};
sops.secrets.prometheus-htpasswd = {
owner = "nginx";
sopsFile = ../secrets.yaml;
};
services.nginx.virtualHosts."${domain}" = {
enableACME = true;
forceSSL = true;
basicAuthFile = config.sops.secrets.prometheus-htpasswd.path;
locations = {
"/".proxyPass = "http://${cfg.listenAddress}:${toString cfg.port}";
"/alertmanager/".proxyPass = "http://${cfg.alertmanager.listenAddress}:${toString cfg.alertmanager.port}";
};
};
services.prometheus.exporters.mikrotik = {
enable = true;
listenAddress = "127.0.0.1";
configuration = {
devices = [
];
features = {
bgp = true;
dhcp = true;
routes = true;
optics = true;
};
};
};
}

View file

@ -1,9 +1,30 @@
{ pkgs, ... }:
{ config, pkgs, ... }:
let
promCfg = config.services.prometheus;
in
{
services.unifi = {
enable = true;
openFirewall = true;
unifiPackage = pkgs.unifi;
unifiPackage = pkgs.unifi8;
};
networking.firewall.allowedTCPPorts = [ 8443 ];
sops.secrets.unpoller-password = {
#owner = promCfg.exporters.unpoller.user;
owner = config.services.prometheus.exporters.unpoller.user;
sopsFile = ../secrets.yaml;
};
services.prometheus.exporters.unpoller = {
enable = true;
controllers = [{
user = "unpoller";
pass = config.sops.secrets.unpoller-password.path;
verify_ssl = false;
hash_pii = true;
}];
log.prometheusErrors = true;
};
}

View file

@ -0,0 +1,123 @@
{ config, lib, pkgs, ... }:
let
serviceName = "wekan";
databaseName = "wekandb";
networkName = "wekan-tier";
port = 8001;
domain = "wekan.fablab-nea.de";
url = "https://${domain}";
directories = {
db = "/var/lib/wekan/db";
dbDump = "/var/lib/wekan/db-dump";
data = "/var/lib/wekan/data";
};
in
{
virtualisation.oci-containers = {
backend = "podman";
containers = {
"${serviceName}" = {
autoStart = true;
image = "ghcr.io/wekan/wekan:latest";
environment = {
WRITABLE_PATH = "/data";
MONGO_URL = "mongodb://${databaseName}:27017/wekan";
ROOT_URL = url;
#WITH_API = "true";
RICHER_CARD_COMMENT_EDITOR = "false";
CARD_OPENED_WEBHOOK_ENABLED = "false";
BIGEVENTS_PATTERN = "NONE";
BROWSER_POLICY_ENABLED = "true";
};
ports = [
"127.0.0.1:${toString port}:8080"
];
dependsOn = [ databaseName ];
volumes = [
"/etc/localtime:/etc/localtime:ro"
"${directories.data}:/data:rw"
];
extraOptions = [
"--network=${networkName}"
"--pull=newer"
];
};
"${databaseName}" = {
autoStart = true;
image = "mongo:6";
cmd = [ "mongod" "--logpath" "/dev/null" "--oplogSize" "128" "--quiet" ];
volumes = [
"/etc/localtime:/etc/localtime:ro"
#"/etc/timezone:/etc/timezone:ro"
"${directories.db}:/data/db"
"${directories.dbDump}:/dump"
];
extraOptions = [
"--network=${networkName}"
"--pull=newer"
];
};
};
};
# Create the netowrk
systemd.services.init-filerun-network-and-files = {
description = "Create the network bridge ${networkName} for WeKan.";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig.Type = "oneshot";
script =
let podmancli = "${pkgs.podman}/bin/podman";
in ''
if ! ${podmancli} network ls --format '{{ .Name }}' | grep -qFx -- "${networkName}"; then
${podmancli} network create "${networkName}"
else
echo "network already exists"
fi
'';
};
systemd.services.wekan-restart = {
description = "Restart Wekan services.";
serviceConfig = {
Type = "oneshot";
};
script = ''
${pkgs.systemd}/bin/systemctl restart "podman-${databaseName}.service" "podman-${serviceName}.service"
'';
};
systemd.timers.wekan-restart = {
description = "Restart wekan containers";
after = [ "network.target" ];
wantedBy = [ "timers.target" ];
timerConfig = {
Persistent = true;
OnCalendar = "*-*-* 04:00:00";
Unit = "wekan-restart.service";
};
};
system.activationScripts.makeWekanDirectories = lib.stringAfter [ "var" ] ''
mkdir -p "${directories.db}"
mkdir -p "${directories.dbDump}"
mkdir -p "${directories.data}"
chown 999:999 "${directories.data}"
'';
services.nginx.virtualHosts."${domain}" = {
enableACME = true;
forceSSL = true;
extraConfig = ''
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
add_header X-Frame-Options "SAMEORIGIN";
add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options "nosniff";
'';
locations."/" = {
proxyPass = "http://127.0.0.1:${toString port}";
};
};
}

View file

@ -1,3 +1,3 @@
{
boot.cleanTmpDir = true;
boot.tmp.cleanOnBoot = true;
}

View file

@ -2,6 +2,7 @@
imports = [
./base.nix
./nix.nix
./pipewire.nix
./pubkeys.nix
./sops.nix
./tools.nix

View file

@ -21,9 +21,6 @@ let
in
{
nix = {
# flake support
package = pkgs.nixUnstable;
extraOptions = ''
experimental-features = nix-command flakes
'';
@ -37,11 +34,13 @@ in
"nixpkgs-overlays=${overlaysCompat}"
];
# sudoers are trusted nix users
trustedUsers = [ "@wheel" ];
settings = {
# sudoers are trusted nix users
trusted-users = [ "@wheel" ];
# On-the-fly optimisation of nix store
autoOptimiseStore = true;
# On-the-fly optimisation of nix store
auto-optimise-store = true;
};
# less noticeable nix builds
daemonCPUSchedPolicy = "idle";
@ -50,8 +49,8 @@ in
};
nixpkgs.overlays = with inputs; [
self.overlay
self.overlays.default
sbruder-overlay.overlays.default
(final: prev: {
unstable = import nixpkgs-unstable {
inherit (config.nixpkgs)

24
modules/pipewire.nix Normal file
View file

@ -0,0 +1,24 @@
{ pkgs, ... }:
{
sound.enable = true;
hardware.pulseaudio.enable = false;
services.pipewire = {
enable = true;
pulse = {
enable = true;
};
jack = {
enable = false;
};
alsa = {
enable = true;
support32Bit = true;
};
};
environment.systemPackages = with pkgs; [
pulseaudio # pacmd and pactl
];
}

View file

@ -3,11 +3,11 @@
{
options.fablab.pubkeys = with lib.types; {
users = lib.mkOption {
type = attrsOf (listOf string);
type = attrsOf (listOf str);
description = "pubkeys for a specific user";
};
groups = lib.mkOption {
type = attrsOf (listOf string);
type = attrsOf (listOf str);
description = "pubkeys for a group of users";
};
};
@ -16,8 +16,7 @@
fablab.pubkeys = {
users = {
jalr = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQD0f7+Y4QSUsSvd360eq0Q/ESVfE/s0WxJIrzvW8cazTcmld8/rKxGQR2xrxApu7pzZlZC3LDbQrx3B6nNVEZi0dPUgkJz9oEKRY5vSJ6x0H9cZ0iFfTcCTz5znflqGaFI6E6W6Vtl+DzIrmkFgaR0wNmV9DCcYAJreW4E32t8dKsG1Pv347N0eAZs3shokPYr7dmGoNiKzTOn/ILQ1Hxppzqy1ch2h8k2KL0+FM6wO76ijivBzfMZRJW0DVYsmebO6Je5HglkzYXvrNUtcD2gIrNE0YKByjorTjjf3336S+0uBGxetzhnl+XA2PxHB/3n9AzYC4DI/Nb9wgLBo6Ql+EYaPLKnGl3JHvtcOyAfoNVPdNDfbZz+tfe8cBUt1IPTlm26RYKgwCnJvcBD6dk/5mxu1ogjSfgEIqihJaq3j3+NfIY1CUFx1U6ISG40SWEXF5xV1qW3NZg5FqqA8sOfWLlkON/yFkPJ2shXUXmiZtjXMWM6XLIO054EN7cpUxHGPspjgynU9XLc45c4k5lKF1xQv13B8n8dHNEL01MU21svfdGcpuOsRvzagLX51+rVRJObYP1bZudyYVDgsxB6B/TiBHw3Xl3mwEs4KVi/cqVsPpaG3hwqCreDlV+NeCVtb0qb1WJ2Sae83CA6NEcUvRbrwAnU/vEhJepfo6j7WSw== jalr@jalr-tp"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM2x+uWFR4z9MzwZnlFMgJrFXxpruZ58WukKyWrCjURj cardno:000616522763"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH3l+Yixrsjhze20CSjvUK4Qj/BNqbTNitgk20vuzPej cardno:25_750_479"
];
simon = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAJ7qUGZUjiDhQ6Se+aXr9DbgRTG2tx69owqVMkd2bna simon@mayushii"

View file

@ -23,7 +23,6 @@
compsize
curl
dnsutils
exa
fd
file
git

View file

@ -3,5 +3,6 @@
{
nixpkgs.config.allowUnfreePredicate = (pkg: lib.elem (lib.getName pkg) [
"unifi-controller"
"mongodb"
]);
}

View file

@ -1,4 +1,4 @@
{ lib, stdenvNoCC, ... }:
{ lib, freeradius, stdenvNoCC, ... }:
stdenvNoCC.mkDerivation {
name = "freeradius-anon-access";
@ -7,7 +7,11 @@ stdenvNoCC.mkDerivation {
installPhase = ''
mkdir $out
cp -r raddb $out
sed -i 's#@PREFIX@#${freeradius}#' $out/raddb/radiusd.conf
'';
nativeBuildInputs = [
freeradius
];
meta = with lib; {
platforms = platforms.unix;
};

View file

@ -1,116 +0,0 @@
Modules in Version 3
====================
As of Version 3, all of the modules have been placed in the
"mods-available/" directory. This practice follows that used by other
servers such as Nginx, Apache, etc. The "modules" directory should
not be used.
Modules are enabled by creating a file in the mods-enabled/ directory.
You can also create a soft-link from one directory to another::
$ cd raddb/mods-enabled
$ ln -s ../mods-available/foo
This will enable module "foo". Be sure that you have configured the
module correctly before enabling it, otherwise the server will not
start. You can verify the server configuration by running
"radiusd -XC".
A large number of modules are enabled by default. This allows the
server to work with the largest number of authentication protocols.
Please be careful when disabling modules. You will likely need to
edit the "sites-enabled/" files to remove references to any disabled
modules.
Conditional Modules
-------------------
Version 3 allows modules to be conditionally loaded. This is useful
when you want to have a virtual server which references a module, but
does not require it. Instead of editing the virtual server file, you
can just conditionally enable the module.
Modules are conditionally enabled by adding a "-" before their name in
a virtual server. For example, you can do::
server {
authorize {
...
ldap
-sql
...
}
}
This says "require the LDAP module, but use the SQL module only if it
is configured."
This feature is not very useful for production configurations. It is,
however, very useful for the default examples that ship with the
server.
Ignoring module
---------------
If you see this message::
Ignoring module (see raddb/mods-available/README.rst)
Then you are in the right place. Most of the time this message can be
ignored. The message can be fixed by finding the references to "-module"
in the virtual server, and deleting them.
Another way to fix it is to configure the module, as described above.
Simplification
--------------
Allowing conditional modules simplifies the default virtual servers
that are shipped with FreeRADIUS. This means that if you want to
enable LDAP (for example), you no longer need to edit the files in
raddb/sites-available/ in order to enable it.
Instead, you should edit the raddb/mods-available/ldap file to point
to your local LDAP server. Then, enable the module via the soft-link
method described above.
Once the module is enabled, it will automatically be used in the
default configuration.
Multiple Instances
------------------
It is sometimes necessary to have the same module do two different
things. The server supports this functionality via "instances" of
modules.
Normally, a module configuration looks like this:
sql {
... sql stuff ...
}
This module is then refereed to as the "sql" module.
But what happens if you want to connect to two different SQL
databases? The solution is simple; copy the "sql" module
configuration, and add an instance name after the "sql" string:
sql mysql1 {
... configuration for connecting to mysql11 ...
}
sql mysql2 {
... configuration for connecting to mysql12 ...
}
This configuration says "load the SQL module, but create two copies of
it, with different configurations". The different configurations can
be referred to by name, as "mysql1" and "mysql2". That is, anywhere
you would normally use "sql", you could use either "mysql1" or
"mysql2".
For further examples of using module instances, see the "attr_filter"
module configuration in this directory.

View file

@ -1,15 +0,0 @@
# -*- text -*-
##
## Module for PSK authorizations from ABFAB trust router
##
## $Id: d75130da8a9faeb9712619bf49e68afadc30b73a $
sql psksql {
driver = "rlm_sql_sqlite"
sqlite {
filename = "/var/lib/trust_router/keys"
}
}

View file

@ -1,132 +0,0 @@
# -*- text -*-
#
# $Id: 8bd4730cf570fdfedc9c516dc6974eab39981600 $
#
# A module to cache attributes. The idea is that you can look
# up information in a database, and then cache it. Repeated
# requests for the same information will then have the cached
# values added to the request.
#
# The module can cache a fixed set of attributes per key.
# It can be listed in "authorize", "post-auth", "pre-proxy"
# and "post-proxy".
#
# If you want different things cached for authorize and post-auth,
# you will need to define two instances of the "cache" module.
#
# The module returns "ok" if it found or created a cache entry.
# The module returns "updated" if it merged a cached entry.
# The module returns "noop" if it did nothing.
# The module returns "fail" on error.
#
cache {
# The backend datastore used to store the cache entries.
# Current datastores are
# rlm_cache_rbtree - An in memory, non persistent rbtree based datastore.
# Useful for caching data locally.
# rlm_cache_memcached - A non persistent "webscale" distributed datastore.
# Useful if the cached data need to be shared between
# a cluster of RADIUS servers.
# driver = "rlm_cache_rbtree"
#
# Some drivers accept specific options. To set them, a
# config section with the same name as the driver should be added
# to the cache instance.
#
# Driver specific options are:
#
# memcached {
# # Memcached configuration options, as documented here:
# # http://docs.libmemcached.org/libmemcached_configuration.html#memcached
# options = "--SERVER=localhost"
#
# pool {
# start = ${thread[pool].start_servers}
# min = ${thread[pool].min_spare_servers}
# max = ${thread[pool].max_servers}
# spare = ${thread[pool].max_spare_servers}
# uses = 0
# lifetime = 0
# idle_timeout = 60
# }
# }
# The key used to index the cache. It is dynamically expanded
# at run time.
key = "%{User-Name}"
# The TTL of cache entries, in seconds. Entries older than this
# will be expired.
#
# This value should be between 10 and 86400.
ttl = 10
# If yes the following attributes will be added to the request:
# * &request:Cache-Entry-Hits - The number of times this entry
# has been retrieved.
#
# Note: Not supported by the rlm_cache_memcached module.
add_stats = no
#
# The list of attributes to cache for a particular key.
#
# Each key gets the same set of cached attributes. The attributes
# are dynamically expanded at run time.
#
# The semantics of this construct are identical to an unlang
# update block, except the left hand side of the expression
# represents the cache entry. see man unlang for more information
# on update blocks.
#
# Note: Only request, reply, control and session-state lists
# are available in cache entries. Attempting to store attributes
# in other lists will raise an error during config validation.
#
update {
# <list>:<attribute> <op> <value>
# Cache all instances of Reply-Message in the reply list
&reply:Reply-Message += &reply:Reply-Message[*]
# Add our own to show when the cache was last updated
&reply:Reply-Message += "Cache last updated at %t"
&reply:Class := "%{randstr:ssssssssssssssssssssssssssssssss}"
}
# This module supports a number of runtime configuration parameters
# represented by attributes in the &control: list.
#
# &control:Cache-TTL - Sets the TTL of an entry to be created, or
# modifies the TTL of an existing entry.
# - Setting a Cache-TTL of > 0 means set the TTL of the entry to
# the new value (and reset the expiry timer).
# - Setting a Cache-TTL of < 0 means expire the existing entry
# (without merging) and create a new one with TTL set to
# value * -1.
# - Setting a Cache-TTL of 0 means expire the existing entry
# (without merging) and don't create a new one.
#
# &control:Cache-Status-Only - If present and set to 'yes' will
# prevent a new entry from being created, and existing entries from
# being merged. It will also alter the module's return codes.
# - The module will return "ok" if a cache entry was found.
# - The module will return "notfound" if no cache entry was found.
#
# &control:Cache-Read-Only - If present and set to 'yes' will
# prevent a new entry from being created, but will allow existing
# entries to be merged. It will also alter the module's return codes.
# - The module will return "updated" if a cache entry was found.
# - The module will return "notfound" if no cache was found.
#
# &control:Cache-Merge - If present and set to 'yes' will merge new
# cache entries into the current request. Useful if results
# of execs or expansions are stored directly in the cache.
#
# All runtime configuration attributes will be removed from the
# &control: list after the cache module is called.
}

View file

@ -1,205 +0,0 @@
couchbase {
#
# List of Couchbase hosts (hosts may be space, tab, comma or semi-colon separated).
# Ports are optional if servers are listening on the standard port.
# Complete pool urls are preferred.
#
server = "http://cb01.blargs.com:8091/pools/ http://cb04.blargs.com:8091/pools/"
# Couchbase bucket name
bucket = "radius"
# Couchbase bucket password (optional)
#password = "password"
# Couchbase accounting document key (unlang supported)
acct_key = "radacct_%{%{Acct-Unique-Session-Id}:-%{Acct-Session-Id}}"
# Value for the 'docType' element in the json body for accounting documents
doctype = "radacct"
## Accounting document expire time in seconds (0 = never)
expire = 2592000
#
# Map attribute names to json element names for accounting.
#
# Configuration items are in the format:
# <radius attribute> = '<element name>'
#
# Element names should be single quoted.
#
# Note: Attributes not in this map will not be recorded.
#
update {
Acct-Session-Id = 'sessionId'
Acct-Unique-Session-Id = 'uniqueId'
Acct-Status-Type = 'lastStatus'
Acct-Authentic = 'authentic'
User-Name = 'userName'
Stripped-User-Name = 'strippedUserName'
Stripped-User-Domain = 'strippedUserDomain'
Realm = 'realm'
NAS-IP-Address = 'nasIpAddress'
NAS-Identifier = 'nasIdentifier'
NAS-Port = 'nasPort'
Called-Station-Id = 'calledStationId'
Called-Station-SSID = 'calledStationSSID'
Calling-Station-Id = 'callingStationId'
Framed-Protocol = 'framedProtocol'
Framed-IP-Address = 'framedIpAddress'
NAS-Port-Type = 'nasPortType'
Connect-Info = 'connectInfo'
Acct-Session-Time = 'sessionTime'
Acct-Input-Packets = 'inputPackets'
Acct-Output-Packets = 'outputPackets'
Acct-Input-Octets = 'inputOctets'
Acct-Output-Octets = 'outputOctets'
Acct-Input-Gigawords = 'inputGigawords'
Acct-Output-Gigawords = 'outputGigawords'
Event-Timestamp = 'lastUpdated'
}
# Couchbase document key for user documents (unlang supported)
user_key = "raduser_%{md5:%{tolower:%{%{Stripped-User-Name}:-%{User-Name}}}}"
# Set to 'yes' to read radius clients from the Couchbase view specified below.
# NOTE: Clients will ONLY be read on server startup.
#read_clients = no
#
# Map attribute names to json element names when loading clients.
#
# Configuration follows the same rules as the accounting map above.
#
client {
# Couchbase view that should return all available client documents.
view = "_design/client/_view/by_id"
#
# Sets default values (not obtained from couchbase) for new client entries
#
template {
# login = 'test'
# password = 'test'
# proto = tcp
# require_message_authenticator = yes
# Uncomment to add a home_server with the same
# attributes as the client.
# coa_server {
# response_window = 2.0
# }
}
#
# Client mappings are in the format:
# <client attribute> = '<element name>'
#
# Element names should be single quoted.
#
# The following attributes are required:
# * ipaddr | ipv4addr | ipv6addr - Client IP Address.
# * secret - RADIUS shared secret.
#
# All attributes usually supported in a client
# definition are also supported here.
#
attribute {
ipaddr = 'clientIdentifier'
secret = 'clientSecret'
shortname = 'clientShortname'
nas_type = 'nasType'
virtual_server = 'virtualServer'
require_message_authenticator = 'requireMessageAuthenticator'
limit {
max_connections = 'maxConnections'
lifetime = 'clientLifetime'
idle_timeout = 'idleTimeout'
}
}
}
# Set to 'yes' to enable simultaneous use checking (multiple logins).
# NOTE: This will cause the execution of a view request on every check
# and may be a performance penalty.
# check_simul = no
# Couchbase view that should return all account documents keyed by username.
# simul_view = "_design/acct/_view/by_user"
# The key to the above view.
# NOTE: This will need to match EXACTLY what you emit from your view.
# simul_vkey = "%{tolower:%{%{Stripped-User-Name}:-%{User-Name}}}"
# Set to 'yes' to enable verification of the results returned from the above view.
# NOTE: This may be an additional performance penalty to the actual check and
# should be avoided unless absolutely necessary.
# verify_simul = no
# Remove stale session if checkrad does not see a double login.
# NOTE: This will only be executed if both check_simul and verify_simul
# are set to 'yes' above.
# delete_stale_sessions = yes
#
# The connection pool is new for 3.0, and will be used in many
# modules, for all kinds of connection-related activity.
#
pool {
# Connections to create during module instantiation.
# If the server cannot create specified number of
# connections during instantiation it will exit.
# Set to 0 to allow the server to start without the
# couchbase being available.
start = ${thread[pool].start_servers}
# Minimum number of connections to keep open
min = ${thread[pool].min_spare_servers}
# Maximum number of connections
#
# If these connections are all in use and a new one
# is requested, the request will NOT get a connection.
#
# Setting 'max' to LESS than the number of threads means
# that some threads may starve, and you will see errors
# like 'No connections available and at max connection limit'
#
# Setting 'max' to MORE than the number of threads means
# that there are more connections than necessary.
max = ${thread[pool].max_servers}
# Spare connections to be left idle
#
# NOTE: Idle connections WILL be closed if "idle_timeout"
# is set. This should be less than or equal to "max" above.
spare = ${thread[pool].max_spare_servers}
# Number of uses before the connection is closed
#
# 0 means "infinite"
uses = 0
# The lifetime (in seconds) of the connection
#
# NOTE: A setting of 0 means infinite (no limit).
lifetime = 0
# The idle timeout (in seconds). A connection which is
# unused for this length of time will be closed.
#
# NOTE: A setting of 0 means infinite (no timeout).
idle_timeout = 1200
# NOTE: All configuration settings are enforced. If a
# connection is closed because of "idle_timeout",
# "uses", or "lifetime", then the total number of
# connections MAY fall below "min". When that
# happens, it will open a new connection. It will
# also log a WARNING message.
#
# The solution is to either lower the "min" connections,
# or increase lifetime/idle_timeout.
}
}

View file

@ -1,82 +0,0 @@
# -*- text -*-
#
# $Id: a5ac1e60ef117a2c59ace1a9d061d8f70d1da538 $
# counter module:
# This module takes an attribute (count-attribute).
# It also takes a key, and creates a counter for each unique
# key. The count is incremented when accounting packets are
# received by the server. The value of the increment depends
# on the attribute type.
# If the attribute is Acct-Session-Time or of an integer type we add
# the value of the attribute. If it is anything else we increase the
# counter by one.
#
# The 'reset' parameter defines when the counters are all reset to
# zero. It can be hourly, daily, weekly, monthly or never.
#
# hourly: Reset on 00:00 of every hour
# daily: Reset on 00:00:00 every day
# weekly: Reset on 00:00:00 on sunday
# monthly: Reset on 00:00:00 of the first day of each month
#
# It can also be user defined. It should be of the form:
# num[hdwm] where:
# h: hours, d: days, w: weeks, m: months
# If the letter is omitted days will be assumed. For example:
# reset = 10h (reset every 10 hours)
# reset = 12 (reset every 12 days)
#
#
# The check_name attribute defines an attribute which will be
# registered by the counter module and can be used to set the
# maximum allowed value for the counter after which the user
# is rejected.
# Something like:
#
# DEFAULT Max-Daily-Session := 36000
# Fall-Through = 1
#
# You should add the counter module in the instantiate
# section so that it registers check_name before the files
# module reads the users file.
#
# If check_name is set and the user is to be rejected then we
# send back a Reply-Message and we log a Failure-Message in
# the radius.log
#
# If the count attribute is Acct-Session-Time then on each
# login we send back the remaining online time as a
# Session-Timeout attribute. Otherwise, if the reply_name is
# set, we send back that attribute. The reply_name attribute
# MUST be of an integer type.
#
# The counter-name can also be used instead of using the check_name
# like below:
#
# DEFAULT Daily-Session-Time > 3600, Auth-Type = Reject
# Reply-Message = "You've used up more than one hour today"
#
# The allowed_service_type attribute can be used to only take
# into account specific sessions. For example if a user first
# logs in through a login menu and then selects ppp there will
# be two sessions. One for Login-User and one for Framed-User
# service type. We only need to take into account the second one.
#
# The module should be added in the instantiate, authorize and
# accounting sections. Make sure that in the authorize
# section it comes after any module which sets the
# 'check_name' attribute.
#
counter daily {
filename = ${db_dir}/db.daily
key = User-Name
count_attribute = Acct-Session-Time
reset = daily
counter_name = Daily-Session-Time
check_name = Max-Daily-Session
reply_name = Session-Timeout
allowed_service_type = Framed-User
cache_size = 5000
}

View file

@ -1,53 +0,0 @@
# -*- text -*-
#
# $Id: 54842d4106800babe8db1d58d2e8b7a5cad017db $
#
# Write Chargeable-User-Identity to the database.
#
# Schema raddb/mods-config/sql/cui/<DB>/schema.sql
# Queries raddb/mods-config/sql/cui/<DB>/queries.conf
#
sql cuisql {
# The dialect of SQL you want to use, this should usually match
# the driver below.
#
# If you're using rlm_sql_null, then it should be the type of
# database the logged queries are going to be executed against.
dialect = "sqlite"
# The sub-module to use to execute queries. This should match
# the database you're attempting to connect to.
#
# There are CUI queries available for:
# * rlm_sql_mysql
# * rlm_sql_postgresql
# * rlm_sql_sqlite
# * rlm_sql_null (log queries to disk)
#
driver = "rlm_sql_${dialect}"
sqlite {
filename = ${radacctdir}/cui.sqlite
bootstrap = ${modconfdir}/${..:name}/cui/sqlite/schema.sql
}
# Write CUI queries to a logfile. Useful for debugging.
# logfile = ${logdir}/cuilog.sql
pool {
start = 5
min = 4
max = 10
spare = 3
uses = 0
lifetime = 0
idle_timeout = 60
}
cui_table = "cui"
sql_user_name = "%{User-Name}"
$INCLUDE ${modconfdir}/${.:name}/cui/${dialect}/queries.conf
}

View file

@ -1,27 +0,0 @@
# -*- text -*-
#
# Detail file writer, used in the following examples:
#
# raddb/sites-available/robust-proxy-accounting
# raddb/sites-available/decoupled-accounting
#
# Note that this module can write detail files that are read by
# only ONE "listen" section. If you use BOTH of the examples
# above, you will need to define TWO "detail" modules.
#
# e.g. detail1.example.com && detail2.example.com
#
#
# We write *multiple* detail files here. They will be processed by
# the detail "listen" section in the order that they were created.
# The directory containing these files should NOT be used for any
# other purposes. i.e. It should have NO other files in it.
#
# Writing multiple detail files enables the server to process the pieces
# in smaller chunks. This helps in certain catastrophic corner cases.
#
# $Id: 827cdf57e70dc2ff2252016194f4bb846eecead2 $
#
detail detail.example.com {
filename = ${radacctdir}/detail.example.com/detail-%Y%m%d:%H:%G
}

View file

@ -1,19 +0,0 @@
# -*- text -*-
#
# $Id: a4316335d7f73b37ec5aa9278de91d37dd28eddc $
#
# This module is useful only for 'xlat'. To use it,
# put 'dhcp' into the 'instantiate' section.
#
# %{dhcp_options:<Attribute-ref>} may be used to decode
# DHCP options data included in RADIUS packets by vendors
# of DHCP to RADIUS gateways.
#
# This is known to work with the following VSAs:
# * Juniper - ERX-Dhcp-Options
# * Alcatel lucent SR - Alc-ToServer-Dhcp-Options
# - Alc-ToClient-Dhcp-Options
#
dhcp {
}

View file

@ -1,56 +0,0 @@
# -*- text -*-
#
# $Id: 243a241a8d28d8de9696e5364c59e25558789219 $
# Instances of the "files" module for managing DHCP options
#
files dhcp_network {
# The file containing network-specific DHCP options mapping
filename = ${modconfdir}/files/dhcp
# For network lookups we use a fixed key. Matching
# actual networks is done by additional filtering within
# the file
key = "network"
}
files dhcp_subnet {
# The file containing subnet-specific DHCP options mapping
filename = ${modconfdir}/files/dhcp
# For subnet lookups we use a fixed key. Matching
# actual subnets is done by additional filtering within
# the file
key = "subnet"
}
files dhcp_set_group_options {
# An example of looking up DHCP group options. This
# is designed to be called from a policy configured in
# policy.d/dhcp.
#
# If clients are never members of more than one group,
# then this could be simplified such that DHCP-Group-Name
# is used here in place of Foreach-Variable-0 and this
# module instance called directly rather than the policy
# Use the same file as for subnets - could be split
# for large, complex installations
filename = ${modconfdir}/files/dhcp
# The key is a temporary string populated by the calling policy
# which uses a foreach loop.
key = "%{Foreach-Variable-0}"
}
files dhcp_hosts {
# An example of a DHCP host mapping for option setting
# Use the same file as for subnets - could be split
# for large, complex installations
filename = ${modconfdir}/files/dhcp
# If a different identifier is needed for looking up
# host specific entries then amend this key.
key = "host-%{DHCP-Client-Hardware-Address}"
}

View file

@ -1,20 +0,0 @@
# -*- text -*-
#
# $Id: 7884a00298935db8e33fd9f850c2619e61d9b5a9 $
# An instance of the passwd module designed for looking up
# DHCP client membership. This example is based on hardware
# address.
# The "groups" file should be of the format:
# <group name>|<hardware address>,<hardware address>,<hardware address>
# <group name>|<hardware address>,<hardware address>,<hardware address>
#
# See the passwd module for more details.
passwd dhcp_group_membership {
filename = "${modconfdir}/files/dhcp_groups"
format = "~DHCP-Group-Name:*,DHCP-Client-Hardware-Address"
hash_size = 100
allow_multiple_keys = yes
delimiter = "|"
}

View file

@ -1,92 +0,0 @@
# -*- text -*-
##
## mods-available/sql -- SQL modules
##
## $Id: 20dbe3a35be942acaaec8ee0ced7e85786fc46a7 $
######################################################################
#
# Configuration for the DHCP-specific instance of the SQL module
#
# The database schemas and queries are located in subdirectories:
#
# sql/dhcp/<DB>/schema.sql Schema
# sql/dhcp/<DB>/queries.conf Reply options lookup queries
#
# Where "DB" is mysql, mssql, oracle, or postgresql.
#
#
# See raddb/mods-available/sql for a description of the configuration items
# for the sql module.
#
sql dhcp_sql {
dialect = "sqlite"
driver = "rlm_sql_null"
# driver = "rlm_sql_${dialect}"
sqlite {
filename = "/tmp/freeradius.db"
busy_timeout = 200
bootstrap = "${modconfdir}/${..:name}/dhcp/sqlite/schema.sql"
}
mysql {
tls {
ca_file = "/etc/ssl/certs/my_ca.crt"
ca_path = "/etc/ssl/certs/"
certificate_file = "/etc/ssl/certs/private/client.crt"
private_key_file = "/etc/ssl/certs/private/client.key"
cipher = "DHE-RSA-AES256-SHA:AES128-SHA"
tls_required = yes
tls_check_cert = no
tls_check_cert_cn = no
}
warnings = auto
}
postgresql {
send_application_name = yes
}
mongo {
appname = "freeradius"
tls {
certificate_file = /path/to/file
certificate_password = "password"
ca_file = /path/to/file
ca_dir = /path/to/directory
crl_file = /path/to/file
weak_cert_validation = false
allow_invalid_hostname = false
}
}
# server = "localhost"
# port = 3306
# login = "radius"
# password = "radpass"
radius_db = "radius"
dhcpreply_table = "dhcpreply"
groupreply_table = "dhcpgroupreply"
dhcpgroup_table = "dhcpgroup"
read_groups = no
pool {
start = ${thread[pool].start_servers}
min = ${thread[pool].min_spare_servers}
max = ${thread[pool].max_servers}
spare = ${thread[pool].max_spare_servers}
uses = 0
retry_delay = 30
lifetime = 0
idle_timeout = 60
}
group_attribute = "${.:instance}-SQL-Group"
$INCLUDE ${modconfdir}/${.:name}/dhcp/${dialect}/queries.conf
}

View file

@ -1,101 +0,0 @@
# Configuration for DHCP for the SQL based IP Pools module (rlm_sqlippool).
#
# See raddb/mods-available/sqlippool for common configuration explanation
#
# See raddb/policy.d/dhcp_sqlippool for the "glue" code that allows
# the RADIUS based "sqlippool" module to be used for DHCP.
#
# See raddb/sites-available/dhcp for instructions on how to configure
# the DHCP server.
#
# The database schemas are available at:
#
# raddb/mods-config/sql/ippool-dhcp/<DB>/schema.sql
#
# $Id: 909b93c7ebcbbeb16b123ca38e696790b5771dda $
sqlippool dhcp_sqlippool {
# SQL instance to use (from mods-available/sql)
#
# If you have multiple sql instances, such as "sql sql1 {...}",
# use the *instance* name here: sql1.
sql_module_instance = "dhcp_sql"
# This is duplicative of info available in the SQL module, but
# we have to list it here as we do not yet support nested
# reference expansions.
dialect = "mysql"
# Name of the check item attribute to be used as a key in the SQL queries
pool_name = "Pool-Name"
# SQL table to use for ippool range and lease info
ippool_table = "dhcpippool"
# The duration for which a lease is reserved whilst under offer
offer_duration = 10
# IP lease duration. (Leases expire even if no DHCP-Release packet is received)
# Either use the value to be sent to the client or a hard coded one.
lease_duration = "%{reply:DHCP-IP-Address-Lease-Time}"
#lease_duration = 7200
# The attribute in which the IP address is returned in the reply
attribute_name = "DHCP-Your-IP-Address"
# Assign the IP address, even if the above attribute already exists in
# the reply.
#
# allow_duplicates = no
# The attribute in which an IP address hint may be supplied
req_attribute_name = "DHCP-Requested-IP-Address"
#
# RFC 2132 allows the DHCP client to supply a unique
# identifier ("uid") using Option 61 (DHCP-Client-Identifier)
# in which case it must be used as the lookup key for
# configuration data.
#
pool_key = "%{%{DHCP-Client-Identifier}:-%{DHCP-Client-Hardware-Address}}"
#
# The "uid" is generated by the OS which means that clients
# whose BMC piggybacks on the main interface (sharing its MAC,
# but generating a distinct uid) and dual-booting clients can
# be allocated multiple IPs, consuming more pool entries. To
# avoid this you can ignore the RFCs and key the configuration
# data based only on the client MAC address.
#
# pool_key = "%{DHCP-Client-Hardware-Address}"
################################################################
#
# WARNING: MySQL (MyISAM) has certain limitations that means it can
# hand out the same IP address to 2 different users.
#
# We suggest using an SQL DB with proper transaction
# support, such as PostgreSQL, or using MySQL
# with InnoDB.
#
################################################################
# These messages are added to the "control" items, as
# Module-Success-Message. They are not logged anywhere else,
# unlike previous versions. If you want to have them logged
# to a file, see the "linelog" module, and create an entry
# which writes Module-Success-Message message.
#
messages {
exists = "DHCP: Existing IP: %{reply:${..attribute_name}} (cid %{DHCP-Client-Identifier} chaddr %{DHCP-Client-Hardware-Address} giaddr %{DHCP-Gateway-IP-Address})"
success = "DHCP: Allocated IP: %{reply:${..attribute_name}} from %{control:${..pool_name}} (cid %{DHCP-Client-Identifier} chaddr %{DHCP-Client-Hardware-Address} giaddr %{DHCP-Gateway-IP-Address})"
clear = "DHCP: Released IP %{DHCP-Client-IP-Address} (cid %{DHCP-Client-Identifier} chaddr %{DHCP-Client-Hardware-Address} giaddr %{DHCP-Gateway-IP-Address})"
failed = "DHCP: IP Allocation FAILED from %{control:${..pool_name}} (cid %{DHCP-Client-Identifier} chaddr %{DHCP-Client-Hardware-Address} giaddr %{DHCP-Gateway-IP-Address})"
nopool = "DHCP: No ${..pool_name} defined (cid %{DHCP-Client-Identifier} chaddr %{DHCP-Client-Hardware-Address} giaddr %{DHCP-Gateway-IP-Address})"
}
$INCLUDE ${modconfdir}/sql/ippool-dhcp/${dialect}/queries.conf
}

View file

@ -1,28 +0,0 @@
# -*- text -*-
#
# $Id: f58b72f560ba067991d67295b546691bcd992d44 $
# "passwd" configuration, for the /etc/group file. Adds an Etc-Group-Name
# attribute for every group that the user is member of.
#
# You will have to define the Etc-Group-Name in the 'dictionary' file
# as a 'string' type.
#
# The Group and Group-Name attributes are automatically created by
# the Unix module, and do checking against /etc/group automatically.
# This means that you CANNOT use Group or Group-Name to do any other
# kind of grouping in the server. You MUST define a new group
# attribute.
#
# i.e. this module should NOT be used as-is, but should be edited to
# point to a different group file.
#
passwd etc_group {
filename = /etc/group
format = "=Etc-Group-Name:::*,User-Name"
hash_size = 50
ignore_nislike = yes
allow_multiple_keys = yes
delimiter = ":"
}

View file

@ -1,28 +0,0 @@
# -*- text -*-
#
# $Id: 534054077d52a7bb0bf8e02c1e861e5c86b76df9 $
#
# Internationalised domain names.
#
# The expansion string: %{idn: example.com} results in an ASCII
# punycode version of the domain name. That version can then be used
# for name comparisons. Using an i18n version of the name is NOT
# RECOMMENDED, as that version is not canonical.
#
# i.e. the "same" domain name can be represented in many, many,
# different ways. Only the idn version has *one* representation.
#
idn {
#
# Allow use of unassigned Unicode code points.
#
allow_unassigned = no
#
# Prohibit underscores and other invalid characters in domain
# names.
use_std3_ascii_rules = yes
}

View file

@ -1,107 +0,0 @@
# -*- text -*-
#
# $Id: 576eb7739ebf18ca6323cb740a7d4278ff6d6ea2 $
#
# Sample configuration for an EAP module that occurs *inside*
# of a tunneled method. It is used to limit the EAP types that
# can occur inside of the inner tunnel.
#
# See also raddb/sites-available/inner-tunnel
#
# See raddb/mods-available/eap for full documentation on the meaning of these
# configuration entries.
#
eap inner-eap {
# This is the best choice for PEAP.
default_eap_type = mschapv2
timer_expire = 60
# This should be the same as the outer eap "max sessions"
max_sessions = 2048
# Supported EAP-types
md5 {
}
gtc {
# The default challenge, which many clients
# ignore..
#challenge = "Password: "
auth_type = PAP
}
mschapv2 {
# See eap for documentation
# send_error = no
}
# No TTLS or PEAP configuration should be listed here.
## EAP-TLS
#
# You SHOULD use different certificates than are used
# for the outer EAP configuration!
#
# You can create the "inner-server.pem" file by doing:
#
# cd raddb/certs
# vi inner-server.cnf
# make inner-server
#
# The certificate MUST be different from the "server.cnf"
# file.
#
# Support for PEAP/TLS and RFC 5176 TLS/TLS is experimental.
# It might work, or it might not.
#
tls {
private_key_password = whatever
private_key_file = ${certdir}/inner-server.pem
# If Private key & Certificate are located in
# the same file, then private_key_file &
# certificate_file must contain the same file
# name.
#
# If ca_file (below) is not used, then the
# certificate_file below MUST include not
# only the server certificate, but ALSO all
# of the CA certificates used to sign the
# server certificate.
certificate_file = ${certdir}/inner-server.pem
# You may want different CAs for inner and outer
# certificates. If so, edit this file.
ca_file = ${cadir}/ca.pem
cipher_list = "DEFAULT"
# You may want to set a very small fragment size.
# The TLS data here needs to go inside of the
# outer EAP-TLS protocol.
#
# Try values and see if they work...
# fragment_size = 1024
# Other needful things
dh_file = ${certdir}/dh
random_file = /dev/urandom
# CRL and OCSP things go here. See the main "eap"
# file for details.
# check_crl = yes
# ca_path = /path/to/directory/with/ca_certs/and/crls/
# Accept an expired Certificate Revocation List
#
# allow_expired_crl = no
#
# The session resumption / fast re-authentication
# cache CANNOT be used for inner sessions.
#
}
}

View file

@ -1,66 +0,0 @@
# -*- text -*-
#
# $Id: 1d3305ba45ec71336f55f8f1db05f183772e1b82 $
# Do server side ip pool management. Should be added in
# post-auth and accounting sections.
#
# The module also requires the existence of the Pool-Name
# attribute. That way the administrator can add the Pool-Name
# attribute in the user profiles and use different pools for
# different users. The Pool-Name attribute is a *check* item
# not a reply item.
#
# The Pool-Name should be set to the ippool module instance
# name or to DEFAULT to match any module.
#
# Example:
# radiusd.conf: ippool students { [...] }
# ippool teachers { [...] }
# users file : DEFAULT Group == students, Pool-Name := "students"
# DEFAULT Group == teachers, Pool-Name := "teachers"
# DEFAULT Group == other, Pool-Name := "DEFAULT"
#
# Note: If you change the range parameters you must then erase the
# db files.
#
ippool main_pool {
# The main db file used to allocate addresses.
filename = ${db_dir}/db.ippool
# The start and end ip addresses for this pool.
range_start = 192.0.2.1
range_stop = 192.0.2.254
# The network mask used for this pool.
netmask = 255.255.255.0
# The gdbm cache size for the db files. Should
# be equal to the number of ip's available in
# the ip pool
cache_size = 800
# Helper db index file used in multilink
ip_index = ${db_dir}/db.ipindex
# If set, the Framed-IP-Address already in the
# reply (if any) will be discarded, and replaced
# with a Framed-IP-Address assigned here.
override = no
# Specifies the maximum time in seconds that an
# entry may be active. If set to zero, means
# "no timeout". The default value is 0
maximum_timeout = 0
# The key to use for the session database (which
# holds the allocated ip's) normally it should
# just be the nas ip/port (which is the default).
#
# If your NAS sends the same value of NAS-Port
# all requests, the key should be based on some
# other attribute that is in ALL requests, AND
# is unique to each machine needing an IP address.
# key = "%{NAS-IP-Address} %{NAS-Port}"
}

View file

@ -1,82 +0,0 @@
# -*- text -*-
#
# $Id: c88b5fbb4b35cc4e61bfb93a616d891fb79ebc0c $
#
# Kerberos. See doc/modules/rlm_krb5 for minimal docs.
#
krb5 {
#
# The keytab file MUST be owned by the UID/GID used by the server.
# The keytab file MUST be writable by the server.
# The keytab file MUST NOT be readable by other users on the system.
# The keytab file MUST exist before the server is started.
#
keytab = ${localstatedir}/lib/radiusd/keytab
# NOTE(review): 'name_of_principle' looks like a placeholder — replace it
# with the actual Kerberos service principal for this server.
service_principal = name_of_principle
# Pool of krb5 contexts, this allows us to make the module multithreaded
# and to avoid expensive operations like resolving and opening keytabs
# on every request. It may also allow TCP connections to the KDC to be
# cached if that is supported by the version of libkrb5 used.
#
# The context pool is only used if the underlying libkrb5 reported
# that it was thread safe at compile time.
#
pool {
# Connections to create during module instantiation.
# If the server cannot create specified number of
# connections during instantiation it will exit.
# Set to 0 to allow the server to start without the
# KDC being available.
start = ${thread[pool].start_servers}
# Minimum number of connections to keep open
min = ${thread[pool].min_spare_servers}
# Maximum number of connections
#
# If these connections are all in use and a new one
# is requested, the request will NOT get a connection.
#
# Setting 'max' to LESS than the number of threads means
# that some threads may starve, and you will see errors
# like 'No connections available and at max connection limit'
#
# Setting 'max' to MORE than the number of threads means
# that there are more connections than necessary.
max = ${thread[pool].max_servers}
# Spare connections to be left idle
#
# NOTE: Idle connections WILL be closed if "idle_timeout"
# is set. This should be less than or equal to "max" above.
spare = ${thread[pool].max_spare_servers}
# Number of uses before the connection is closed
#
# 0 means "infinite"
uses = 0
# The lifetime (in seconds) of the connection
#
# NOTE: A setting of 0 means infinite (no limit).
lifetime = 0
# The idle timeout (in seconds). A connection which is
# unused for this length of time will be closed.
#
# NOTE: A setting of 0 means infinite (no timeout).
idle_timeout = 0
# NOTE: All configuration settings are enforced. If a
# connection is closed because of "idle_timeout",
# "uses", or "lifetime", then the total number of
# connections MAY fall below "min". When that
# happens, it will open a new connection. It will
# also log a WARNING message.
#
# The solution is to either lower the "min" connections,
# or increase lifetime/idle_timeout.
}
}

View file

@ -1,666 +0,0 @@
# -*- text -*-
#
# $Id: 015ae6907b8113771691ae3a3c1d53b05756d143 $
#
# Lightweight Directory Access Protocol (LDAP)
#
ldap {
# Note that this needs to match the name(s) in the LDAP server
# certificate, if you're using ldaps. See OpenLDAP documentation
# for the behavioral semantics of specifying more than one host.
#
# Depending on the libldap in use, server may be an LDAP URI.
# In the case of OpenLDAP this allows the following
# additional schemes:
# - ldaps:// (LDAP over SSL)
# - ldapi:// (LDAP over Unix socket)
# - ldapc:// (Connectionless LDAP)
server = 'localhost'
# server = 'ldap.rrdns.example.org'
# server = 'ldap.rrdns.example.org'
# Port to connect on, defaults to 389, will be ignored for LDAP URIs.
# port = 389
# Administrator account for searching and possibly modifying.
# If using SASL + KRB5 these should be commented out.
# identity = 'cn=admin,dc=example,dc=org'
# password = mypass
# Unless overridden in another section, the dn from which all
# searches will start from.
base_dn = 'dc=example,dc=org'
#
# You can run the 'ldapsearch' command line tool using the
# parameters from this module's configuration.
#
# ldapsearch -D ${identity} -w ${password} -h ${server} -b 'CN=user,${base_dn}'
#
# That will give you the LDAP information for 'user'.
#
# Group membership can be queried by using the above "ldapsearch" string,
# and adding "memberof" qualifiers. For ActiveDirectory, use:
#
# ldapsearch ... '(&(objectClass=user)(sAMAccountName=user)(memberof=CN=group,${base_dn}))'
#
# Where 'user' is the user as above, and 'group' is the group you are querying for.
#
#
# SASL parameters to use for admin binds
#
# When we're prompted by the SASL library, these control
# the responses given, as well as the identity and password
# directives above.
#
# If any directive is commented out, a NULL response will be
# provided to cyrus-sasl.
#
# Unfortunately the only way to control Kerberos here is through
# environmental variables, as cyrus-sasl provides no API to
# set the krb5 config directly.
#
# Full documentation for MIT krb5 can be found here:
#
# http://web.mit.edu/kerberos/krb5-devel/doc/admin/env_variables.html
#
# At a minimum you probably want to set KRB5_CLIENT_KTNAME.
#
sasl {
# SASL mechanism
# mech = 'PLAIN'
# SASL authorisation identity to proxy.
# proxy = 'autz_id'
# SASL realm. Used for kerberos.
# realm = 'example.org'
}
#
# Generic valuepair attribute
#
# If set, this attribute will be retrieved in addition to any
# mapped attributes.
#
# Values should be in the format:
# <radius attr> <op> <value>
#
# Where:
# <radius attr>: Is the attribute you wish to create
# with any valid list and request qualifiers.
# <op>: Is any assignment operator (=, :=, +=, -=).
# <value>: Is the value to parse into the new valuepair.
# If the value is wrapped in double quotes it
# will be xlat expanded.
# valuepair_attribute = 'radiusAttribute'
#
# Mapping of LDAP directory attributes to RADIUS dictionary attributes.
#
# WARNING: Although this format is almost identical to the unlang
# update section format, it does *NOT* mean that you can use other
# unlang constructs in module configuration files.
#
# Configuration items are in the format:
# <radius attr> <op> <ldap attr>
#
# Where:
# <radius attr>: Is the destination RADIUS attribute
# with any valid list and request qualifiers.
# <op>: Is any assignment operator (=, :=, +=, -=).
# <ldap attr>: Is the attribute associated with user or
# profile objects in the LDAP directory.
# If the attribute name is wrapped in double
# quotes it will be xlat expanded.
#
# Request and list qualifiers may also be placed after the 'update'
# section name to set defaults destination requests/lists
# for unqualified RADIUS attributes.
#
# Note: LDAP attribute names should be single quoted unless you want
# the name value to be derived from an xlat expansion, or an
# attribute ref.
update {
control:Password-With-Header += 'userPassword'
# control:NT-Password := 'ntPassword'
# reply:Reply-Message := 'radiusReplyMessage'
# reply:Tunnel-Type := 'radiusTunnelType'
# reply:Tunnel-Medium-Type := 'radiusTunnelMediumType'
# reply:Tunnel-Private-Group-ID := 'radiusTunnelPrivategroupId'
# Where only a list is specified as the RADIUS attribute,
# the value of the LDAP attribute is parsed as a valuepair
# in the same format as the 'valuepair_attribute' (above).
control: += 'radiusControlAttribute'
request: += 'radiusRequestAttribute'
reply: += 'radiusReplyAttribute'
}
# Set to yes if you have eDirectory and want to use the universal
# password mechanism.
# edir = no
# Set to yes if you want to bind as the user after retrieving the
# Cleartext-Password. This will consume the login grace, and
# verify user authorization.
# edir_autz = no
# Note: set_auth_type was removed in v3.x.x
#
# Equivalent functionality can be achieved by adding the
# following "if" statement to the authorize {} section of
# the virtual server, after the "ldap" module. For example:
#
# ...
# ldap
# if ((ok || updated) && User-Password && !control:Auth-Type) {
# update {
# control:Auth-Type := ldap
# }
# }
# ...
#
# You will also need to uncomment the "Auth-Type LDAP" block in the
# "authenticate" section.
#
#
# Name of the attribute that contains the user DN.
# The default name is LDAP-UserDn.
#
# If you have multiple LDAP instances, you should
# change this configuration item to:
#
# ${.:instance}-LDAP-UserDn
#
# That change allows the modules to set their own
# User DN, and to not conflict with each other.
#
user_dn = "LDAP-UserDn"
#
# User object identification.
#
user {
# Where to start searching in the tree for users
base_dn = "${..base_dn}"
# Filter for user objects, should be specific enough
# to identify a single user object.
#
# For Active Directory, you should use
# "samaccountname=" instead of "uid="
#
filter = "(uid=%{%{Stripped-User-Name}:-%{User-Name}})"
# For Active Directory nested group, you should comment out the previous 'filter = ...'
# and use the below. Where 'group' is the group you are querying for.
#
# NOTE: The string '1.2.840.113556.1.4.1941' specifies LDAP_MATCHING_RULE_IN_CHAIN.
# This applies only to DN attributes. This is an extended match operator that walks
# the chain of ancestry in objects all the way to the root until it finds a match.
# This reveals group nesting. It is available only on domain controllers with
# Windows Server 2003 SP2 or Windows Server 2008 (or above).
#
# See: https://social.technet.microsoft.com/wiki/contents/articles/5392.active-directory-ldap-syntax-filters.aspx
#
# filter = "(&(objectClass=user)(sAMAccountName=%{%{Stripped-User-Name}:-%{User-Name}})(memberOf:1.2.840.113556.1.4.1941:=cn=group,${..base_dn}))"
# SASL parameters to use for user binds
#
# When we're prompted by the SASL library, these control
# the responses given.
#
# Any of the config items below may be an attribute ref
# or and expansion, so different SASL mechs, proxy IDs
# and realms may be used for different users.
sasl {
# SASL mechanism
# mech = 'PLAIN'
# SASL authorisation identity to proxy.
# proxy = &User-Name
# SASL realm. Used for kerberos.
# realm = 'example.org'
}
# Search scope, may be 'base', 'one', 'sub' or 'children'
# scope = 'sub'
# Server side result sorting
#
# A list of space delimited attributes to order the result
# set by, if the filter matches multiple objects.
# Only the first result in the set will be processed.
#
# If the attribute name is prefixed with a hyphen '-' the
# sorting order will be reversed for that attribute.
#
# If sort_by is set, and the server does not support sorting
# the search will fail.
# sort_by = '-uid'
# If this is undefined, anyone is authorised.
# If it is defined, the contents of this attribute
# determine whether or not the user is authorised
# access_attribute = 'dialupAccess'
# Control whether the presence of 'access_attribute'
# allows access, or denys access.
#
# If 'yes', and the access_attribute is present, or
# 'no' and the access_attribute is absent then access
# will be allowed.
#
# If 'yes', and the access_attribute is absent, or
# 'no' and the access_attribute is present, then
# access will not be allowed.
#
# If the value of the access_attribute is 'false', it
# will negate the result.
#
# e.g.
# access_positive = yes
# access_attribute = userAccessAllowed
#
# With an LDAP object containing:
# userAccessAllowed: false
#
# Will result in the user being locked out.
# access_positive = yes
}
#
# User membership checking.
#
group {
# Where to start searching in the tree for groups
base_dn = "${..base_dn}"
# Filter for group objects, should match all available
# group objects a user might be a member of.
#
# If using Active Directory you are likely to need "group"
# instead of "posixGroup".
filter = '(objectClass=posixGroup)'
# Search scope, may be 'base', 'one', 'sub' or 'children'
# scope = 'sub'
# Attribute that uniquely identifies a group.
# Is used when converting group DNs to group
# names.
# name_attribute = cn
# Filter to find all group objects a user is a member of.
# That is, group objects with attributes that
# identify members (the inverse of membership_attribute).
#
# Note that this configuration references the "user_dn"
# configuration defined above.
#
# membership_filter = "(|(member=%{control:${..user_dn}})(memberUid=%{%{Stripped-User-Name}:-%{User-Name}}))"
# The attribute, in user objects, which contain the names
# or DNs of groups a user is a member of.
#
# Unless a conversion between group name and group DN is
# needed, there's no requirement for the group objects
# referenced to actually exist.
#
# If the LDAP server does not support the "memberOf"
# attribute (or equivalent), then you will need to use the
# membership_filter option above instead. If you can't see
# the memberOf attribute then it is also possible that the
# LDAP bind user does not have the correct permissions to
# view it.
membership_attribute = 'memberOf'
# If cacheable_name or cacheable_dn are enabled,
# all group information for the user will be
# retrieved from the directory and written to LDAP-Group
# attributes appropriate for the instance of rlm_ldap.
#
# For group comparisons these attributes will be checked
# instead of querying the LDAP directory directly.
#
# This feature is intended to be used with rlm_cache.
#
# If you wish to use this feature, you should enable
# the type that matches the format of your check items
# i.e. if your groups are specified as DNs then enable
# cacheable_dn else enable cacheable_name.
# cacheable_name = 'no'
# cacheable_dn = 'no'
# Override the normal cache attribute (<inst>-LDAP-Group or
# LDAP-Group if using the default instance) and create a
# custom attribute. This can help if multiple module instances
# are used in fail-over.
# cache_attribute = 'LDAP-Cached-Membership'
# If the group being checked is specified as a name, but
# the user's groups are referenced by DN, and one of those
# group DNs is invalid, the whole group check is treated as
# invalid, and a negative result will be returned.
# When set to 'yes', this option ignores invalid DN
# references.
# allow_dangling_group_ref = 'no'
}
#
# User profiles. RADIUS profile objects contain sets of attributes
# to insert into the request. These attributes are mapped using
# the same mapping scheme applied to user objects (the update section above).
#
profile {
# Filter for RADIUS profile objects
# filter = '(objectclass=radiusprofile)'
# The default profile. This may be a DN or an attribute
# reference.
# To get old v2.2.x style behaviour, or to use the
# &User-Profile attribute to specify the default profile,
# set this to &control:User-Profile.
# default = 'cn=radprofile,dc=example,dc=org'
# The LDAP attribute containing profile DNs to apply
# in addition to the default profile above. These are
# retrieved from the user object, at the same time as the
# attributes from the update section, and are applied
# if authorization is successful.
# attribute = 'radiusProfileDn'
}
#
# Bulk load clients from the directory
#
client {
# Where to start searching in the tree for clients
base_dn = "${..base_dn}"
#
# Filter to match client objects
#
filter = '(objectClass=radiusClient)'
# Search scope, may be 'base', 'one', 'sub' or 'children'
# scope = 'sub'
#
# Sets default values (not obtained from LDAP) for new client entries
#
template {
# login = 'test'
# password = 'test'
# proto = tcp
# require_message_authenticator = yes
# Uncomment to add a home_server with the same
# attributes as the client.
# coa_server {
# response_window = 2.0
# }
}
#
# Client attribute mappings are in the format:
# <client attribute> = <ldap attribute>
#
# The following attributes are required:
# * ipaddr | ipv4addr | ipv6addr - Client IP Address.
# * secret - RADIUS shared secret.
#
# All other attributes usually supported in a client
# definition are also supported here.
#
# Schemas are available in doc/schemas/ldap for openldap and eDirectory
#
attribute {
ipaddr = 'radiusClientIdentifier'
secret = 'radiusClientSecret'
# shortname = 'radiusClientShortname'
# nas_type = 'radiusClientType'
# virtual_server = 'radiusClientVirtualServer'
# require_message_authenticator = 'radiusClientRequireMa'
}
}
# Load clients on startup
# read_clients = no
#
# Modify user object on receiving Accounting-Request
#
# Useful for recording things like the last time the user logged
# in, or the Acct-Session-ID for CoA/DM.
#
# LDAP modification items are in the format:
# <ldap attr> <op> <value>
#
# Where:
# <ldap attr>: The LDAP attribute to add modify or delete.
# <op>: One of the assignment operators:
# (:=, +=, -=, ++).
# Note: '=' is *not* supported.
# <value>: The value to add modify or delete.
#
# WARNING: If using the ':=' operator with a multi-valued LDAP
# attribute, all instances of the attribute will be removed and
# replaced with a single attribute.
accounting {
reference = "%{tolower:type.%{Acct-Status-Type}}"
type {
start {
update {
description := "Online at %S"
}
}
interim-update {
update {
description := "Last seen at %S"
}
}
stop {
update {
description := "Offline at %S"
}
}
}
}
#
# Post-Auth can modify LDAP objects too
#
post-auth {
update {
description := "Authenticated at %S"
}
}
#
# LDAP connection-specific options.
#
# These options set timeouts, keep-alives, etc. for the connections.
#
options {
# Control under which situations aliases are followed.
# May be one of 'never', 'searching', 'finding' or 'always'
# default: libldap's default which is usually 'never'.
#
# LDAP_OPT_DEREF is set to this value.
# dereference = 'always'
#
# The following two configuration items control whether the
# server follows references returned by LDAP directory.
# They are mostly for Active Directory compatibility.
# If you set these to 'no', then searches will likely return
# 'operations error', instead of a useful result.
#
chase_referrals = yes
rebind = yes
# SASL Security Properties (see SASL_SECPROPS in ldap.conf man page).
# Note - uncomment when using GSS-API sasl mechanism along with TLS
# encryption against Active-Directory LDAP servers (this disables
# sealing and signing at the GSS level as required by AD).
#sasl_secprops = 'noanonymous,noplain,maxssf=0'
# Seconds to wait for LDAP query to finish. default: 20
res_timeout = 10
# Seconds LDAP server has to process the query (server-side
# time limit). default: 20
#
# LDAP_OPT_TIMELIMIT is set to this value.
srv_timelimit = 3
# Seconds to wait for response of the server. (network
# failures) default: 10
#
# LDAP_OPT_NETWORK_TIMEOUT is set to this value.
net_timeout = 1
# LDAP_OPT_X_KEEPALIVE_IDLE
idle = 60
# LDAP_OPT_X_KEEPALIVE_PROBES
probes = 3
# LDAP_OPT_X_KEEPALIVE_INTERVAL
interval = 3
# ldap_debug: debug flag for LDAP SDK
# (see OpenLDAP documentation). Set this to enable
# huge amounts of LDAP debugging on the screen.
# You should only use this if you are an LDAP expert.
#
# default: 0x0000 (no debugging messages)
# Example:(LDAP_DEBUG_FILTER+LDAP_DEBUG_CONNS)
ldap_debug = 0x0028
}
#
# This subsection configures the tls related items
# that control how FreeRADIUS connects to an LDAP
# server. It contains all of the 'tls_*' configuration
# entries used in older versions of FreeRADIUS. Those
# configuration entries can still be used, but we recommend
# using these.
#
tls {
# Set this to 'yes' to use TLS encrypted connections
# to the LDAP database by using the StartTLS extended
# operation.
#
# The StartTLS operation is supposed to be
# used with normal ldap connections instead of
# using ldaps (port 636) connections
# start_tls = yes
# ca_file = ${certdir}/cacert.pem
# ca_path = ${certdir}
# certificate_file = /path/to/radius.crt
# private_key_file = /path/to/radius.key
# random_file = /dev/urandom
# Certificate Verification requirements. Can be:
# 'never' (do not even bother trying)
# 'allow' (try, but don't fail if the certificate
# cannot be verified)
# 'demand' (fail if the certificate does not verify)
# 'hard' (similar to 'demand' but fails if TLS
# cannot negotiate)
#
# The default is libldap's default, which varies based
# on the contents of ldap.conf.
# require_cert = 'demand'
#
# Minimum TLS version to accept. We STRONGLY recommend
# setting this to "1.2"
#
# tls_min_version = "1.2"
}
# As of version 3.0, the 'pool' section has replaced the
# following configuration items:
#
# ldap_connections_number
# The connection pool is new for 3.0, and will be used in many
# modules, for all kinds of connection-related activity.
#
# When the server is not threaded, the connection pool
# limits are ignored, and only one connection is used.
pool {
# Connections to create during module instantiation.
# If the server cannot create specified number of
# connections during instantiation it will exit.
# Set to 0 to allow the server to start without the
# directory being available.
start = ${thread[pool].start_servers}
# Minimum number of connections to keep open
min = ${thread[pool].min_spare_servers}
# Maximum number of connections
#
# If these connections are all in use and a new one
# is requested, the request will NOT get a connection.
#
# Setting 'max' to LESS than the number of threads means
# that some threads may starve, and you will see errors
# like 'No connections available and at max connection limit'
#
# Setting 'max' to MORE than the number of threads means
# that there are more connections than necessary.
max = ${thread[pool].max_servers}
# Spare connections to be left idle
#
# NOTE: Idle connections WILL be closed if "idle_timeout"
# is set. This should be less than or equal to "max" above.
spare = ${thread[pool].max_spare_servers}
# Number of uses before the connection is closed
#
# 0 means "infinite"
uses = 0
# The number of seconds to wait after the server tries
# to open a connection, and fails. During this time,
# no new connections will be opened.
retry_delay = 30
# The lifetime (in seconds) of the connection
lifetime = 0
# Idle timeout (in seconds). A connection which is
# unused for this length of time will be closed.
idle_timeout = 60
# NOTE: All configuration settings are enforced. If a
# connection is closed because of 'idle_timeout',
# 'uses', or 'lifetime', then the total number of
# connections MAY fall below 'min'. When that
# happens, it will open a new connection. It will
# also log a WARNING message.
#
# The solution is to either lower the 'min' connections,
# or increase lifetime/idle_timeout.
}
}

View file

@ -1,25 +0,0 @@
# -*- text -*-
#
# $Id: a4ead1d64e8220344b483718ece4712bef5e9e36 $
######################################################################
#
# This next section is a sample configuration for the "passwd"
# module, that reads flat-text files.
#
# The file is in the format <mac>,<ip>
#
# 00:01:02:03:04:05,192.0.2.100
# 01:01:02:03:04:05,192.0.2.101
# 02:01:02:03:04:05,192.0.2.102
#
# This lets you perform simple static IP assignments from a flat-text
# file. You will have to define lease times yourself.
#
######################################################################
passwd mac2ip {
# Flat-text lookup file; each line maps a client MAC address to an IP
# address in the format <mac>,<ip> (see the examples in the header above).
filename = ${modconfdir}/${.:name}/${.:instance}
# '*' marks the MAC as the lookup key; ':=' assigns the matched IP
# to DHCP-Your-IP-Address.
format = "*DHCP-Client-Hardware-Address:=DHCP-Your-IP-Address"
delimiter = ","
}

View file

@ -1,18 +0,0 @@
# -*- text -*-
#
# $Id: a1db803a71cddbb98daeeeda515cff2fc77ea318 $
# A simple file to map a MAC address to a VLAN.
#
# The file should be in the format MAC,VLAN
# the VLAN name cannot have spaces in it, for example:
#
# 00:01:02:03:04:05,VLAN1
# 03:04:05:06:07:08,VLAN2
# ...
#
passwd mac2vlan {
# Flat-text lookup file; each line maps a MAC address to a VLAN name
# in the format MAC,VLAN (VLAN names must not contain spaces).
filename = ${modconfdir}/${.:name}/${.:instance}
# '*' marks the MAC as the lookup key; ':=' assigns the matched VLAN
# name to VMPS-VLAN-Name.
format = "*VMPS-Mac:=VMPS-VLAN-Name"
delimiter = ","
}

View file

@ -1,57 +0,0 @@
# -*- text -*-
#
# $Id: 1b27b44b5a2d82e23d67c07ba19f0ef3293960d2 $
#
# Write Moonshot-*-TargetedId (MSTID) to the database.
#
# Schema raddb/mods-config/sql/moonshot-targeted-ids/<DB>/schema.sql
# Queries raddb/mods-config/sql/moonshot-targeted-ids/<DB>/queries.conf
#
sql moonshot_tid_sql {
# The dialect of SQL you want to use, this should usually match
# the driver below.
#
# If you're using rlm_sql_null, then it should be the type of
# database the logged queries are going to be executed against.
dialect = "sqlite"
# The sub-module to use to execute queries. This should match
# the database you're attempting to connect to.
#
# There are MSTID queries available for:
# * rlm_sql_mysql
# * rlm_sql_postgresql
# * rlm_sql_sqlite
# * rlm_sql_null (log queries to disk)
#
driver = "rlm_sql_${dialect}"
sqlite {
filename = ${radacctdir}/moonshot-targeted-ids.sqlite
# NOTE(review): this path uses ${..:name} while the $INCLUDE at the
# bottom uses ${.:name} — confirm both expand to this module's name.
bootstrap = ${modconfdir}/${..:name}/moonshot-targeted-ids/sqlite/schema.sql
}
# Write MSTID queries to a logfile. Useful for debugging.
# logfile = ${logdir}/moonshot-targeted-id-log.sql
pool {
start = 5
min = 4
max = 10
spare = 3
uses = 0
lifetime = 0
idle_timeout = 60
}
# If you adjust the table name here, you must also modify the table name in
# the moonshot_get_targeted_id.post-auth policy in policy.d/moonshot-targeted-ids
# and the schema.sql files in the mods-config/sql/moonshot-targeted-ids tree.
#
moonshot_tid_table = "moonshot_targeted_ids"
sql_user_name = "%{User-Name}"
$INCLUDE ${modconfdir}/${.:name}/moonshot-targeted-ids/${dialect}/queries.conf
}

View file

@ -1,26 +0,0 @@
# -*- text -*-
#
# $Id: 443d74dc08f19ddb59ea342f756c90066623e1c6 $
# This module is only used when the server is running on the same
# system as OpenDirectory. The configuration of the module is hard-coded
# by Apple, and cannot be changed here.
#
# There are no configuration entries for this module.
#
# The MS-CHAP module will automatically talk to OpenDirectory, if the
# server is built on an OSX machine. However, you must also set
# dsAttrTypeNative:apple-enabled-auth-mech attribute in the
# /config/dirserv OpenDirectory record. You will probably also need
# to change the user passwords in order to re-generate the
# appropriate hashes.
#
# Complete OSX configuration information is available on Apple's web site:
#
# https://developer.apple.com/support/macos-server/macOS-Server-Service-Migration-Guide.pdf
#
# See also https://discussions.apple.com/thread/6053980?tstart=0
#
opendirectory {
# Intentionally empty: the module's configuration is hard-coded by
# Apple and cannot be changed here (see the notes above).
}

View file

@ -1,75 +0,0 @@
#
# Configuration for the OTP module.
#
# This module allows you to use various handheld OTP tokens
# for authentication (Auth-Type := otp). These tokens are
# available from various vendors.
#
# It works in conjunction with otpd, which implements token
# management and OTP verification functions; and lsmd or gsmd,
# which implements synchronous state management functions.
# You must list this module in BOTH the authorize and authenticate
# sections in order to use it.
otp {
# otpd rendezvous point.
# (default: /var/run/otpd/socket)
#otpd_rp = /var/run/otpd/socket
# Text to use for the challenge.
# Default "Challenge: %{reply:OTP-Challenge}\n Response: "
# NOTE(review): the value below inserts a space before '\n', which
# differs from the documented default — confirm that is intentional.
challenge_prompt = "Challenge: %{reply:OTP-Challenge} \n Response: "
# Length of the challenge. Most tokens probably support a
# max of 8 digits. (range: 5-32 digits, default 6)
#challenge_length = 6
# Maximum time, in seconds, that a challenge is valid.
# (The user must respond to a challenge within this time.)
# It is also the minimal time between consecutive async mode
# authentications, a necessary restriction due to an inherent
# weakness of the RADIUS protocol which allows replay attacks.
# (default: 30)
#challenge_delay = 30
# Whether or not to allow asynchronous ("pure" challenge/
# response) mode authentication. Since sync mode is much more
# usable, and all reasonable tokens support it, the typical
# use of async mode is to allow re-sync of event based tokens.
# But because of the vulnerability of async mode with some tokens,
# you probably want to disable this and require that out-of-sync
# users re-sync from specifically secured terminals.
# See the otpd docs for more info.
# (default: no)
#allow_async = no
# Whether or not to allow synchronous mode authentication.
# When using otpd with lsmd, it is *CRITICALLY IMPORTANT*
# that if your OTP users can authenticate to multiple RADIUS
# servers, this must be "yes" for the primary/default server,
# and "no" for the others. This is because lsmd does not
# share state information across multiple servers. Using "yes"
# on all your RADIUS servers would allow replay attacks!
# Also, for event based tokens, the user will be out of sync
# on the "other" servers. In order to use "yes" on all your
# servers, you must either use gsmd, which synchronises state
# globally, or implement your own state synchronisation method.
# (default: yes)
#allow_sync = yes
# If both allow_async and allow_sync are "yes", a challenge is
# always presented to the user. This is incompatible with NAS
# that can't present or don't handle Access-Challenge's, e.g.
# PPTP servers. Even though a challenge is presented, the user
# can still enter their synchronous passcode.
# The following are MPPE settings. Note that MS-CHAP (v1) is
# strongly discouraged. All possible values are listed as
# {value = meaning}. Default values are first.
#mschapv2_mppe = {2 = required, 1 = optional, 0 = forbidden}
#mschapv2_mppe_bits = {2 = 128, 1 = 128 or 40, 0 = 40}
#mschap_mppe = {2 = required, 1 = optional, 0 = forbidden}
#mschap_mppe_bits = {2 = 128}
}

View file

@ -1,26 +0,0 @@
# -*- text -*-
#
# $Id: f4a91a948637bb2f42f613ed9faa6f9ae9ae6099 $
# Pluggable Authentication Modules
#
# For Linux, see:
# http://www.kernel.org/pub/linux/libs/pam/index.html
#
# WARNING: On many systems, the system PAM libraries have
# memory leaks! We STRONGLY SUGGEST that you do not
# use PAM for authentication, due to those memory leaks.
#
pam {
#
# The name to use for PAM authentication.
# PAM looks in /etc/pam.d/${pam_auth_name}
# for its configuration. See 'redhat/radiusd-pam'
# for a sample PAM configuration file.
#
# NOTE(review): the comment above references ${pam_auth_name}, but the
# directive below is named 'pam_auth' — confirm which name is correct.
#
# Note that any Pam-Auth attribute set in the 'authorize'
# section will over-ride this one.
#
pam_auth = radiusd
}

View file

@ -1,94 +0,0 @@
# -*- text -*-
#
# $Id: fa04cdabb71767050aaa0664da792fd6086adb19 $
# Persistent, embedded Perl interpreter.
#
perl {
#
# The Perl script to execute on authorize, authenticate,
# accounting, xlat, etc. This is very similar to using
# 'rlm_exec' module, but it is persistent, and therefore
# faster.
#
filename = ${modconfdir}/${.:instance}/example.pl
#
# Options which are passed to the Perl interpreter.
# These are (mostly) the same options as are passed
# to the "perl" command line.
#
# The most useful flag is "-T". This sets tainting on. And
# as of 3.0.18, makes it impossible to leverage bad
# User-Names into local command execution.
#
perl_flags = "-T"
#
# The following hashes are given to the module and
# filled with value-pairs (Attribute names and values)
#
# %RAD_CHECK Check items
# %RAD_REQUEST Attributes from the request
# %RAD_REPLY Attributes for the reply
# %RAD_REQUEST_PROXY Attributes from the proxied request
# %RAD_REQUEST_PROXY_REPLY Attributes from the proxy reply
#
# The interface between FreeRADIUS and Perl is strings.
# That is, attributes of type "octets" are converted to
# printable strings, such as "0xabcdef". If you want to
# access the binary values of the attributes, you should
# call the Perl "pack" function. Then to send any binary
# data back to FreeRADIUS, call the Perl "unpack" function,
# so that the contents of the hashes are printable strings.
#
# IP addresses are sent as strings, e.g. "192.0.2.25", and
# not as a 4-byte binary value. The same applies to other
# attribute data types.
#
# Attributes of type "string" are copied to Perl as-is.
# They are not escaped or interpreted.
#
# The return codes from functions in the perl_script
# are passed directly back to the server. These
# codes are defined in mods-config/example.pl
#
# You can define configuration items (and nested sub-sections) in the perl "config" section.
# These items will be accessible in the perl script through %RAD_PERLCONF hash.
# For instance: $RAD_PERLCONF{'name'} $RAD_PERLCONF{'sub-config'}->{'name'}
#
#config {
# name = "value"
# sub-config {
# name = "value of name from config.sub-config"
# }
#}
#
# List of functions in the module to call.
# Uncomment and change if you want to use function
# names other than the defaults.
#
#func_authenticate = authenticate
#func_authorize = authorize
#func_preacct = preacct
#func_accounting = accounting
#func_checksimul = checksimul
#func_pre_proxy = pre_proxy
#func_post_proxy = post_proxy
#func_post_auth = post_auth
#func_recv_coa = recv_coa
#func_send_coa = send_coa
#func_xlat = xlat
#func_detach = detach
#
# Uncomment the following lines if you wish
# to use separate functions for Start and Stop
# accounting packets. In that case, the
# func_accounting function is not called.
#
#func_start_accounting = accounting_start
#func_stop_accounting = accounting_stop
}

View file

@ -1,65 +0,0 @@
#
# Make sure the PYTHONPATH environmental variable contains the
# directory(s) for the modules listed below.
#
# Uncomment any func_* which are included in your module. If
# rlm_python is called for a section which does not have
# a function defined, it will return NOOP.
#
python {
# Path to the python modules
#
# Note that due to limitations on Python, this configuration
# item is GLOBAL TO THE SERVER. That is, you cannot have two
# instances of the python module, each with a different path.
#
# python_path="${modconfdir}/${.:name}:/path/to/python/files:/another_path/to/python_files/"
module = example
# Pass all VPS lists as a 6-tuple to the callbacks
# (request, reply, config, state, proxy_req, proxy_reply)
# pass_all_vps = no
# Pass all VPS lists as a dictionary to the callbacks
# Keys: "request", "reply", "config", "session-state", "proxy-request",
# "proxy-reply"
# This option prevails over "pass_all_vps"
# pass_all_vps_dict = no
# mod_instantiate = ${.module}
# func_instantiate = instantiate
# mod_detach = ${.module}
# func_detach = detach
# mod_authorize = ${.module}
# func_authorize = authorize
# mod_authenticate = ${.module}
# func_authenticate = authenticate
# mod_preacct = ${.module}
# func_preacct = preacct
# mod_accounting = ${.module}
# func_accounting = accounting
# mod_checksimul = ${.module}
# func_checksimul = checksimul
# mod_pre_proxy = ${.module}
# func_pre_proxy = pre_proxy
# mod_post_proxy = ${.module}
# func_post_proxy = post_proxy
# mod_post_auth = ${.module}
# func_post_auth = post_auth
# mod_recv_coa = ${.module}
# func_recv_coa = recv_coa
# mod_send_coa = ${.module}
# func_send_coa = send_coa
}

View file

@ -1,65 +0,0 @@
#
# Make sure the PYTHONPATH environmental variable contains the
# directory(s) for the modules listed below.
#
# Uncomment any func_* which are included in your module. If
# rlm_python is called for a section which does not have
# a function defined, it will return NOOP.
#
python3 {
# Path to the python modules
#
# Note that due to limitations on Python, this configuration
# item is GLOBAL TO THE SERVER. That is, you cannot have two
# instances of the python module, each with a different path.
#
# python_path="${modconfdir}/${.:name}:/another_path/to/python_files"
module = example
# Pass all VPS lists as a 6-tuple to the callbacks
# (request, reply, config, state, proxy_req, proxy_reply)
# pass_all_vps = no
# Pass all VPS lists as a dictionary to the callbacks
# Keys: "request", "reply", "config", "session-state", "proxy-request",
# "proxy-reply"
# This option prevails over "pass_all_vps"
# pass_all_vps_dict = no
# mod_instantiate = ${.module}
# func_instantiate = instantiate
# mod_detach = ${.module}
# func_detach = detach
# mod_authorize = ${.module}
# func_authorize = authorize
# mod_authenticate = ${.module}
# func_authenticate = authenticate
# mod_preacct = ${.module}
# func_preacct = preacct
# mod_accounting = ${.module}
# func_accounting = accounting
# mod_checksimul = ${.module}
# func_checksimul = checksimul
# mod_pre_proxy = ${.module}
# func_pre_proxy = pre_proxy
# mod_post_proxy = ${.module}
# func_post_proxy = post_proxy
# mod_post_auth = ${.module}
# func_post_auth = post_auth
# mod_recv_coa = ${.module}
# func_recv_coa = recv_coa
# mod_send_coa = ${.module}
# func_send_coa = send_coa
}

View file

@ -1,99 +0,0 @@
# -*- text -*-
#
# $Id: 64789f58a7f937b7b9f4c7ff783153fb5194ba25 $
#
# Configuration file for the "redis" module. This module does nothing
# other than provide connections to a redis database, and a %{redis: ...}
# expansion.
#
redis {
# Host where the redis server is located.
# We recommend using ONLY 127.0.0.1 !
server = 127.0.0.1
# Select the Redis logical database having the specified zero-based numeric index.
# database = 0
# The default port.
port = 6379
# The password used to authenticate to the server.
# We recommend using a strong password.
# password = thisisreallysecretandhardtoguess
# Set connection and query timeout for rlm_redis
query_timeout = 5
#
# Information for the connection pool. The configuration items
# below are the same for all modules which use the new
# connection pool.
#
pool {
# Connections to create during module instantiation.
# If the server cannot create specified number of
# connections during instantiation it will exit.
# Set to 0 to allow the server to start without the
# web service being available.
start = ${thread[pool].start_servers}
# Minimum number of connections to keep open
min = ${thread[pool].min_spare_servers}
# Maximum number of connections
#
# If these connections are all in use and a new one
# is requested, the request will NOT get a connection.
#
# Setting 'max' to LESS than the number of threads means
# that some threads may starve, and you will see errors
# like 'No connections available and at max connection limit'
#
# Setting 'max' to MORE than the number of threads means
# that there are more connections than necessary.
max = ${thread[pool].max_servers}
# Spare connections to be left idle
#
# NOTE: Idle connections WILL be closed if "idle_timeout"
# is set. This should be less than or equal to "max" above.
spare = ${thread[pool].max_spare_servers}
# Number of uses before the connection is closed
#
# 0 means "infinite"
uses = 0
# The number of seconds to wait after the server tries
# to open a connection, and fails. During this time,
# no new connections will be opened.
retry_delay = 30
# The lifetime (in seconds) of the connection
#
# NOTE: A setting of 0 means infinite (no limit).
lifetime = 86400
# The pool is checked for free connections every
# "cleanup_interval". If there are free connections,
# then one of them is closed.
cleanup_interval = 300
# The idle timeout (in seconds). A connection which is
# unused for this length of time will be closed.
#
# NOTE: A setting of 0 means infinite (no timeout).
idle_timeout = 600
# NOTE: All configuration settings are enforced. If a
# connection is closed because of "idle_timeout",
# "uses", or "lifetime", then the total number of
# connections MAY fall below "min". When that
# happens, it will open a new connection. It will
# also log a WARNING message.
#
# The solution is to either lower the "min" connections,
# or increase lifetime/idle_timeout.
}
}

View file

@ -1,52 +0,0 @@
# -*- text -*-
#
# $Id: d303550fa48460f9583c051795ad7f179fcbd36b $
#
# Configuration file for the "rediswho" module.
#
# This module tracks the last set of login sessions for a user.
#
rediswho {
# REDIS instance to use (from mods-available/redis)
#
# If you have multiple redis instances, such as "redis redis1 {...}",
# use the *instance* name here: redis1.
# redis_module_instance = redis
# How many sessions to keep track of per user.
# If there are more than this number, older sessions are deleted.
trim_count = 15
# Expiry time in seconds. Any sessions which have not received
# an update in this time will be automatically expired.
expire_time = 86400
#
# Each subsection contains insert / trim / expire queries.
# The subsections are named after the contents of the
# Acct-Status-Type attribute. See dictionary.rfc2866 for names
# of the various Acct-Status-Type values, or look at the output
# of debug mode.
#
# This module supports *any* Acct-Status-Type. Just add a subsection
# of the appropriate name, along with insert / trim / expire queries.
#
Start {
insert = "LPUSH %{User-Name} %l,%{Acct-Session-Id},%{NAS-IP-Address},%{Acct-Session-Time},%{Framed-IP-Address},%{%{Acct-Input-Gigawords}:-0},%{%{Acct-Output-Gigawords}:-0},%{%{Acct-Input-Octets}:-0},%{%{Acct-Output-Octets}:-0}"
trim = "LTRIM %{User-Name} 0 ${..trim_count}"
expire = "EXPIRE %{User-Name} ${..expire_time}"
}
Interim-Update {
insert = "LPUSH %{User-Name} %l,%{Acct-Session-Id},%{NAS-IP-Address},%{Acct-Session-Time},%{Framed-IP-Address},%{%{Acct-Input-Gigawords}:-0},%{%{Acct-Output-Gigawords}:-0},%{%{Acct-Input-Octets}:-0},%{%{Acct-Output-Octets}:-0}"
trim = "LTRIM %{User-Name} 0 ${..trim_count}"
expire = "EXPIRE %{User-Name} ${..expire_time}"
}
Stop {
insert = "LPUSH %{User-Name} %l,%{Acct-Session-Id},%{NAS-IP-Address},%{Acct-Session-Time},%{Framed-IP-Address},%{%{Acct-Input-Gigawords}:-0},%{%{Acct-Output-Gigawords}:-0},%{%{Acct-Input-Octets}:-0},%{%{Acct-Output-Octets}:-0}"
trim = "LTRIM %{User-Name} 0 ${..trim_count}"
expire = "EXPIRE %{User-Name} ${..expire_time}"
}
}

View file

@ -1,290 +0,0 @@
rest {
#
# This subsection configures the tls related items
# that control how FreeRADIUS connects to a HTTPS
# server.
#
tls {
# Certificate Authorities:
# "ca_file" (libcurl option CURLOPT_ISSUERCERT).
# File containing a single CA, which is the issuer of the server
# certificate.
# "ca_info_file" (libcurl option CURLOPT_CAINFO).
# File containing a bundle of certificates, which allows
# certificate chain validation to be handled.
# "ca_path" (libcurl option CURLOPT_CAPATH).
# Directory holding CA certificates to verify the peer with.
# ca_file = ${certdir}/cacert.pem
# ca_info_file = ${certdir}/cacert_bundle.pem
# ca_path = ${certdir}
# certificate_file = /path/to/radius.crt
# private_key_file = /path/to/radius.key
# private_key_password = "supersecret"
# random_file = /dev/urandom
# Server certificate verification requirements. Can be:
# "no" (don't even bother trying)
# "yes" (verify the cert was issued by one of the
# trusted CAs)
#
# The default is "yes"
# check_cert = yes
# Server certificate CN verification requirements. Can be:
# "no" (don't even bother trying)
# "yes" (verify the CN in the certificate matches the host
# in the URI)
#
# The default is "yes"
# check_cert_cn = yes
}
# rlm_rest will open a connection to the server specified in connect_uri
# to populate the connection cache, ready for the first request.
# The server will not start if the server specified is unreachable.
#
# If you wish to disable this pre-caching and reachability check,
# comment out the configuration item below.
connect_uri = "http://127.0.0.1/"
#
# How long before new connection attempts timeout, defaults to 4.0 seconds.
#
# connect_timeout = 4.0
#
# Specify HTTP protocol version to use. One of '1.0', '1.1', '2.0', '2.0+auto',
# '2.0+tls' or 'default'. (libcurl option CURLOPT_HTTP_VERSION)
#
# http_negotiation = 1.1
#
# The following config items can be used in each of the sections.
# The sections themselves reflect the sections in the server.
# For example if you list rest in the authorize section of a virtual server,
# the settings from the authorize section here will be used.
#
# The following config items may be listed in any of the sections:
# uri - to send the request to.
# method - HTTP method to use, one of 'get', 'post', 'put', 'patch',
# 'delete' or any custom HTTP method.
# body - The format of the HTTP body sent to the remote server.
# May be 'none', 'post' or 'json', defaults to 'none'.
# attr_num - If true, the attribute number is supplied for each attribute.
# Defaults to false.
# raw_value - If true, enumerated attribute values are provided as numeric
# values. Defaults to false.
# data - Send custom freeform data in the HTTP body. Content-type
# may be specified with 'body'. Will be expanded.
# Values from expansion will not be escaped, this should be
# done using the appropriate xlat method e.g. %{urlencode:<attr>}.
# force_to - Force the response to be decoded with this decoder.
# May be 'plain' (creates reply:REST-HTTP-Body), 'post'
# or 'json'.
# tls - TLS settings for HTTPS.
# auth - HTTP auth method to use, one of 'none', 'srp', 'basic',
# 'digest', 'digest-ie', 'gss-negotiate', 'ntlm',
# 'ntlm-winbind', 'any', 'safe'. defaults to 'none'.
# username - User to authenticate as, will be expanded.
# password - Password to use for authentication, will be expanded.
# require_auth - Require HTTP authentication.
# timeout - HTTP request timeout in seconds, defaults to 4.0.
# chunk - Chunk size to use. If set, HTTP chunked encoding is used to
# send data to the REST server. Make sure that this is large
# enough to fit your largest attribute value's text
#  representation.
# A number like 8192 is good.
#
# Additional HTTP headers may be specified with control:REST-HTTP-Header.
# The values of those attributes should be in the format:
#
# control:REST-HTTP-Header := "<HTTP attribute>: <value>"
#
# The control:REST-HTTP-Header attributes will be consumed
# (i.e. deleted) after each call to the rest module, and each
# %{rest:} expansion. This is so that headers from one REST
# call do not affect headers from a different REST call.
#
# Body encodings are the same for requests and responses
#
# POST - All attributes and values are urlencoded
# [outer.][<list>:]<attribute0>=<value0>&[outer.][<list>:]<attributeN>=<valueN>
#
# JSON - All attributes and values are escaped according to the JSON specification
# - attribute Name of the attribute.
# - attr_num Number of the attribute. Only available if the configuration item
# 'attr_num' is enabled.
# - type Type of the attribute (e.g. "integer", "string", "ipaddr", "octets", ...).
# - value Attribute value, for enumerated attributes the human readable value is
# provided and not the numeric value (Depends on the 'raw_value' config item).
# {
# "<attribute0>":{
# "attr_num":<attr_num0>,
# "type":"<type0>",
# "value":[<value0>,<value1>,<valueN>]
# },
# "<attribute1>":{
# "attr_num":<attr_num1>,
# "type":"<type1>",
# "value":[...]
# },
# "<attributeN>":{
# "attr_num":<attr_numN>,
# "type":"<typeN>",
# "value":[...]
# },
# }
#
# The response format adds three optional fields:
# - do_xlat If true, any values will be xlat expanded. Defaults to true.
# - is_json If true, any nested JSON data will be copied to the attribute
# in string form. Defaults to true.
# - op Controls how the attribute is inserted into the target list.
# Defaults to ':='. To create multiple attributes from multiple
# values, this should be set to '+=', otherwise only the last
# value will be used, and it will be assigned to a single
# attribute.
# {
# "<attribute0>":{
# "is_json":<bool>,
# "do_xlat":<bool>,
# "op":"<operator>",
# "value":[<value0>,<value1>,<valueN>]
# },
# "<attribute1>":"value",
# "<attributeN>":{
# "value":[<value0>,<value1>,<valueN>],
# "op":"+="
# }
# }
#
# Module return codes are determined by HTTP response codes. These vary depending on the
# section.
#
# If the body is processed and found to be malformed or unsupported fail will be returned.
# If the body is processed and found to contain attribute updated will be returned,
# except in the case of a 401 code.
#
# Authorize/Authenticate
#
# Code Meaning Process body Module code
# 404 not found no notfound
# 410 gone no notfound
# 403 forbidden no userlock
# 401 unauthorized yes reject
# 204 no content no ok
# 2xx successful yes ok/updated
# 5xx server error no fail
# xxx - no invalid
#
# The status code is held in %{reply:REST-HTTP-Status-Code}.
#
authorize {
uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?action=authorize"
method = 'get'
tls = ${..tls}
}
authenticate {
uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?action=authenticate"
method = 'get'
tls = ${..tls}
}
# Preacct/Accounting/Post-auth/Pre-Proxy/Post-Proxy
#
# Code Meaning Process body Module code
# 204 no content no ok
# 2xx successful yes ok/updated
# 5xx server error no fail
# xxx - no invalid
preacct {
uri = "${..connect_uri}/user/%{User-Name}/sessions/%{Acct-Unique-Session-ID}?action=preacct"
method = 'post'
tls = ${..tls}
}
accounting {
uri = "${..connect_uri}/user/%{User-Name}/sessions/%{Acct-Unique-Session-ID}?action=accounting"
method = 'post'
tls = ${..tls}
}
post-auth {
uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?action=post-auth"
method = 'post'
tls = ${..tls}
}
pre-proxy {
uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?action=pre-proxy"
method = 'post'
tls = ${..tls}
}
post-proxy {
uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?action=post-proxy"
method = 'post'
tls = ${..tls}
}
#
# The connection pool is new for 3.0, and will be used in many
# modules, for all kinds of connection-related activity.
#
pool {
# Connections to create during module instantiation.
# If the server cannot create specified number of
# connections during instantiation it will exit.
# Set to 0 to allow the server to start without the
# web service being available.
start = ${thread[pool].start_servers}
# Minimum number of connections to keep open
min = ${thread[pool].min_spare_servers}
# Maximum number of connections
#
# If these connections are all in use and a new one
# is requested, the request will NOT get a connection.
#
# Setting 'max' to LESS than the number of threads means
# that some threads may starve, and you will see errors
# like 'No connections available and at max connection limit'
#
# Setting 'max' to MORE than the number of threads means
# that there are more connections than necessary.
max = ${thread[pool].max_servers}
# Spare connections to be left idle
#
# NOTE: Idle connections WILL be closed if "idle_timeout"
# is set. This should be less than or equal to "max" above.
spare = ${thread[pool].max_spare_servers}
# Number of uses before the connection is closed
#
# 0 means "infinite"
uses = 0
# The number of seconds to wait after the server tries
# to open a connection, and fails. During this time,
# no new connections will be opened.
retry_delay = 30
# The lifetime (in seconds) of the connection
lifetime = 0
# idle timeout (in seconds). A connection which is
# unused for this length of time will be closed.
idle_timeout = 60
# NOTE: All configuration settings are enforced. If a
# connection is closed because of "idle_timeout",
# "uses", or "lifetime", then the total number of
# connections MAY fall below "min". When that
# happens, it will open a new connection. It will
# also log a WARNING message.
#
# The solution is to either lower the "min" connections,
# or increase lifetime/idle_timeout.
}
}

View file

@ -1,16 +0,0 @@
# -*- text -*-
#
# $Id: d5ad2a06c767f07722dc9b9c4b13d00c26b5a280 $
# An example configuration for using /etc/smbpasswd.
#
# See the "passwd" file for documentation on the configuration items
# for this module.
#
passwd smbpasswd {
filename = /etc/smbpasswd
format = "*User-Name::LM-Password:NT-Password:SMB-Account-CTRL-TEXT::"
hash_size = 100
ignore_nislike = no
allow_multiple_keys = no
}

View file

@ -1,94 +0,0 @@
# -*- text -*-
#
# $Id: 3be32b85f56a84725fe1a6bf508e459dbe6c4e02 $
# SMS One-time Password system.
#
# This module will extend FreeRADIUS with a socket interface to create and
# validate One-Time-Passwords. The program that creates the socket
# and interacts with this module is not included here.
#
# The module does not check the User-Password, this should be done with
# the "pap" module. See the example below.
#
# The module must be used in the "authorize" section to set
# Auth-Type properly. The first time through, the module is called
# in the "authenticate" section to authenticate the user password, and
# to send the challenge. The second time through, it authenticates
# the response to the challenge. e.g.:
#
# authorize {
# ...
# smsotp
# ...
# }
#
# authenticate {
# ...
# Auth-Type smsotp {
# pap
# smsotp
# }
#
# Auth-Type smsotp-reply {
# smsotp
# }
# ...
# }
#
smsotp {
# The location of the socket.
socket = "/var/run/smsotp_socket"
# Defines the challenge message that will be sent to the
# NAS. Default is "Enter Mobile PIN".
challenge_message = "Enter Mobile PIN:"
# Defines the Auth-Type section that is run for the response to
# the challenge. Default is "smsotp-reply".
challenge_type = "smsotp-reply"
# Control how many sockets are used to talk to the SMSOTPd
#
pool {
# Number of connections to start
start = 5
# Minimum number of connections to keep open
min = 4
# Maximum number of connections
#
# If these connections are all in use and a new one
# is requested, the request will NOT get a connection.
max = 10
# Spare connections to be left idle
#
# NOTE: Idle connections WILL be closed if "idle_timeout"
# is set.
spare = 3
# Number of uses before the connection is closed
#
# 0 means "infinite"
uses = 0
# The lifetime (in seconds) of the connection
lifetime = 0
# idle timeout (in seconds). A connection which is
# unused for this length of time will be closed.
idle_timeout = 60
# NOTE: All configuration settings are enforced. If a
# connection is closed because of "idle_timeout",
# "uses", or "lifetime", then the total number of
# connections MAY fall below "min". When that
# happens, it will open a new connection. It will
# also log a WARNING message.
#
# The solution is to either lower the "min" connections,
# or increase lifetime/idle_timeout.
}
}

View file

@ -1,12 +0,0 @@
# -*- text -*-
#
# $Id: 3a96622cc938f558b023e1110769a46861716a12 $
#
# The "sometimes" module is here for debugging purposes. Each instance
# randomly returns the configured result, or "noop".
#
# It is based on the "always" module.
sometimes {
rcode = fail
}

View file

@ -1,366 +0,0 @@
# -*- text -*-
##
## mods-available/sql -- SQL modules
##
## $Id: cfeac63ea87c30fead8457af6d10f5c3a0f48aef $
######################################################################
#
# Configuration for the SQL module
#
# The database schemas and queries are located in subdirectories:
#
# sql/<DB>/main/schema.sql Schema
# sql/<DB>/main/queries.conf Authorisation and Accounting queries
#
# Where "DB" is mysql, mssql, oracle, or postgresql.
#
# The name used to query SQL is sql_user_name, which is set in the file
#
# raddb/mods-config/sql/main/${dialect}/queries.conf
#
# If you are using realms, that configuration should be changed to use
# the Stripped-User-Name attribute. See the comments around sql_user_name
# for more information.
#
sql {
#
# The dialect of SQL being used.
#
# Allowed dialects are:
#
# mssql
# mysql
# oracle
# postgresql
# sqlite
# mongo
#
dialect = "sqlite"
#
# The driver module used to execute the queries. Since we
# don't know which SQL drivers are being used, the default is
# "rlm_sql_null", which just logs the queries to disk via the
# "logfile" directive, below.
#
# In order to talk to a real database, delete the next line,
# and uncomment the one after it.
#
# If the dialect is "mssql", then the driver should be set to
# one of the following values, depending on your system:
#
# rlm_sql_db2
# rlm_sql_firebird
# rlm_sql_freetds
# rlm_sql_iodbc
# rlm_sql_unixodbc
#
driver = "rlm_sql_null"
# driver = "rlm_sql_${dialect}"
#
# Driver-specific subsections. They will only be loaded and
# used if "driver" is something other than "rlm_sql_null".
# When a real driver is used, the relevant driver
# configuration section is loaded, and all other driver
# configuration sections are ignored.
#
sqlite {
# Path to the sqlite database
filename = "/tmp/freeradius.db"
# How long to wait for write locks on the database to be
# released (in ms) before giving up.
busy_timeout = 200
# If the file above does not exist and bootstrap is set
# a new database file will be created, and the SQL statements
# contained within the bootstrap file will be executed.
bootstrap = "${modconfdir}/${..:name}/main/sqlite/schema.sql"
}
mysql {
# If any of the files below are set, TLS encryption is enabled
tls {
ca_file = "/etc/ssl/certs/my_ca.crt"
ca_path = "/etc/ssl/certs/"
certificate_file = "/etc/ssl/certs/private/client.crt"
private_key_file = "/etc/ssl/certs/private/client.key"
cipher = "DHE-RSA-AES256-SHA:AES128-SHA"
tls_required = yes
tls_check_cert = no
tls_check_cert_cn = no
}
# If yes, (or auto and libmysqlclient reports warnings are
# available), will retrieve and log additional warnings from
# the server if an error has occurred. Defaults to 'auto'
warnings = auto
}
postgresql {
# unlike MySQL, which has a tls{} connection configuration, postgresql
# uses its connection parameters - see the radius_db option below in
# this file
# Send application_name to the postgres server
# Only supported in PG 9.0 and greater. Defaults to no.
send_application_name = yes
}
#
# Configuration for Mongo.
#
# Note that the Mongo driver is experimental. The FreeRADIUS developers
# are unable to help with the syntax of the Mongo queries. Please see
# the Mongo documentation for that syntax.
#
# The Mongo driver supports only the following methods:
#
# aggregate
# findAndModify
# findOne
# insert
#
# For examples, see the query files:
#
# raddb/mods-config/sql/main/mongo/queries.conf
# raddb/mods-config/sql/main/ippool/queries.conf
#
# In order to use findAndModify with an aggregation pipeline, make
# sure that you are running MongoDB version 4.2 or greater. FreeRADIUS
# assumes that the parameters passed to the methods are supported by the
# version of MongoDB which it is connected to.
#
mongo {
#
# The application name to use.
#
appname = "freeradius"
#
# The TLS parameters here map directly to the Mongo TLS configuration
#
tls {
certificate_file = /path/to/file
certificate_password = "password"
ca_file = /path/to/file
ca_dir = /path/to/directory
crl_file = /path/to/file
weak_cert_validation = false
allow_invalid_hostname = false
}
}
# Connection info:
#
# server = "localhost"
# port = 3306
# login = "radius"
# password = "radpass"
# Connection info for Mongo
# Authentication Without SSL
# server = "mongodb://USER:PASSWORD@192.16.0.2:PORT/DATABASE?authSource=admin&ssl=false"
# Authentication With SSL
# server = "mongodb://USER:PASSWORD@192.16.0.2:PORT/DATABASE?authSource=admin&ssl=true"
# Authentication with Certificate
# Use this command for retrieve Derived username:
# openssl x509 -in mycert.pem -inform PEM -subject -nameopt RFC2253
# server = mongodb://<DERIVED USERNAME>@192.168.0.2:PORT/DATABASE?authSource=$external&ssl=true&authMechanism=MONGODB-X509
# Database table configuration for everything except Oracle
radius_db = "radius"
# If you are using Oracle then use this instead
# radius_db = "(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=localhost)(PORT=1521))(CONNECT_DATA=(SID=your_sid)))"
# If you're using postgresql this can also be used instead of the connection info parameters
# radius_db = "dbname=radius host=localhost user=radius password=raddpass"
# PostgreSQL doesn't take tls{} options in its module config like mysql does - if you want to
# use SSL connections then use this form of connection info parameter
# radius_db = "host=localhost port=5432 dbname=radius user=radius password=raddpass sslmode=verify-full sslcert=/etc/ssl/client.crt sslkey=/etc/ssl/client.key sslrootcert=/etc/ssl/ca.crt"
# If you want both stop and start records logged to the
# same SQL table, leave this as is. If you want them in
# different tables, put the start table in acct_table1
# and stop table in acct_table2
acct_table1 = "radacct"
acct_table2 = "radacct"
# Allow for storing data after authentication
postauth_table = "radpostauth"
# Tables containing 'check' items
authcheck_table = "radcheck"
groupcheck_table = "radgroupcheck"
# Tables containing 'reply' items
authreply_table = "radreply"
groupreply_table = "radgroupreply"
# Table to keep group info
usergroup_table = "radusergroup"
# If set to 'yes' (default) we read the group tables unless Fall-Through = no in the reply table.
# If set to 'no' we do not read the group tables unless Fall-Through = yes in the reply table.
# read_groups = yes
# If set to 'yes' (default) we read profiles unless Fall-Through = no in the groupreply table.
# If set to 'no' we do not read profiles unless Fall-Through = yes in the groupreply table.
# read_profiles = yes
# Remove stale session if checkrad does not see a double login
delete_stale_sessions = yes
# Write SQL queries to a logfile. This is potentially useful for tracing
# issues with authorization queries. See also "logfile" directives in
# mods-config/sql/main/*/queries.conf. You can enable per-section logging
# by enabling "logfile" there, or global logging by enabling "logfile" here.
#
# Per-section logging can be disabled by setting "logfile = ''"
# logfile = ${logdir}/sqllog.sql
# Set the maximum query duration and connection timeout
# for rlm_sql_mysql.
# query_timeout = 5
# As of version 3.0, the "pool" section has replaced the
# following configuration items:
#
# num_sql_socks
# connect_failure_retry_delay
# lifetime
# max_queries
#
# The connection pool is new for 3.0, and will be used in many
# modules, for all kinds of connection-related activity.
#
# When the server is not threaded, the connection pool
# limits are ignored, and only one connection is used.
#
# If you want to have multiple SQL modules re-use the same
# connection pool, use "pool = name" instead of a "pool"
# section. e.g.
#
# sql sql1 {
# ...
# pool {
# ...
# }
# }
#
# # sql2 will use the connection pool from sql1
# sql sql2 {
# ...
# pool = sql1
# }
#
pool {
# Connections to create during module instantiation.
# If the server cannot create specified number of
# connections during instantiation it will exit.
# Set to 0 to allow the server to start without the
# database being available.
start = ${thread[pool].start_servers}
# Minimum number of connections to keep open
min = ${thread[pool].min_spare_servers}
# Maximum number of connections
#
# If these connections are all in use and a new one
# is requested, the request will NOT get a connection.
#
# Setting 'max' to LESS than the number of threads means
# that some threads may starve, and you will see errors
# like 'No connections available and at max connection limit'
#
# Setting 'max' to MORE than the number of threads means
# that there are more connections than necessary.
max = ${thread[pool].max_servers}
# Spare connections to be left idle
#
# NOTE: Idle connections WILL be closed if "idle_timeout"
# is set. This should be less than or equal to "max" above.
spare = ${thread[pool].max_spare_servers}
# Number of uses before the connection is closed
#
# 0 means "infinite"
uses = 0
# The number of seconds to wait after the server tries
# to open a connection, and fails. During this time,
# no new connections will be opened.
retry_delay = 30
# The lifetime (in seconds) of the connection
lifetime = 0
# idle timeout (in seconds). A connection which is
# unused for this length of time will be closed.
idle_timeout = 60
# NOTE: All configuration settings are enforced. If a
# connection is closed because of "idle_timeout",
# "uses", or "lifetime", then the total number of
# connections MAY fall below "min". When that
# happens, it will open a new connection. It will
# also log a WARNING message.
#
# The solution is to either lower the "min" connections,
# or increase lifetime/idle_timeout.
}
# Set to 'yes' to read radius clients from the database ('nas' table)
# Clients will ONLY be read on server startup.
#
# A client can be linked to a virtual server via the SQL
# module. This link is done via the following process:
#
# If there is no listener in a virtual server, SQL clients
# are added to the global list for that virtual server.
#
# If there is a listener, and the first listener does not
# have a "clients=..." configuration item, SQL clients are
# added to the global list.
#
# If there is a listener, and the first one does have a
# "clients=..." configuration item, SQL clients are added to
# that list. The client { ... } entries configured in that list are
# also added for that listener.
#
# The only issue is if you have multiple listeners in a
# virtual server, each with a different client list, then
# the SQL clients are added only to the first listener.
#
# read_clients = yes
# Table to keep radius client info
client_table = "nas"
#
# The group attribute specific to this instance of rlm_sql
#
# This entry should be used for additional instances (sql foo {})
# of the SQL module.
# group_attribute = "${.:instance}-SQL-Group"
# This entry should be used for the default instance (sql {})
# of the SQL module.
group_attribute = "SQL-Group"
# Read database-specific queries
$INCLUDE ${modconfdir}/${.:name}/main/${dialect}/queries.conf
}

View file

@ -1,49 +0,0 @@
# Configuration for the SQL based Map (rlm_sql_map)
sql_map {
# SQL instance to use (from mods-available/sql)
#
# If you have multiple sql instances, such as "sql sql1 {...}",
# use the *instance* name here: sql1.
sql_module_instance = "sql"
# This is duplicative of info available in the SQL module, but
# we have to list it here as we do not yet support nested
# reference expansions.
dialect = "mysql"
# The SQL query whose result columns are mapped onto RADIUS
# attributes by the "update" section below.
#
# NOTE(review): the value below is a placeholder; replace it with a
# real SELECT statement before enabling this module.
query = "SELECT ... FROM ... "
#
# Mapping of SQL columns to RADIUS dictionary attributes.
#
# WARNING: Although this format is almost identical to the unlang
# update section format, it does *NOT* mean that you can use other
# unlang constructs in module configuration files.
#
# Configuration items are in the format:
# <radius attr> <op> <sql column number>
#
# Where:
# <radius attr>: Is the destination RADIUS attribute
# with any valid list and request qualifiers.
# <op>: Is any assignment attribute (=, :=, +=, -=).
# <column num>: The column number (not name), starting from 0
#
# Request and list qualifiers may also be placed after the 'update'
# section name to set defaults destination requests/lists
# for unqualified RADIUS attributes.
#
update {
control:Password-With-Header += 0
# control:NT-Password := 1
# reply:Reply-Message := 2
# reply:Tunnel-Type := 3
# reply:Tunnel-Medium-Type := 4
# reply:Tunnel-Private-Group-ID := 5
}
# If the 'query' results in multiple rows, it creates the <radius attr>[*] array entry.
# multiple_rows = yes
}

View file

@ -1,115 +0,0 @@
# Rather than maintaining separate (GDBM) databases of
# accounting info for each counter, this module uses the data
# stored in the radacct table by the sql modules. This
# module NEVER does any database INSERTs or UPDATEs. It is
# totally dependent on the SQL module to process Accounting
# packets.
#
# The 'sql_module_instance' parameter holds the instance of the sql
# module to use when querying the SQL database. Normally it
# is just "sql". If you define more than one SQL module
# instance (usually for failover situations), you can
# specify which module has access to the Accounting Data
# (radacct table).
#
# The 'reset' parameter defines when the counters are all
# reset to zero. It can be hourly, daily, weekly, monthly or
# never. It can also be user defined. It should be of the
# form:
# num[hdwm] where:
# h: hours, d: days, w: weeks, m: months
# If the letter is omitted, days will be assumed. For example:
# reset = 10h (reset every 10 hours)
# reset = 12 (reset every 12 days)
#
# The 'key' parameter specifies the unique identifier for the
# counter records (usually 'User-Name').
#
# The 'query' parameter specifies the SQL query used to get
# the current Counter value from the database. There are 2
# parameters that can be used in the query:
# %%b unix time value of beginning of reset period
# %%e unix time value of end of reset period
#
# The 'check_name' parameter is the name of the 'check'
# attribute to use to access the counter in the 'users' file
# or SQL radcheck or radgroupcheck tables.
#
# DEFAULT Max-Daily-Session > 3600, Auth-Type = Reject
# Reply-Message = "You've used up more than one hour today"
#
# The "dailycounter" (or any other sqlcounter module) should be added
# to "post-auth" section. It will then update the Session-Timeout
# attribute in the reply. If there is no Session-Timeout attribute,
# the module will add one. If there is an attribute, the sqlcounter
# module will make sure that the value is no higher than the limit.
#
# Daily session-time counter.  Accumulates Daily-Session-Time from the
# accounting data and compares it against the Max-Daily-Session check
# item; when listed in "post-auth" it caps Session-Timeout in the reply
# (see the notes above).
sqlcounter dailycounter {
sql_module_instance = sql
dialect = ${modules.sql.dialect}
counter_name = Daily-Session-Time
check_name = Max-Daily-Session
reply_name = Session-Timeout
key = User-Name
reset = daily
# Dialect-specific counting query
$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
}
# Weekly session-time counter: same as dailycounter above, but the
# counter resets weekly and is checked against Max-Weekly-Session.
sqlcounter weeklycounter {
sql_module_instance = sql
dialect = ${modules.sql.dialect}
counter_name = Weekly-Session-Time
check_name = Max-Weekly-Session
reply_name = Session-Timeout
key = User-Name
reset = weekly
# Dialect-specific counting query
$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
}
# Monthly session-time counter: same as dailycounter above, but the
# counter resets monthly and is checked against Max-Monthly-Session.
sqlcounter monthlycounter {
sql_module_instance = sql
dialect = ${modules.sql.dialect}
counter_name = Monthly-Session-Time
check_name = Max-Monthly-Session
reply_name = Session-Timeout
key = User-Name
reset = monthly
# Dialect-specific counting query
$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
}
# Lifetime session-time counter: never resets, checked against the
# Max-All-Session check item.  No reply_name is configured here, so
# per the notes above no reply attribute is capped by this instance.
sqlcounter noresetcounter {
sql_module_instance = sql
dialect = ${modules.sql.dialect}
counter_name = Max-All-Session-Time
check_name = Max-All-Session
key = User-Name
reset = never
# Dialect-specific counting query
$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
}
#
# Set an account to expire T seconds after first login.
# Requires the Expire-After attribute to be set, in seconds.
# You may need to edit raddb/dictionary to add the Expire-After
# attribute.
# Expire an account a fixed time after its first login.  Requires the
# Expire-After check attribute, in seconds; see the note above about
# adding it to raddb/dictionary if needed.
sqlcounter expire_on_login {
sql_module_instance = sql
dialect = ${modules.sql.dialect}
counter_name = Expire-After-Initial-Login
check_name = Expire-After
key = User-Name
reset = never
# Dialect-specific counting query
$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
}

View file

@ -1,115 +0,0 @@
# Configuration for the SQL based IP Pool module (rlm_sqlippool)
#
# The database schemas are available at:
#
# raddb/mods-config/sql/ippool/<DB>/schema.sql
#
# $Id: 3d98ca9e0fca4f8df2657d53a15a2c52756b45e1 $
sqlippool {
# SQL instance to use (from mods-available/sql)
#
# If you have multiple sql instances, such as "sql sql1 {...}",
# use the *instance* name here: sql1.
sql_module_instance = "sql"
# This is duplicative of info available in the SQL module, but
# we have to list it here as we do not yet support nested
# reference expansions.
dialect = "mysql"
# Name of the check item attribute to be used as a key in the SQL queries
pool_name = "Pool-Name"
# SQL table to use for ippool range and lease info
ippool_table = "radippool"
# IP lease duration. (Leases expire even if Acct Stop packet is lost)
#
# Note that you SHOULD also set Session-Timeout to this value!
# That way the NAS will automatically kick the user offline when the
# lease expires.
#
lease_duration = 3600
#
# Timeout between each consecutive 'allocate_clear' queries (default: 1s)
# This will avoid having too many deadlock issues, especially on MySQL backend.
#
allocate_clear_timeout = 1
#
# As of 3.0.16, the 'ipv6 = yes' configuration is deprecated.
# You should use the "attribute_name" configuration item
# below, instead.
#
#
# The attribute to use for IP address assignment. The
# default is Framed-IP-Address. You can change this to any
# attribute which is IPv4 or IPv6.
#
# e.g. Framed-IPv6-Prefix, or Delegated-IPv6-Prefix.
#
# As of 3.0.16, all of the default queries have been updated to use
# this attribute_name. So you can do IPv6 address assignment simply
# by putting IPv6 addresses into the pool, and changing the following
# line to "Framed-IPv6-Prefix"
#
# Note that you MUST use separate pools for each attribute. i.e. one pool
# for Framed-IP-Address, a different one for Framed-IPv6-prefix, etc.
#
# This means configuring separate "sqlippool" instances, and different
# "ippool_table" in SQL. Then, populate the pool with addresses and
# it will all just work.
#
attribute_name = Framed-IP-Address
#
# Assign the IP address, even if the above attribute already exists
# in the reply.
#
# allow_duplicates = no
# The attribute in which an IP address hint may be supplied
req_attribute_name = Framed-IP-Address
# Attribute which should be considered unique per NAS
#
# Using NAS-Port gives behaviour similar to rlm_ippool. (And ACS)
# Using Calling-Station-Id works for NAS that send fixed NAS-Port
# ONLY change this if you know what you are doing!
pool_key = "%{NAS-Port}"
# pool_key = "%{Calling-Station-Id}"
################################################################
#
# WARNING: MySQL (MyISAM) has certain limitations that means it can
# hand out the same IP address to 2 different users.
#
# We suggest using an SQL DB with proper transaction
# support, such as PostgreSQL, or using MySQL
# with InnoDB.
#
################################################################
# These messages are added to the "control" items, as
# Module-Success-Message. They are not logged anywhere else,
# unlike previous versions. If you want to have them logged
# to a file, see the "linelog" module, and create an entry
# which writes Module-Success-Message message.
#
messages {
exists = "Existing IP: %{reply:${..attribute_name}} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
success = "Allocated IP: %{reply:${..attribute_name}} from %{control:${..pool_name}} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
clear = "Released IP %{request:${..attribute_name}} (did %{Called-Station-Id} cli %{Calling-Station-Id} user %{User-Name})"
failed = "IP Allocation FAILED from %{control:${..pool_name}} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
nopool = "No ${..pool_name} defined (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
}
$INCLUDE ${modconfdir}/sql/ippool/${dialect}/queries.conf
}

View file

@ -1,4 +0,0 @@
# Configuration for the unbound DNS resolver module (instance name "dns").
unbound dns {
# Optional unbound configuration file for this instance.
# filename = "${raddbdir}/mods-config/unbound/default.conf"
# Resolution timeout.  NOTE(review): units are not documented here —
# presumably milliseconds; confirm against the rlm_unbound docs.
# timeout = 3000
}

View file

@ -1,165 +0,0 @@
#
# The WiMAX module currently takes no configuration.
#
# ## Instructions for v1 and v2.0 WiMAX
#
# It should be listed in the "authorize" and "preacct" sections.
# This enables the module to fix the horrible binary version
# of Calling-Station-Id to the normal format, as specified in
# RFC 3580, Section 3.21.
#
# In order to calculate the various WiMAX keys, the module should
# be listed in the "post-auth" section. If EAP authentication
# has been used, AND the EAP method derives MSK and EMSK, then
# the various WiMAX keys can be calculated.
#
# Some useful things to remember:
#
# WiMAX-MSK = EAP MSK, but is 64 octets.
#
# MIP-RK-1 = HMAC-SHA256(ESMK, "miprk@wimaxforum.org" | 0x00020001)
# MIP-RK-2 = HMAC-SHA256(ESMK, MIP-RK-1 | "miprk@wimaxforum.org" | 0x00020002)
# MIP-RK = MIP-RK-1 | MIP-RK-2
#
# MIP-SPI = first 4 octets of HMAC-SHA256(MIP-RK, "SPI CMIP PMIP")
# plus some magic... you've got to track *all* MIP-SPI's
# on your system!
#
# SPI-CMIP4 = MIP-SPI
# SPI-PMIP4 = MIP-SPI + 1
# SPI-CMIP6 = MIP-SPI + 2
#
# MN-NAI is the Mobile node NAI. You have to create it, and put
# it into the request or reply as something like:
#
# WiMAX-MN-NAI = "%{User-Name}"
#
# You will also have to have the appropriate IP address (v4 or v6)
# in order to calculate the keys below.
#
# Lifetimes are derived from Session-Timeout. It needs to be set
# to some useful number.
#
# The hash function below H() is HMAC-SHA1.
#
#
# MN-HA-CMIP4 = H(MIP-RK, "CMIP4 MN HA" | HA-IPv4 | MN-NAI)
#
# Where HA-IPv4 is WiMAX-hHA-IP-MIP4
# or maybe WiMAX-vHA-IP-MIP4
#
# Which goes into WiMAX-MN-hHA-MIP4-Key
# or maybe WiMAX-RRQ-MN-HA-Key
# or maybe even WiMAX-vHA-MIP4-Key
#
# The corresponding SPI is SPI-CMIP4, which is MIP-SPI,
#
# which goes into WiMAX-MN-hHA-MIP4-SPI
# or maybe WiMAX-RRQ-MN-HA-SPI
# or even WiMAX-MN-vHA-MIP4-SPI
#
# MN-HA-PMIP4 = H(MIP-RK, "PMIP4 MN HA" | HA-IPv4 | MN-NAI)
# MN-HA-CMIP6 = H(MIP-RK, "CMIP6 MN HA" | HA-IPv6 | MN-NAI)
#
# both with similar comments to above for MN-HA-CMIP4.
#
# In order to tell which one to use (CMIP4, PMIP4, or CMIP6),
# you have to set WiMAX-IP-Technology in the reply to one of
# the appropriate values.
#
#
# FA-RK = H(MIP-RK, "FA-RK")
#
# MN-FA = H(FA-RK, "MN FA" | FA-IP | MN-NAI)
#
# Where does the FA-IP come from? No idea...
#
#
# The next two keys (HA-RK and FA-HA) are not generated
# for every authentication request, but only on demand.
#
# HA-RK = 160-bit random number assigned by the AAA server
# to a specific HA.
#
# FA-HA = H(HA-RK, "FA-HA" | HA-IPv4 | FA-CoAv4 | SPI)
#
# where HA-IPv4 is as above.
# and FA-CoAv4 address of the FA as seen by the HA
# and SPI is the relevant SPI for the HA-RK.
#
# DHCP-RK = 160-bit random number assigned by the AAA server
# to a specific DHCP server. vDHCP-RK is the same
# thing.
#
#
#
# ## Instructions for v2.1 (LTE) WiMAX:
#
# When called from the "authorize" this module will detect the
# presence of the following attributes:
#
# request:WiMAX-Re-synchronization-Info
# control:WiMAX-SIM-Ki
# control:WiMAX-SIM-OPc
#
# If all attributes are present, (i.e. a known SIM is requesting a
# resync) then the module will attempt to extract the new SQN and
# save it in control:WiMAX-SIM-SQN. It will also save a copy of
# RAND from the request in control:WiMAX-SIM-RAND.
#
# The resulting value of SQN can then be saved in a database
# e.g. via a call to the sql module using some unlang
#
# When called in the "post_auth" section it looks for:
#
# control:WiMAX-SIM-Ki
# control:WiMAX-SIM-OPc
# control:WiMAX-SIM-AMF
# control:WiMAX-SIM-SQN
# request:WiMAX-Visited-PLMN-ID
#
# If all these are present then it will attempt to generate the
# keys for EPS AKA.
#
# First it checks for the presence of control:WiMAX-SIM-RAND and
# if it is not present it generates a new RAND value which is
# stored in reply:WiMAX-E-UTRAN-Vector-RAND. If it is present then
# the value is simply copied to the reply attribute.
#
# Then it calls the Milenage algorithm to generate:
#
# reply:WiMAX-E-UTRAN-Vector-XRES
# reply:WiMAX-E-UTRAN-Vector-AUTN
#
# And finally generates KASME which is stored in:
# reply:WiMAX-E-UTRAN-Vector-KASME
#
#
# NOTE: It is up to the system administrator to make sure that all
# the necessary "control" attributes are populated with the
# required values. The IMSI is likely to be found in User-Name in
# the request and this can be used as the key to grab the values
# from a database.
#
#
# WiMAX module instance.  See the long usage notes above for the key
# derivation performed in "authorize"/"preacct"/"post-auth".
wimax {
#
# Some WiMAX equipment requires that the MS-MPPE-*-Key
# attributes are sent in the Access-Accept, in addition to
# the WiMAX-MSK attribute.
#
# Other WiMAX equipment request that the MS-MPPE-*-Key
# attributes are NOT sent in the Access-Accept.
#
# By default, the EAP modules sends MS-MPPE-*-Key attributes.
# The default virtual server (raddb/sites-available/default)
# contains examples of adding the WiMAX-MSK.
#
# This configuration option makes the WiMAX module delete
# the MS-MPPE-*-Key attributes. The default is to leave
# them in place.
#
# If the keys are deleted (by setting this to "yes"), then
# the WiMAX-MSK attribute is automatically added to the reply.
delete_mppe_keys = no
}

View file

@ -1,158 +0,0 @@
#
# This module decrypts and validates Yubikey static and dynamic
# OTP tokens.
#
yubikey {
#
# The length (number of ASCII bytes) of the Public-ID portion
# of the OTP string.
#
# Yubikey defaults to a 6 byte ID (2 * 6 = 12)
# id_length = 12
#
# If true, the authorize method of rlm_yubikey will attempt to split the
# value of User-Password, into the user's password, and the OTP token.
#
# If enabled and successful, the value of &request:User-Password will be
# truncated and &request:Yubikey-OTP will be added.
#
# split = yes
#
# Decrypt mode - Tokens will be decrypted and processed locally
#
# The module itself does not provide persistent storage as this
# would be duplicative of functionality already in the server.
#
# Yubikey authentication needs two attributes retrieved from
# persistent storage:
# * &control:Yubikey-Key - The AES key used to decrypt the OTP data.
# The Yubikey-Public-Id and/or User-Name
# attributes may be used to retrieve the key.
# * &control:Yubikey-Counter - This is compared with the counter in the OTP
# data and used to prevent replay attacks.
# This attribute will also be available in
# the request list after successful
# decryption.
#
# Yubikey-Counter isn't strictly required, but the server will
# generate warnings if it's not present when yubikey.authenticate
# is called.
#
# These attributes are available after authorization:
# * &request:Yubikey-Public-ID - The public portion of the OTP string.
# and additionally if 'split' is set:
# * &request:Yubikey-OTP - The OTP portion of User-Password.
#
# These attributes are available after authentication (if successful):
# * &request:Yubikey-Private-ID - The encrypted ID included in OTP data,
# must be verified if tokens share keys.
# * &request:Yubikey-Counter - The last counter value (should be recorded).
# * &request:Yubikey-Timestamp - Token's internal clock (mainly useful for
# debugging).
# * &request:Yubikey-Random - Randomly generated value from the token.
#
decrypt = no
#
# Validation mode - Tokens will be validated against a Yubicloud server
#
validate = no
#
# Settings for validation mode.
#
validation {
#
# URL of validation server, multiple URL config items may be used
# to list multiple servers.
#
# - %d is a placeholder for public ID of the token
# - %s is a placeholder for the token string itself
#
# If no URLs are listed, will default to the default URLs in the
# ykclient library, which point to the yubico validation servers.
servers {
# uri = 'https://api.yubico.com/wsapi/2.0/verify?id=%d&otp=%s'
# uri = 'https://api2.yubico.com/wsapi/2.0/verify?id=%d&otp=%s'
}
#
# API Client ID
#
# Must be set to your client id for the validation server.
#
# client_id = 00000
#
# API Secret key (Base64 encoded)
#
# Must be set to your API key for the validation server.
#
# api_key = '000000000000000000000000'
#
# Connection pool parameters
#
pool {
# Connections to create during module instantiation.
# If the server cannot create specified number of
# connections during instantiation it will exit.
# Set to 0 to allow the server to start without the
# yubikey server being available.
start = ${thread[pool].start_servers}
# Minimum number of connections to keep open
min = ${thread[pool].min_spare_servers}
# Maximum number of connections
#
# If these connections are all in use and a new one
# is requested, the request will NOT get a connection.
#
# Setting 'max' to LESS than the number of threads means
# that some threads may starve, and you will see errors
# like 'No connections available and at max connection limit'
#
# Setting 'max' to MORE than the number of threads means
# that there are more connections than necessary.
max = ${thread[pool].max_servers}
# Number of uses before the connection is closed
#
# NOTE: A setting of 0 means infinite (no limit).
uses = 0
# The number of seconds to wait after the server tries
# to open a connection, and fails. During this time,
# no new connections will be opened.
retry_delay = 30
# The lifetime (in seconds) of the connection
#
# NOTE: A setting of 0 means infinite (no limit).
lifetime = 0
# The idle timeout (in seconds). A connection which is
# unused for this length of time will be closed.
#
# NOTE: A setting of 0 means infinite (no timeout).
idle_timeout = 60
# Cycle over all connections in a pool instead of concentrating
# connection use on a few connections.
spread = yes
# NOTE: All configuration settings are enforced. If a
# connection is closed because of "idle_timeout",
# "uses", or "lifetime", then the total number of
# connections MAY fall below "min". When that
# happens, it will open a new connection. It will
# also log a WARNING message.
#
# The solution is to either lower the "min" connections,
# or increase lifetime/idle_timeout.
}
}
}

View file

@ -1,22 +0,0 @@
The mods-config Directory
=========================
This directory contains module-specific configuration files. These
files are in a format different from the one used by the main
`radiusd.conf` files. Earlier versions of the server had many
module-specific files in the main `raddb` directory. The directory
contained many files, and it was not clear which files did what.
For Version 3 of FreeRADIUS, we have moved to a consistent naming
scheme. Each module-specific configuration file is placed in this
directory, in a subdirectory named for the module. Where necessary,
files in the subdirectory have been named for the processing section
where they are used.
For example, the `users` file is now located in
`mods-config/files/authorize`. That filename tells us three things:
1. The file is used in the `authorize` section.
2. The file is used by the `files` module.
3. It is a "module configuration" file, which is a specific format.

View file

@ -1,230 +0,0 @@
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
#
# Copyright 2002 The FreeRADIUS server project
# Copyright 2002 Boian Jordanov <bjordanov@orbitel.bg>
#
#
# Example code for use with rlm_perl
#
# You can use every module that comes with your perl distribution!
#
# If you are using DBI and do some queries to DB, please be sure to
# use the CLONE function to initialize the DBI connection to DB.
#
use strict;
use warnings;
# use ...
use Data::Dumper;
# Bring the global hashes into the package scope
our (%RAD_REQUEST, %RAD_REPLY, %RAD_CHECK, %RAD_STATE, %RAD_PERLCONF);
# This is the hash which holds the original request from radius
#my %RAD_REQUEST;
# In this hash you add values that will be returned to NAS.
#my %RAD_REPLY;
#This is for check items
#my %RAD_CHECK;
# This is the session-state
#my %RAD_STATE;
# This is configuration items from "config" perl module configuration section
#my %RAD_PERLCONF;
# Multi-value attributes are mapped to perl arrayrefs.
#
# update request {
# Filter-Id := 'foo'
# Filter-Id += 'bar'
# }
#
# This results in the following entry in %RAD_REQUEST:
#
# $RAD_REQUEST{'Filter-Id'} = [ 'foo', 'bar' ];
#
# Likewise, you can assign an arrayref to return multi-value attributes
#
# This the remapping of return values
#
# Return codes understood by the server core when a handler returns.
#
# NOTE: RLM_MODULE_FAIL (1) was missing from this list even though the
# numbering skipped 1; it is restored here so module code can report a
# hard failure, matching the upstream rlm_perl example.
use constant {
	RLM_MODULE_REJECT   => 0, # immediately reject the request
	RLM_MODULE_FAIL     => 1, # module failed, don't reply
	RLM_MODULE_OK       => 2, # the module is OK, continue
	RLM_MODULE_HANDLED  => 3, # the module handled the request, so stop
	RLM_MODULE_INVALID  => 4, # the module considers the request invalid
	RLM_MODULE_USERLOCK => 5, # reject the request (user is locked out)
	RLM_MODULE_NOTFOUND => 6, # user not found
	RLM_MODULE_NOOP     => 7, # module succeeded without doing anything
	RLM_MODULE_UPDATED  => 8, # OK (pairs modified)
	RLM_MODULE_NUMCODES => 9  # How many return codes there are
};
# Same as src/include/log.h
# Log levels; pass one of these as the first argument to
# &radiusd::radlog().  Values mirror src/include/log.h.
use constant {
L_AUTH => 2, # Authentication message
L_INFO => 3, # Informational message
L_ERR => 4, # Error message
L_WARN => 5, # Warning
L_PROXY => 6, # Proxy messages
L_ACCT => 7, # Accounting messages
L_DBG => 16, # Only displayed when debugging is enabled
L_DBG_WARN => 17, # Warning only displayed when debugging is enabled
L_DBG_ERR => 18, # Error only displayed when debugging is enabled
L_DBG_WARN_REQ => 19, # Less severe warning only displayed when debugging is enabled
L_DBG_ERR_REQ => 20, # Less severe error only displayed when debugging is enabled
};
# Global variables can persist across different calls to the module.
#
#
# {
# my %static_global_hash = ();
#
# sub post_auth {
# ...
# }
# ...
# }
# Function to handle authorize
# Entry point called by the server during the "authorize" section.
# May read/write the global %RAD_* hashes (none are touched here);
# returns an RLM_MODULE_* status code.
sub authorize {
# For debugging purposes only
# &log_request_attributes;
# Here's where your authorization code comes
# You can call another function from here:
&test_call;
return RLM_MODULE_OK;
}
# Function to handle authenticate
# Entry point for the "authenticate" section.  Example policy:
# rejects users whose User-Name starts with "baduser" (case-insensitive),
# otherwise accepts and sets h323-credit-amount in the reply based on
# the NAS client's "group" property (looked up via radiusd::xlat).
sub authenticate {
# For debugging purposes only
# &log_request_attributes;
if ($RAD_REQUEST{'User-Name'} =~ /^baduser/i) {
# Reject user and tell him why
$RAD_REPLY{'Reply-Message'} = "Denied access by rlm_perl function";
return RLM_MODULE_REJECT;
} else {
# Accept user and set some attribute
# NOTE(review): xlat may return an empty string if the client has no
# "group" property; the eq comparison then simply falls through.
if (&radiusd::xlat("%{client:group}") eq 'UltraAllInclusive') {
# User called from NAS with unlim plan set, set higher limits
$RAD_REPLY{'h323-credit-amount'} = "1000000";
} else {
$RAD_REPLY{'h323-credit-amount'} = "100";
}
return RLM_MODULE_OK;
}
}
# Function to handle preacct
# Entry point for the "preacct" section.  Stub: does nothing and
# reports success.
sub preacct {
# For debugging purposes only
# &log_request_attributes;
return RLM_MODULE_OK;
}
# Function to handle accounting
# Entry point for the "accounting" section.  Stub: only demonstrates
# calling a helper subroutine, then reports success.
sub accounting {
# For debugging purposes only
# &log_request_attributes;
# You can call another subroutine from here
&test_call;
return RLM_MODULE_OK;
}
# Function to handle checksimul
# Entry point for simultaneous-use checking ("checksimul").  Stub:
# does nothing and reports success.
sub checksimul {
# For debugging purposes only
# &log_request_attributes;
return RLM_MODULE_OK;
}
# Function to handle pre_proxy
# Entry point for the "pre-proxy" section.  Stub: does nothing and
# reports success.
sub pre_proxy {
# For debugging purposes only
# &log_request_attributes;
return RLM_MODULE_OK;
}
# Function to handle post_proxy
# Entry point for the "post-proxy" section.  Stub: does nothing and
# reports success.
sub post_proxy {
# For debugging purposes only
# &log_request_attributes;
return RLM_MODULE_OK;
}
# Function to handle post_auth
# Entry point for the "post-auth" section.  Stub: does nothing and
# reports success.
sub post_auth {
# For debugging purposes only
# &log_request_attributes;
return RLM_MODULE_OK;
}
# Function to handle xlat
# xlat entry point: loads external Perl code from $filename, wraps it
# in a handler sub and evaluates it.  Errors from the eval'd code are
# deliberately swallowed (best-effort), as in the original example.
#
# WARNING: this executes arbitrary Perl read from $filename — only
# point it at trusted files.
sub xlat {
	# For debugging purposes only
	# &log_request_attributes;
	# Avoid lexicalizing Perl's special sort variables $a/$b, which the
	# original did; use neutral names instead (log output is unchanged).
	my ($filename, $arg1, $arg2, $arg3, $arg4) = @_;
	&radiusd::radlog(L_DBG, "From xlat $filename ");
	&radiusd::radlog(L_DBG, "From xlat $arg1 $arg2 $arg3 $arg4 ");
	# Three-argument open with an explicit '<' mode and a lexical
	# filehandle: the two-argument form would treat leading '>', '|',
	# etc. in $filename as an open mode (command execution risk).
	open my $fh, '<', $filename or die "open '$filename' $!";
	my $sub = do { local $/; <$fh> };	# slurp the whole file
	close $fh;
	my $eval = qq{ sub handler{ $sub;} };
	eval $eval;			# compile the loaded code
	eval { main->handler; };	# run it; runtime errors are ignored
}
# Function to handle detach
# Called when the module instance is unloaded.  Stub: nothing to
# clean up in this example.
sub detach {
# For debugging purposes only
# &log_request_attributes;
}
#
# Some functions that can be called from other functions
#
# Placeholder helper invoked from authorize() and accounting() above;
# replace the body with site-specific logic.
sub test_call {
# Some code goes here
}
# Debug helper: dumps every attribute in the global %RAD_REQUEST hash
# to the server log at debug level via radiusd::radlog.
sub log_request_attributes {
# This shouldn't be done in production environments!
# This is only meant for debugging!
for (keys %RAD_REQUEST) {
&radiusd::radlog(L_DBG, "RAD_REQUEST: $_ = $RAD_REQUEST{$_}");
}
}

View file

@ -1,33 +0,0 @@
#
# This query properly handles calls that span from the
# previous reset period into the current period but
# involves more work for the SQL server than those
# below
#
query = "\
SELECT SUM(acctsessiontime - GREATEST((%%b - UNIX_TIMESTAMP(acctstarttime)), 0)) \
FROM radacct \
WHERE username = '%{${key}}' \
AND UNIX_TIMESTAMP(acctstarttime) + acctsessiontime > '%%b'"
#
# This query ignores calls that started in a previous
# reset period and continue into this one. But it
# is a little easier on the SQL server
#
#query = "\
# SELECT SUM(acctsessiontime) \
# FROM radacct \
# WHERE username = '%{${key}}' \
# AND acctstarttime > FROM_UNIXTIME('%%b')"
#
# This query is the same as above, but demonstrates an
# additional counter parameter '%%e' which is the
# timestamp for the end of the period
#
#query = "\
# SELECT SUM(acctsessiontime) \
# FROM radacct \
# WHERE username = '%{${key}}' \
# AND acctstarttime BETWEEN FROM_UNIXTIME('%%b') AND FROM_UNIXTIME('%%e')"

View file

@ -1,6 +0,0 @@
query = "\
SELECT IFNULL( MAX(TIME_TO_SEC(TIMEDIFF(NOW(), acctstarttime))),0) \
FROM radacct \
WHERE UserName='%{${key}}' \
ORDER BY acctstarttime \
LIMIT 1;"

View file

@ -1,34 +0,0 @@
#
# This query properly handles calls that span from the
# previous reset period into the current period but
# involves more work for the SQL server than those
# below
#
query = "\
SELECT SUM(acctsessiontime - GREATEST((%%b - UNIX_TIMESTAMP(acctstarttime)), 0)) \
FROM radacct \
WHERE username='%{${key}}' \
AND UNIX_TIMESTAMP(acctstarttime) + acctsessiontime > '%%b'"
#
# This query ignores calls that started in a previous
# reset period and continue into this one. But it
# is a little easier on the SQL server
#
#query = "\
# SELECT SUM(acctsessiontime) \
# FROM radacct\
# WHERE username='%{${key}}' \
# AND acctstarttime > FROM_UNIXTIME('%%b')"
#
# This query is the same as above, but demonstrates an
# additional counter parameter '%%e' which is the
# timestamp for the end of the period
#
#query = "\
# SELECT SUM(acctsessiontime) \
# FROM radacct \
# WHERE username='%{${key}}' \
# AND acctstarttime BETWEEN FROM_UNIXTIME('%%b') \
# AND FROM_UNIXTIME('%%e')"

View file

@ -1,4 +0,0 @@
query = "\
SELECT IFNULL(SUM(AcctSessionTime),0) \
FROM radacct \
WHERE UserName='%{${key}}'"

View file

@ -1,11 +0,0 @@
#
# This query properly handles calls that span from the
# previous reset period into the current period but
# involves more work for the SQL server than those
# below
#
query = "\
SELECT SUM(acctsessiontime - GREATEST((%%b - UNIX_TIMESTAMP(acctstarttime)), 0)) \
FROM radacct \
WHERE username = '%{${key}}' \
AND UNIX_TIMESTAMP(acctstarttime) + acctsessiontime > '%%b'"

View file

@ -1,34 +0,0 @@
#
# This query properly handles calls that span from the
# previous reset period into the current period but
# involves more work for the SQL server than those
# below
#
query = "\
SELECT SUM(AcctSessionTime - GREATEST((%%b - EXTRACT(epoch FROM AcctStartTime)), 0)) \
FROM radacct \
WHERE UserName='%{${key}}' \
AND EXTRACT(epoch FROM AcctStartTime) + AcctSessionTime > '%%b'"
#
# This query ignores calls that started in a previous
# reset period and continue into this one. But it
# is a little easier on the SQL server
#
#query = "\
# SELECT SUM(AcctSessionTime) \
# FROM radacct \
# WHERE UserName='%{${key}}' \
# AND EXTRACT(epoch FROM AcctStartTime) > '%%b'"
#
# This query is the same as above, but demonstrates an
# additional counter parameter '%%e' which is the
# timestamp for the end of the period
#
#query = "\
# SELECT SUM(AcctSessionTime) \
# FROM radacct \
# WHERE UserName='%{${key}}' \
# AND EXTRACT(epoch FROM AcctStartTime) BETWEEN '%%b' \
# AND '%%e'"

View file

@ -1,6 +0,0 @@
query = "\
SELECT EXTRACT(EPOCH FROM (NOW() - acctstarttime)) \
FROM radacct \
WHERE UserName='%{${key}}' \
ORDER BY acctstarttime \
LIMIT 1;"

View file

@ -1,31 +0,0 @@
# This query properly handles calls that span from the
# previous reset period into the current period but
# involves more work for the SQL server than those
# below
query = "\
SELECT SUM(AcctSessionTime - GREATEST((%%b - EXTRACT(epoch FROM AcctStartTime)), 0)) \
FROM radacct \
WHERE UserName='%{${key}}' \
AND EXTRACT(epoch FROM AcctStartTime) + AcctSessionTime > '%%b'"
#
# This query ignores calls that started in a previous
# reset period and continue into this one. But it
# is a little easier on the SQL server
#
#query = "\
# SELECT SUM(AcctSessionTime) \
# FROM radacct \
# WHERE UserName='%{${key}}' \
# AND EXTRACT(epoch FROM AcctStartTime) > '%%b'"
#
# This query is the same as above, but demonstrates an
# additional counter parameter '%%e' which is the
# timestamp for the end of the period
#
#query = "\
# SELECT SUM(AcctSessionTime) \
# FROM radacct \
# WHERE UserName='%{${key}}' \
# AND EXTRACT(epoch FROM AcctStartTime) BETWEEN '%%b' AND '%%e'"

View file

@ -1,4 +0,0 @@
query = "\
SELECT SUM(AcctSessionTime) \
FROM radacct \
WHERE UserName='%{${key}}'"

View file

@ -1,12 +0,0 @@
#
# This query properly handles calls that span from the
# previous reset period into the current period but
# involves more work for the SQL server than those
# below
#
query = "\
SELECT SUM(AcctSessionTime - GREATEST((%%b - EXTRACT(epoch FROM AcctStartTime)), 0)) \
FROM radacct \
WHERE UserName='%{${key}}' \
AND EXTRACT(epoch FROM AcctStartTime) + AcctSessionTime > '%%b'"

View file

@ -1,33 +0,0 @@
#
# This query properly handles calls that span from the
# previous reset period into the current period but
# involves more work for the SQL server than those
# below
#
query = "\
SELECT SUM(acctsessiontime - GREATEST((%%b - strftime('%%s', acctstarttime)), 0)) \
FROM radacct \
WHERE username = '%{${key}}' \
AND (strftime('%%s', acctstarttime) + acctsessiontime) > %%b"
#
# This query ignores calls that started in a previous
# reset period and continue into this one. But it
# is a little easier on the SQL server
#
#query = "\
# SELECT SUM(acctsessiontime) \
# FROM radacct \
# WHERE \username = '%{${key}}' \
# AND acctstarttime > %%b"
#
# This query is the same as above, but demonstrates an
# additional counter parameter '%%e' which is the
# timestamp for the end of the period
#
#query = "\
# SELECT SUM(acctsessiontime) FROM radacct \
# WHERE username = '%{${key}}' \
# AND acctstarttime BETWEEN %%b \
# AND %%e"

View file

@ -1,6 +0,0 @@
#
# Seconds elapsed since the user's oldest radacct session started
# (0 if the start time is in the future).
#
# Fixed for the SQLite dialect used by the surrounding queries:
# NOW() does not exist in SQLite -- strftime() takes the literal
# 'now' -- and GREATEST() is MySQL/PostgreSQL (SQLite gained it only
# in 3.44.0); the scalar MAX(a, b) is the portable SQLite equivalent
# and is used here to clamp negatives to 0.
#
query = "\
SELECT MAX(strftime('%%s', 'now') - strftime('%%s', acctstarttime), 0) AS expires \
FROM radacct \
WHERE username = '%{${key}}' \
ORDER BY acctstarttime \
LIMIT 1;"

View file

@ -1,34 +0,0 @@
#
# This query properly handles calls that span from the
# previous reset period into the current period but
# involves more work for the SQL server than those
# below
#
# NOTE(review): GREATEST() only exists in SQLite 3.44.0+; older
# releases need the scalar MAX(a, b) instead.
#
query = "\
SELECT SUM(acctsessiontime - GREATEST((%%b - strftime('%%s', acctstarttime)), 0)) \
FROM radacct \
WHERE username = '%{${key}}' AND \
(strftime('%%s', acctstarttime) + acctsessiontime) > %%b"
#
# This query ignores calls that started in a previous
# reset period and continue into this one. But it
# is a little easier on the SQL server
#
#query = "\
# SELECT SUM(acctsessiontime) \
# FROM radacct \
# WHERE username = '%{${key}}' \
# AND acctstarttime > %%b"
#
# This query is the same as above, but demonstrates an
# additional counter parameter '%%e' which is the
# timestamp for the end of the period
#
#query = "\
# SELECT SUM(acctsessiontime) \
# FROM radacct \
# WHERE username = '%{${key}}' \
# AND acctstarttime BETWEEN %%b \
# AND %%e"

View file

@ -1,4 +0,0 @@
#
# All-time usage total. IFNULL() makes the counter read 0 (rather
# than NULL) for a user with no radacct rows.
#
query = "\
SELECT IFNULL(SUM(acctsessiontime),0) \
FROM radacct \
WHERE username = '%{${key}}'"

View file

@ -1,12 +0,0 @@
#
# This query properly handles calls that span from the
# previous reset period into the current period but
# involves more work for the SQL server than those
# below
#
# NOTE(review): GREATEST() only exists in SQLite 3.44.0+; older
# releases need the scalar MAX(a, b) instead.
#
query = "\
SELECT SUM(acctsessiontime - GREATEST((%%b - strftime('%%s', acctstarttime)), 0)) \
FROM radacct \
WHERE username = '%{${key}}' \
AND (strftime('%%s', acctstarttime) + acctsessiontime) > %%b"

View file

@ -1,50 +0,0 @@
# -*- text -*-
#
# cui/mysql/queries.conf -- Queries to update a MySQL CUI table.
#
# $Id: f8f18cab562e7321756cd1f3411bbc9897ef3377 $
#
# Record the CUI handed out at post-auth. INSERT IGNORE together
# with ON DUPLICATE KEY UPDATE makes this an upsert on the CUI
# table's primary key.
#
# NOTE(review): the '0000-00-00 00:00:00' sentinel is rejected by
# MySQL 5.7+ strict mode (NO_ZERO_DATE) -- confirm the server's
# sql_mode permits zero dates.
#
post-auth {
query = "\
INSERT IGNORE INTO ${..cui_table} \
(clientipaddress, callingstationid, username, cui, lastaccounting) \
VALUES \
('%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}', '%{Calling-Station-Id}', \
'%{User-Name}', '%{reply:Chargeable-User-Identity}', NULL) \
ON DUPLICATE KEY UPDATE \
lastaccounting='0000-00-00 00:00:00', \
cui='%{reply:Chargeable-User-Identity}'"
}
#
# Dispatch on Acct-Status-Type: start and interim-update refresh
# lastaccounting; stop removes the row.
#
accounting {
reference = "%{tolower:type.%{Acct-Status-Type}.query}"
type {
start {
query = "\
UPDATE ${....cui_table} SET \
lastaccounting = CURRENT_TIMESTAMP \
WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
AND callingstationid = '%{Calling-Station-Id}' \
AND username = '%{User-Name}' \
AND cui = '%{Chargeable-User-Identity}'"
}
interim-update {
query ="\
UPDATE ${....cui_table} SET \
lastaccounting = CURRENT_TIMESTAMP \
WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
AND callingstationid = '%{Calling-Station-Id}' \
AND username = '%{User-Name}' \
AND cui = '%{Chargeable-User-Identity}'"
}
stop {
query ="\
DELETE FROM ${....cui_table} \
WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
AND callingstationid = '%{Calling-Station-Id}' \
AND username = '%{User-Name}' \
AND cui = '%{Chargeable-User-Identity}'"
}
}
}

View file

@ -1,9 +0,0 @@
-- CUI (Chargeable-User-Identity) table for FreeRADIUS (MySQL).
-- One row per (username, clientipaddress, callingstationid).
CREATE TABLE `cui` (
`clientipaddress` varchar(46) NOT NULL default '',
`callingstationid` varchar(50) NOT NULL default '',
`username` varchar(64) NOT NULL default '',
`cui` varchar(32) NOT NULL default '',
`creationdate` timestamp NOT NULL default CURRENT_TIMESTAMP,
-- NOTE(review): a zero-date default is rejected under MySQL 5.7+
-- strict mode (NO_ZERO_DATE); confirm sql_mode before deploying.
`lastaccounting` timestamp NOT NULL default '0000-00-00 00:00:00',
PRIMARY KEY (`username`,`clientipaddress`,`callingstationid`)
-- NOTE(review): CHARSET=utf8 is MySQL's 3-byte utf8mb3, not utf8mb4.
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

View file

@ -1,47 +0,0 @@
# -*- text -*-
#
# cui/postgresql/queries.conf -- Queries to update a PostgreSQL CUI table.
#
# $Id: 6c2215f0abbe5cb30658ea541d525fd7a274c547 $
#
# Record the CUI handed out at post-auth. NOTE(review): this is a
# plain INSERT with no ON CONFLICT clause; duplicates are presumably
# absorbed by a rule in the accompanying schema -- verify.
#
post-auth {
query = "\
INSERT INTO ${..cui_table} \
(clientipaddress, callingstationid, username, cui) \
VALUES \
('%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}', '%{Calling-Station-Id}', \
'%{User-Name}', '%{reply:Chargeable-User-Identity}')"
}
#
# Dispatch on Acct-Status-Type: start and interim-update refresh
# lastaccounting; stop removes the row.
#
accounting {
reference = "%{tolower:type.%{Acct-Status-Type}.query}"
type {
start {
query = "\
UPDATE ${....cui_table} SET \
lastaccounting = now() \
WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
AND callingstationid = '%{Calling-Station-Id}' \
AND username = '%{User-Name}' \
AND cui = '%{Chargeable-User-Identity}'"
}
interim-update {
query ="\
UPDATE ${....cui_table} SET \
lastaccounting = now() \
WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
AND callingstationid = '%{Calling-Station-Id}' \
AND username = '%{User-Name}' \
AND cui = '%{Chargeable-User-Identity}'"
}
stop {
query ="\
DELETE FROM ${....cui_table} \
WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
AND callingstationid = '%{Calling-Station-Id}' \
AND username = '%{User-Name}' \
AND cui = '%{Chargeable-User-Identity}'"
}
}
}

View file

@ -1,14 +0,0 @@
-- CUI (Chargeable-User-Identity) table for FreeRADIUS (PostgreSQL).
-- One row per (username, clientipaddress, callingstationid);
-- lastaccounting of '-infinity' means no accounting seen yet.
CREATE TABLE cui (
clientipaddress INET NOT NULL DEFAULT '0.0.0.0',
callingstationid varchar(50) NOT NULL DEFAULT '',
username varchar(64) NOT NULL DEFAULT '',
cui varchar(32) NOT NULL DEFAULT '',
-- Fixed: was DEFAULT 'now()'. A quoted 'now' literal is coerced
-- once when the table is created, freezing the default at that
-- instant; the function call now() is evaluated per-insert.
creationdate TIMESTAMP with time zone NOT NULL default now(),
lastaccounting TIMESTAMP with time zone NOT NULL default '-infinity'::timestamp,
PRIMARY KEY (username, clientipaddress, callingstationid)
);
-- Rewrite a conflicting INSERT into an UPDATE (upsert for
-- PostgreSQL versions that predate INSERT ... ON CONFLICT).
CREATE RULE postauth_query AS ON INSERT TO cui
WHERE EXISTS(SELECT 1 FROM cui WHERE (username, clientipaddress, callingstationid)=(NEW.username, NEW.clientipaddress, NEW.callingstationid))
DO INSTEAD UPDATE cui SET lastaccounting ='-infinity'::timestamp with time zone, cui=NEW.cui WHERE (username, clientipaddress, callingstationid)=(NEW.username, NEW.clientipaddress, NEW.callingstationid);

View file

@ -1,47 +0,0 @@
# -*- text -*-
#
# cui/sqlite/queries.conf -- Queries to update a sqlite CUI table.
#
# $Id: 41741eb70ae9c428ba5230aaf9d9b84f95c050a9 $
#
# Record the CUI handed out at post-auth; INSERT OR REPLACE deletes
# any conflicting row and inserts the new one, acting as the upsert.
#
post-auth {
query = "\
INSERT OR REPLACE INTO ${..cui_table} \
(clientipaddress, callingstationid, username, cui, lastaccounting) \
VALUES \
('%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}', '%{Calling-Station-Id}', \
'%{User-Name}', '%{reply:Chargeable-User-Identity}', NULL)"
}
#
# Dispatch on Acct-Status-Type: start and interim-update refresh
# lastaccounting; stop removes the row.
#
accounting {
reference = "%{tolower:type.%{Acct-Status-Type}.query}"
type {
start {
query = "\
UPDATE ${....cui_table} SET \
lastaccounting = CURRENT_TIMESTAMP \
WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
AND callingstationid = '%{Calling-Station-Id}' \
AND username = '%{User-Name}' \
AND cui = '%{Chargeable-User-Identity}'"
}
interim-update {
query ="\
UPDATE ${....cui_table} SET \
lastaccounting = CURRENT_TIMESTAMP \
WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
AND callingstationid = '%{Calling-Station-Id}' \
AND username = '%{User-Name}' \
AND cui = '%{Chargeable-User-Identity}'"
}
stop {
query ="\
DELETE FROM ${....cui_table} \
WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
AND callingstationid = '%{Calling-Station-Id}' \
AND username = '%{User-Name}' \
AND cui = '%{Chargeable-User-Identity}'"
}
}
}

View file

@ -1,9 +0,0 @@
-- CUI (Chargeable-User-Identity) table for FreeRADIUS (SQLite).
-- NOTE(review): the backtick quoting and MySQL-style types/defaults
-- appear copied from the MySQL schema; SQLite tolerates them (it
-- treats declared column types as affinities) -- confirm intended.
CREATE TABLE `cui` (
`clientipaddress` varchar(46) NOT NULL default '',
`callingstationid` varchar(50) NOT NULL default '',
`username` varchar(64) NOT NULL default '',
`cui` varchar(32) NOT NULL default '',
`creationdate` timestamp NOT NULL default CURRENT_TIMESTAMP,
`lastaccounting` timestamp NOT NULL default '0000-00-00 00:00:00',
PRIMARY KEY (`username`,`clientipaddress`,`callingstationid`)
);

View file

@ -1,52 +0,0 @@
# -*- text -*-
#
# dhcp/mssql/queries.conf -- MSSQL configuration for DHCP schema (schema.sql)
#
# $Id: 8345c700465325f3cc99ad88f318f6730b07c648 $
# Safe characters list for sql queries. Everything else is replaced
# with their mime-encoded equivalents.
# The default list should be ok
# safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
#######################################################################
# Query config: Identifier
#######################################################################
# This is the identifier that will get substituted, escaped, and added
# as attribute 'SQL-User-Name'. '%{SQL-User-Name}' should be used
# below everywhere an identifier substitution is needed so you can
# be sure the identifier passed from the client is escaped properly.
#
sql_user_name = "%{control:DHCP-SQL-Option-Identifier}"
#######################################################################
# Attribute Lookup Queries
#######################################################################
# These queries setup the reply items in ${dhcpreply_table} and
# ${group_reply_query}. You can use any query/tables you want, but
# the return data for each row MUST be in the following order:
#
# 0. Row ID (currently unused)
# 1. Identifier
# 2. Item Attr Name
# 3. Item Attr Value
# 4. Item Attr Operation
#######################################################################
authorize_reply_query = "\
SELECT id, Identifier, Attribute, Value, op \
FROM ${dhcpreply_table} \
WHERE Identifier = '%{SQL-User-Name}' AND Context = '%{control:DHCP-SQL-Option-Context}' \
ORDER BY id"
authorize_group_reply_query = "\
SELECT id, GroupName, Attribute, Value, op \
FROM ${groupreply_table} \
WHERE GroupName = '%{${group_attribute}}' AND Context = '%{control:DHCP-SQL-Option-Context}' \
ORDER BY id"
# Groups are returned in ascending priority order.
group_membership_query = "\
SELECT GroupName \
FROM ${dhcpgroup_table} \
WHERE Identifier='%{SQL-User-Name}' AND Context = '%{control:DHCP-SQL-Option-Context}' \
ORDER BY priority"

View file

@ -1,91 +0,0 @@
-- $Id: 8584949f50d0e5a7c736e9ad52ad95d1e1ebc28d $
--
-- MSSQL schema for DHCP for FreeRADIUS
--
-- To load:
-- isql -S db_ip_addr -d db_name -U db_login -P db_passwd -i schema.sql
--
-- Table structure for table 'dhcpgroupreply'
-- (per-group DHCP reply attributes, looked up via Context+GroupName)
--
CREATE TABLE [dhcpgroupreply] (
[id] [int] IDENTITY (1, 1) NOT NULL,
[GroupName] [varchar] (64) NOT NULL,
[Attribute] [varchar] (32) NOT NULL,
[Value] [varchar] (253) NOT NULL,
[op] [char] (2) NULL,
[prio] [int] NOT NULL,
[Context] [varchar] (16) NOT NULL
) ON [PRIMARY]
GO
ALTER TABLE [dhcpgroupreply] WITH NOCHECK ADD
CONSTRAINT [DF_dhcpgroupreply_GroupName] DEFAULT ('') FOR [GroupName],
CONSTRAINT [DF_dhcpgroupreply_Attribute] DEFAULT ('') FOR [Attribute],
CONSTRAINT [DF_dhcpgroupreply_Value] DEFAULT ('') FOR [Value],
CONSTRAINT [DF_dhcpgroupreply_op] DEFAULT (null) FOR [op],
CONSTRAINT [DF_dhcpgroupreply_prio] DEFAULT (0) FOR [prio],
CONSTRAINT [DF_dhcpgroupreply_context] DEFAULT ('') FOR [Context],
CONSTRAINT [PK_dhcpgroupreply] PRIMARY KEY NONCLUSTERED
(
[id]
) ON [PRIMARY]
GO
CREATE INDEX [GroupName] ON [dhcpgroupreply]([Context],[GroupName]) ON [PRIMARY]
GO
--
-- Table structure for table 'dhcpreply'
-- (per-identifier DHCP reply attributes)
--
CREATE TABLE [dhcpreply] (
[id] [int] IDENTITY (1, 1) NOT NULL,
[Identifier] [varchar] (64) NOT NULL,
[Attribute] [varchar] (32) NOT NULL,
[Value] [varchar] (253) NOT NULL,
[op] [char] (2) NULL,
[Context] [varchar] (16) NOT NULL
) ON [PRIMARY]
GO
ALTER TABLE [dhcpreply] WITH NOCHECK ADD
CONSTRAINT [DF_dhcpreply_Identifier] DEFAULT ('') FOR [Identifier],
CONSTRAINT [DF_dhcpreply_Attribute] DEFAULT ('') FOR [Attribute],
CONSTRAINT [DF_dhcpreply_Value] DEFAULT ('') FOR [Value],
CONSTRAINT [DF_dhcpreply_op] DEFAULT (null) FOR [op],
CONSTRAINT [DF_dhcpreply_Context] DEFAULT ('') FOR [Context],
CONSTRAINT [PK_dhcpreply] PRIMARY KEY NONCLUSTERED
(
[id]
) ON [PRIMARY]
GO
CREATE INDEX [Identifier] ON [dhcpreply]([Context],[Identifier]) ON [PRIMARY]
GO
--
-- Table structure for table 'dhcpgroup'
-- (identifier-to-group membership)
--
-- NOTE(review): this table's ordering column is [Priority] whereas
-- dhcpgroupreply uses [prio], and unlike the other columns it gets
-- no DEFAULT constraint below -- confirm both are intentional.
--
CREATE TABLE [dhcpgroup] (
[id] [int] IDENTITY (1, 1) NOT NULL,
[Identifier] [varchar] (64) NOT NULL,
[GroupName] [varchar] (64) NULL,
[Priority] [int] NULL,
[Context] [varchar] (16) NULL
) ON [PRIMARY]
GO
ALTER TABLE [dhcpgroup] WITH NOCHECK ADD
CONSTRAINT [DF_dhcpgroup_Identifier] DEFAULT ('') FOR [Identifier],
CONSTRAINT [DF_dhcpgroup_GroupName] DEFAULT ('') FOR [GroupName],
CONSTRAINT [DF_dhcpgroup_Context] DEFAULT ('') FOR [Context],
CONSTRAINT [PK_dhcpgroup] PRIMARY KEY NONCLUSTERED
(
[id]
) ON [PRIMARY]
GO
CREATE INDEX [Identifier] ON [dhcpgroup]([Context],[Identifier]) ON [PRIMARY]
GO

View file

@ -1,75 +0,0 @@
# -*- text -*-
#
# dhcp/mysql/queries.conf -- MySQL configuration for DHCP schema (schema.sql)
#
# $Id: a28037bd5e273cfc59297e86484be666b09f2f6d $
# Use the driver specific SQL escape method.
#
# If you enable this configuration item, the "safe_characters"
# configuration is ignored. FreeRADIUS then uses the MySQL escape
# functions to escape input strings. The only downside to making this
# change is that the MySQL escaping method is not the same as the one
# used by FreeRADIUS. So characters which are NOT in the
# "safe_characters" list will now be stored differently in the database.
#
#auto_escape = yes
# Safe characters list for sql queries. Everything else is replaced
# with their mime-encoded equivalents.
# The default list should be ok
# Using 'auto_escape' is preferred
safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
#######################################################################
# Connection config
#######################################################################
# The character set is not configurable. The default character set of
# the mysql client library is used. To control the character set,
# create/edit my.cnf (typically in /etc/mysql/my.cnf or /etc/my.cnf)
# and enter
# [client]
# default-character-set = utf8
#
#######################################################################
# Query config: Identifier
#######################################################################
# This is the identifier that will get substituted, escaped, and added
# as attribute 'SQL-User-Name'. '%{SQL-User-Name}' should be used
# below everywhere an identifier substitution is needed so you can
# be sure the identifier passed from the client is escaped properly.
#
sql_user_name = "%{control:DHCP-SQL-Option-Identifier}"
#######################################################################
# Attribute Lookup Queries
#######################################################################
# These queries setup the reply items in ${dhcpreply_table} and
# ${group_reply_query}. You can use any query/tables you want, but
# the return data for each row MUST be in the following order:
#
# 0. Row ID (currently unused)
# 1. Identifier
# 2. Item Attr Name
# 3. Item Attr Value
# 4. Item Attr Operation
#######################################################################
authorize_reply_query = "\
SELECT id, identifier, attribute, value, op \
FROM ${dhcpreply_table} \
WHERE identifier = '%{SQL-User-Name}' AND context = '%{control:DHCP-SQL-Option-Context}' \
ORDER BY id"
authorize_group_reply_query = "\
SELECT id, groupname, attribute, value, op \
FROM ${groupreply_table} \
WHERE groupname = '%{${group_attribute}}' AND context = '%{control:DHCP-SQL-Option-Context}' \
ORDER BY id"
# Fixed: the column was misspelled 'groupnme', which made this query
# fail with an unknown-column error against the dhcpgroup table.
group_membership_query = "\
SELECT groupname \
FROM ${dhcpgroup_table} \
WHERE identifier='%{SQL-User-Name}' AND context = '%{control:DHCP-SQL-Option-Context}' \
ORDER BY priority"

View file

@ -1,47 +0,0 @@
#
# $Id: 85a121a9bed9e2bb2c2d24068dca259c5c547e73 $
#
# MySQL schema for DHCP for FreeRADIUS
#
# (The header previously read "PostgreSQL schema", but the syntax --
# '#' comments, auto_increment, int(11) unsigned and prefix indexes
# such as groupname(32) -- is MySQL-only.)
#
# Table structure for table 'dhcpgroupreply'
#
CREATE TABLE IF NOT EXISTS dhcpgroupreply (
id int(11) unsigned NOT NULL auto_increment,
groupname varchar(64) NOT NULL default '',
attribute varchar(64) NOT NULL default '',
op char(2) NOT NULL DEFAULT '=',
value varchar(253) NOT NULL default '',
context varchar(16) NOT NULL default '',
PRIMARY KEY (id),
KEY groupname (context,groupname(32))
);
#
# Table structure for table 'dhcpreply'
#
CREATE TABLE IF NOT EXISTS dhcpreply (
id int(11) unsigned NOT NULL auto_increment,
identifier varchar(253) NOT NULL default '',
attribute varchar(64) NOT NULL default '',
op char(2) NOT NULL DEFAULT '=',
value varchar(253) NOT NULL default '',
context varchar(16) NOT NULL default '',
PRIMARY KEY (id),
KEY identifier (context,identifier(32))
);
#
# Table structure for table 'dhcpgroup'
#
CREATE TABLE IF NOT EXISTS dhcpgroup (
id int(11) unsigned NOT NULL auto_increment,
identifier varchar(253) NOT NULL default '',
groupname varchar(64) NOT NULL default '',
priority int(11) NOT NULL default '1',
context varchar(16) NOT NULL default '',
PRIMARY KEY (id),
KEY identifier (context,identifier(32))
);

View file

@ -1,21 +0,0 @@
/*
* setup.sql -- MySQL commands for creating the RADIUS user.
*
* WARNING: You should change 'localhost' and 'radpass'
* to something else. Also update raddb/mods-available/sql
* with the new RADIUS password.
*
* WARNING: This example file is untested. Use at your own risk.
* Please send any bug fixes to the mailing list.
*
* $Id: d20a82c9ccb94cc1ec609a761b6a8f44d30e48c3 $
*/
/*
* Create default administrator for RADIUS
*/
CREATE USER 'radius'@'localhost' IDENTIFIED BY 'radpass';
/*
* Only SELECT is granted: the DHCP lookup queries only read these
* tables. NOTE(review): CREATE USER above errors if the account
* already exists; CREATE USER IF NOT EXISTS (MySQL 5.7+) would make
* this script idempotent.
*/
GRANT SELECT ON radius.dhcpreply TO 'radius'@'localhost';
GRANT SELECT ON radius.dhcpgroupreply TO 'radius'@'localhost';
GRANT SELECT ON radius.dhcpgroup TO 'radius'@'localhost';

Some files were not shown because too many files have changed in this diff Show more