+tailscale

parent a0a6a1c1d3
commit 80c9aed243

@@ -1,3 +1,3 @@
 {
-  services.espanso.enable = true;
+  #services.espanso.enable = true;
 }

@@ -26,7 +26,6 @@
       to = 3481;
     }];
   };
-
   programs.fish.enable = true;
   age = {
     secrets = {

@@ -10,6 +10,7 @@
     ./postgres.nix
     ./searx.nix
     ./syncthing.nix
+    ./tailscale.nix
     ./traefik.nix
   ];
 }

@@ -0,0 +1,36 @@
+{ config, pkgs, ... }: {
+  services.tailscale = {
+    enable = true;
+    useRoutingFeatures = "both";
+  };
+  networking.firewall = {
+    trustedInterfaces = [ "tailscale0" ];
+    allowedUDPPorts = [ config.services.tailscale.port ];
+  };
+  systemd.services.tailscale-autoconnect = {
+    description = "Automatic connection to Tailscale";
+
+    # make sure tailscale is running before trying to connect to tailscale
+    after = [ "network-pre.target" "tailscale.service" ];
+    wants = [ "network-pre.target" "tailscale.service" ];
+    wantedBy = [ "multi-user.target" ];
+
+    # set this service as a oneshot job
+    serviceConfig.Type = "oneshot";
+
+    # have the job run this shell script
+    script = with pkgs; ''
+      # wait for tailscaled to settle
+      sleep 2
+
+      # check if we are already authenticated to tailscale
+      status="$(${tailscale}/bin/tailscale status -json | ${jq}/bin/jq -r .BackendState)"
+      if [ "$status" = "Running" ]; then # if so, then do nothing
+        exit 0
+      fi
+
+      # otherwise authenticate with tailscale
+      ${tailscale}/bin/tailscale up --advertise-exit-node -authkey tskey-auth-kwmqT37CNTRL-AxiaTDedQJ6YZts2deRtH6PEBDwh9PxXK
+    '';
+  };
+}

@@ -24,6 +24,7 @@ in {
   specialisation = {
     external-display.configuration = {
       system.nixos.tags = [ "Externer-Monitor" ];
+      services.xserver.videoDrivers = [ "nvidia" ];
       hardware.nvidia.prime.offload.enable = lib.mkForce false;
       hardware.nvidia.powerManagement.finegrained = lib.mkForce false;
     };

@@ -31,6 +32,7 @@ in {
   specialisation = {
     dual-display.configuration = {
       system.nixos.tags = [ "Dual-Monitor" ];
+      services.xserver.videoDrivers = [ "nvidia" ];
       hardware.nvidia.prime.offload.enable = lib.mkForce false;
      hardware.nvidia.prime.sync.enable = lib.mkForce true;
       hardware.nvidia.powerManagement.finegrained = lib.mkForce false;

@@ -77,21 +79,6 @@ in {
 
   # Enable networking
   networking.networkmanager.enable = true;
-  networking.wg-quick.interfaces = {
-    wg0 = {
-      address = [ "10.8.0.3/24" ];
-      privateKeyFile = "/root/wg/peer_m3-nix/privatekey-peer_m3-nix";
-      dns = [ "10.88.0.1" ];
-
-      peers = [{
-        publicKey = "Il/nVlX2qzmZMJQ8QAKN+uQdkcK66Wt7MWZn9Vku6Tg=";
-        presharedKey = "sOgKQCXs+WAEpVvnkqTHlK1ItWpmP/xiexhAJ6oMBJs=";
-        allowedIPs = [ "0.0.0.0/0" "::/0" ];
-        endpoint = "wg.lanakk.com:51820";
-        persistentKeepalive = 25;
-      }];
-    };
-  };
 
   services.avahi = {
     enable = true;

@@ -4,8 +4,9 @@
     ./flatpak.nix
     ./sound.nix
     ./udev.nix
+    ./tailscale.nix
     ./virtualization.nix
-    ./xserver.nix
+    #./xserver.nix
   ];
 
   # services.gvfs = {

@@ -0,0 +1,30 @@
+{ pkgs, ... }: {
+  services.tailscale = { enable = true; };
+
+  systemd.services.tailscale-autoconnect = {
+    description = "Automatic connection to Tailscale";
+
+    # make sure tailscale is running before trying to connect to tailscale
+    after = [ "network-pre.target" "tailscale.service" ];
+    wants = [ "network-pre.target" "tailscale.service" ];
+    wantedBy = [ "multi-user.target" ];
+
+    # set this service as a oneshot job
+    serviceConfig.Type = "oneshot";
+
+    # have the job run this shell script
+    script = with pkgs; ''
+      # wait for tailscaled to settle
+      sleep 2
+
+      # check if we are already authenticated to tailscale
+      status="$(${tailscale}/bin/tailscale status -json | ${jq}/bin/jq -r .BackendState)"
+      if [ "$status" = "Running" ]; then # if so, then do nothing
+        exit 0
+      fi
+
+      # otherwise authenticate with tailscale
+      ${tailscale}/bin/tailscale up -authkey tskey-auth-kwmqT37CNTRL-AxiaTDedQJ6YZts2deRtH6PEBDwh9PxXK
+    '';
+  };
+}