-
Notifications
You must be signed in to change notification settings - Fork 12
Expand file tree
/
Copy pathsetup.sh
More file actions
executable file
·2105 lines (1929 loc) · 95.9 KB
/
setup.sh
File metadata and controls
executable file
·2105 lines (1929 loc) · 95.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/bin/bash
# Written by Paul Clevett
# (C)Copyright Wolf Software Systems Ltd
# https://wolf.uk.com
#
#
# WolfStack Quick Install Script
# Installs WolfStack server management dashboard
# Supported: Ubuntu/Debian, Fedora/RHEL/CentOS, SLES/openSUSE, Arch Linux, IBM Power (ppc64le)
#
# Usage: curl -sSL https://raw.githubusercontent.com/wolfsoftwaresystemsltd/WolfStack/master/setup.sh | sudo bash
# curl -sSL https://raw.githubusercontent.com/wolfsoftwaresystemsltd/WolfStack/beta/setup.sh | sudo bash -s -- --beta
# sudo bash setup.sh --install-dir /mnt/usb # build & install from external drive
#
# Abort on the first unhandled command failure. NOTE(review): `-e` alone does
# not catch failures inside pipelines; the script relies on explicit
# `|| true` / `|| { ...; }` guards for best-effort steps, so adding
# `-o pipefail` would need a full audit first.
set -e
# Helper: read from /dev/tty if available, otherwise return empty (use defaults)
# $1 - name of the shell variable to populate with the user's answer.
# Fixed: use `read -r` so backslashes in the reply are kept literally instead
# of being treated as escape/line-continuation characters (a plain `read`
# silently mangles answers containing Windows-style paths or trailing `\`).
prompt_read() {
if [ -e /dev/tty ] && : < /dev/tty 2>/dev/null; then
# Read one line from the controlling terminal; on EOF/failure clear the
# variable so callers fall back to their defaults.
read -r "$1" < /dev/tty 2>/dev/null || eval "$1="
else
eval "$1="
fi
}
# ─── Parse arguments ─────────────────────────────────────────────────────────
# Defaults; each flag below overrides exactly one of these globals.
# NOTE(review): unrecognised arguments are silently ignored — consider warning.
BRANCH="master"
CUSTOM_INSTALL_DIR=""
ASSUME_YES=false
AGENT_MODE=false
SKIP_PBS_BUILD=false
FORCE_PBS_BUILD=false
while [ $# -gt 0 ]; do
case "$1" in
--beta) BRANCH="beta" ;;
--yes|-y|--assume-yes) ASSUME_YES=true ;;
--agent) AGENT_MODE=true ;;
--skip-pbs-build|--no-pbs-build) SKIP_PBS_BUILD=true ;;
--build-pbs|--pbs-from-source) FORCE_PBS_BUILD=true ;;
--install-dir|--install)
# Consumes a second argument: a directory path or a block device to
# build from (see the custom-install-dir section further down).
if [ -n "$2" ]; then
shift
CUSTOM_INSTALL_DIR="$1"
else
echo "✗ --install-dir requires a path argument"
exit 1
fi
;;
esac
# Unconditional shift: moves past the flag just handled (and, for
# --install-dir, past its value which was shifted into $1 above).
shift
done
# Existing install = upgrade. The /api/upgrade endpoint in older WolfStack
# binaries spawns this script via `curl|bash` without --yes, with stdin nulled
# out, so any interactive prompt would block the upgrade forever. If
# /etc/wolfstack exists this is an upgrade, not a fresh install — force
# unattended mode so the in-app "Upgrade" button actually completes.
if [ -d /etc/wolfstack ]; then
ASSUME_YES=true
fi
# Allow git to operate on repos owned by other users (setup.sh runs as root
# but repos may have been cloned by a regular user)
# The GIT_CONFIG_COUNT/KEY_n/VALUE_n env triplet injects `safe.directory=*`
# into every git invocation in this process tree without touching any
# on-disk gitconfig.
export GIT_CONFIG_COUNT=1
export GIT_CONFIG_KEY_0=safe.directory
export GIT_CONFIG_VALUE_0="*"
# ─── Architecture detection for prebuilt binaries ──────────────────────────
# Prebuilt release binaries are published only for x86_64 and aarch64; any
# other machine type leaves BINARY_ARCH empty, which downstream code treats
# as "compile from source".
HOST_ARCH=$(uname -m)
if [ "$HOST_ARCH" = "x86_64" ] || [ "$HOST_ARCH" = "aarch64" ]; then
BINARY_ARCH="$HOST_ARCH"
else
BINARY_ARCH="" # unsupported — will build from source
fi
# Download a prebuilt binary from GitHub Releases.
# Usage: download_prebuilt <repo> <binary_name> <dest_path>
# Returns 0 on success, 1 on failure (caller should fall back to source build)
# Fixed: the release URL pointed at "github.com", a
# third-party GitHub mirror/proxy domain — a supply-chain risk for a script
# that installs root-owned executables. Restored the canonical
# github.com/<repo>/releases/latest/download/ URL.
download_prebuilt() {
local repo="$1" binary="$2" dest="$3"
# No prebuilt exists for this architecture — tell the caller to build.
if [ -z "$BINARY_ARCH" ]; then
return 1
fi
local url="https://github.com/${repo}/releases/latest/download/${binary}-${BINARY_ARCH}"
echo " Downloading prebuilt ${binary} for ${BINARY_ARCH}..."
# Download to a side file first so a partial download never clobbers an
# existing working binary at $dest.
local tmpfile="${dest}.download"
if curl -fSL --connect-timeout 15 --max-time 300 --retry 2 -o "$tmpfile" "$url" 2>&1; then
mv "$tmpfile" "$dest"
chmod +x "$dest"
echo " ✓ Downloaded prebuilt ${binary} (${BINARY_ARCH})"
return 0
else
echo " ⚠ Prebuilt binary not available — will build from source"
rm -f "$tmpfile"
return 1
fi
}
# ─── Custom install directory (for low-disk devices like Raspberry Pi) ───────
if [ -n "$CUSTOM_INSTALL_DIR" ]; then
# If given a block device, mount it
if [ -b "$CUSTOM_INSTALL_DIR" ]; then
MOUNT_DEV="$CUSTOM_INSTALL_DIR"
CUSTOM_INSTALL_DIR="/mnt/wolfstack-build"
mkdir -p "$CUSTOM_INSTALL_DIR"
if ! mountpoint -q "$CUSTOM_INSTALL_DIR" 2>/dev/null; then
echo "Mounting $MOUNT_DEV at $CUSTOM_INSTALL_DIR..."
# Under `set -e` a failed mount aborts the script here — intentional:
# continuing would fill the small root filesystem instead.
mount "$MOUNT_DEV" "$CUSTOM_INSTALL_DIR"
fi
fi
mkdir -p "$CUSTOM_INSTALL_DIR"
# Redirect EVERYTHING to external drive: Rust toolchain, build cache, temp files
# RUSTUP_HOME/CARGO_HOME are honoured by rustup/cargo; TMPDIR by mktemp and
# most build tooling. NOTE(review): the mount is never unmounted on exit.
export RUSTUP_HOME="$CUSTOM_INSTALL_DIR/.rustup"
export CARGO_HOME="$CUSTOM_INSTALL_DIR/.cargo"
export TMPDIR="$CUSTOM_INSTALL_DIR/tmp"
export PATH="$CARGO_HOME/bin:$PATH"
mkdir -p "$TMPDIR"
fi
echo ""
echo " 🐺 WolfStack Installer"
echo " ─────────────────────────────────────"
if [ "$AGENT_MODE" = true ]; then
echo " Mode: Agent (cluster API only — no management UI)"
echo " Manage this node from your master server's UI after install."
else
echo " Server Management Platform"
fi
if [ "$BRANCH" != "master" ]; then
echo " Branch: $BRANCH"
fi
if [ -n "$CUSTOM_INSTALL_DIR" ]; then
echo " Install dir: $CUSTOM_INSTALL_DIR"
fi
echo ""
# ─── Must run as root ────────────────────────────────────────────────────────
if [ "$(id -u)" -ne 0 ]; then
echo "✗ This script must be run as root."
echo " Usage: sudo bash setup.sh"
echo " or: curl -sSL <url> | sudo bash"
exit 1
fi
# Detect the real user (for Rust install) when running under sudo
# SUDO_USER is set by sudo to the invoking account; falls back to root when
# the script is run from a genuine root shell.
REAL_USER="${SUDO_USER:-root}"
# eval-based tilde expansion resolves the user's home even when $HOME still
# points at root's home under sudo.
REAL_HOME=$(eval echo "~$REAL_USER")
# ─── Detect package manager ─────────────────────────────────────────────────
# Probe for the five supported package managers in priority order; the first
# hit wins. Aborts with guidance when none is found.
echo "Checking system requirements..."
PKG_MANAGER=""
for ws_candidate in apt dnf yum zypper pacman; do
if command -v "$ws_candidate" >/dev/null 2>&1; then
PKG_MANAGER="$ws_candidate"
break
fi
done
case "$PKG_MANAGER" in
apt) echo "✓ Detected Debian/Ubuntu (apt)" ;;
dnf) echo "✓ Detected Fedora/RHEL (dnf)" ;;
yum) echo "✓ Detected RHEL/CentOS (yum)" ;;
zypper) echo "✓ Detected SLES/openSUSE (zypper)" ;;
pacman) echo "✓ Detected Arch Linux (pacman)" ;;
*)
echo "✗ Could not detect package manager (apt/dnf/yum/zypper/pacman)"
echo " Please install dependencies manually."
exit 1
;;
esac
# ─── Pre-flight: warn about services we know will collide ──────────────────
# Adam Cogswell's feedback: WolfStack installs dnsmasq for LXC's bridge.
# On a host already running an authoritative resolver (Technitium, Pi-hole,
# AdGuard, bind, unbound) or a reverse proxy owning :80/:443, installing can
# break the existing setup. We never hard-fail here — we warn loudly and
# require explicit confirmation. --yes skips the prompt for unattended runs.
echo ""
echo "Pre-flight checks..."
WS_CONFLICT_FOUND=false
# Record that at least one conflict exists (the confirmation prompt later
# keys off WS_CONFLICT_FOUND) and print the warning line.
ws_warn() {
WS_CONFLICT_FOUND=true
printf ' ⚠ %s\n' "$1"
}
# Helper: is a port held by a non-wolfstack process?
# $1 - TCP port number.
# Outputs: the `users:(...)` field from ss, or the PID/Program column from
# netstat, for whatever process LISTENs on that port; prints nothing when the
# port is free or neither tool is installed. Callers string-match the output.
ws_port_holder() {
local port="$1"
if command -v ss >/dev/null 2>&1; then
# Anchor ":<port>" at the end of the local-address column ($4) so e.g.
# :53 does not match :5353; print only the first users:() token found.
ss -tnlp 2>/dev/null | awk -v p=":$port$" '$4 ~ p { for(i=1;i<=NF;i++) if($i ~ /users:/) print $i; exit }'
elif command -v netstat >/dev/null 2>&1; then
netstat -tnlp 2>/dev/null | awk -v p=":$port$" '$4 ~ p {print $7; exit}'
fi
}
# DNS-on-:53 conflicts
# Substring-match the holder string against known resolvers. Order matters:
# the more specific patterns must come before the catch-all `*)`.
DNS_HOLDER=$(ws_port_holder 53)
if [ -n "$DNS_HOLDER" ]; then
case "$DNS_HOLDER" in
*Technitium*|*technitium*)
ws_warn "Technitium DNS Server is bound to :53. WolfStack installs dnsmasq for LXC; the global dnsmasq.service will be left disabled but the package install may still affect Technitium. Consider running WolfStack on a different host." ;;
*pihole*|*pihole-FTL*)
ws_warn "Pi-hole is bound to :53. Same caveat as Technitium — installing dnsmasq alongside Pi-hole's FTL can collide." ;;
*AdGuardHome*|*AdGuard*)
ws_warn "AdGuard Home is bound to :53. Installing dnsmasq alongside AdGuard can collide." ;;
*systemd-resolve*)
# Informational only — does not set WS_CONFLICT_FOUND.
echo " ℹ systemd-resolved is bound to :53 (stub listener). WolfStack handles this case automatically — leaving it alone." ;;
*named*|*bind*)
ws_warn "BIND (named) is bound to :53. Installing dnsmasq alongside BIND can collide." ;;
*unbound*)
ws_warn "Unbound is bound to :53. Installing dnsmasq alongside Unbound can collide." ;;
*dnsmasq*)
: ;; # already dnsmasq — fine
*)
ws_warn "Something is already bound to :53 ($DNS_HOLDER). Installing dnsmasq may collide." ;;
esac
fi
# 8553 / 8554 / 8555 — the three ports WolfStack actually binds. 8553 is the
# management UI / API, 8554 is the inter-node HTTP listener (cluster proxy),
# 8555 is the dedicated public status-page listener. Calling all three out
# matters for firewall planning — users open just :8553 and wonder why
# inter-node sync or status pages don't work.
for port in 8553 8554 8555; do
HOLDER=$(ws_port_holder "$port")
# "${HOLDER#*wolfstack}" strips up to and including "wolfstack"; if the
# result differs from $HOLDER, the holder string CONTAINS "wolfstack",
# i.e. it is our own previous instance.
if [ -n "$HOLDER" ] && [ "$HOLDER" != "${HOLDER#*wolfstack}" ]; then
: # our own previous instance — fine on upgrade
elif [ -n "$HOLDER" ]; then
case "$port" in
8553) ROLE="management UI / API" ;;
8554) ROLE="inter-node cluster API" ;;
8555) ROLE="public status pages" ;;
esac
ws_warn "Port $port ($ROLE) is already bound by $HOLDER. WolfStack will fail to bind it until that service is stopped or the port is moved in /etc/wolfstack/config.toml."
fi
done
# Reverse proxies on 80/443 (we don't install one by default but WolfProxy
# component does — flag so the user knows).
for port in 80 443; do
HOLDER=$(ws_port_holder "$port")
if [ -n "$HOLDER" ]; then
case "$HOLDER" in
*nginx*|*apache*|*httpd*|*caddy*|*traefik*|*haproxy*)
# Informational only — not a conflict; WolfStack core never binds 80/443.
echo " ℹ Reverse proxy detected on :$port ($HOLDER). WolfStack core will not touch :$port — only relevant if you install WolfProxy later." ;;
esac
fi
done
# Existing /etc/wolfstack/ from a prior install. The risk: re-running setup
# preserves the OLD join-token, cluster secret, and nodes.json, which on a
# fresh box looks like an upgrade but on a moved-disk-to-new-host can mean
# the new node ends up trying to join its old cluster with stale state.
# `ls -A` is non-empty only when the directory has real content (incl. dotfiles).
if [ -d /etc/wolfstack ] && [ "$(ls -A /etc/wolfstack 2>/dev/null)" ]; then
echo " ℹ /etc/wolfstack already exists with content — treating this as an upgrade. Existing config, cluster secret, and join token will be preserved."
if [ -f /etc/wolfstack/custom-cluster-secret ]; then
echo " • Custom cluster secret is set on this node. If joining a NEW cluster, delete /etc/wolfstack/custom-cluster-secret and /etc/wolfstack/nodes.json before adding this node from the master."
fi
fi
# Active firewall blocking 8553? Most homelab installs don't bother, but
# enterprise images ship with firewalld/ufw on. Surface it now so the user
# isn't troubleshooting "can't reach :8553" after install.
if command -v ufw >/dev/null 2>&1 && ufw status 2>/dev/null | grep -q "Status: active"; then
# Accept both "8553 ALLOW" and "8553/tcp ALLOW" rule formats.
if ! ufw status 2>/dev/null | grep -qE "8553(/tcp)?\s+ALLOW"; then
echo " ℹ ufw is active and does not allow :8553. After install run: sudo ufw allow 8553/tcp 8554/tcp 8555/tcp"
fi
elif command -v firewall-cmd >/dev/null 2>&1 && systemctl is-active --quiet firewalld 2>/dev/null; then
if ! firewall-cmd --list-ports 2>/dev/null | grep -q "8553/tcp"; then
echo " ℹ firewalld is running and does not allow :8553. After install run: sudo firewall-cmd --permanent --add-port=8553/tcp --add-port=8554/tcp --add-port=8555/tcp && sudo firewall-cmd --reload"
fi
fi
# Architecture: prebuilt binaries only ship for x86_64 / aarch64. Anything
# else falls through to source build, which is slow and can fail. Tell the
# user upfront so they can either accept the source-build path or bail.
case "$HOST_ARCH" in
x86_64|aarch64) ;;
*)
echo " ⚠ Architecture $HOST_ARCH has no prebuilt binary — WolfStack will be compiled from source (~10–30 min, ~3 GB free disk required in /tmp and \$HOME)."
;;
esac
# Summarise what's about to happen
# Plain-English manifest of every artefact this script creates or replaces,
# shown before the confirmation prompt.
echo ""
echo " This will install / update on this host:"
echo " • The wolfstack binary at /usr/local/bin/wolfstack"
echo " • A systemd unit (wolfstack.service)"
echo " • Listeners bound: :8553 (management UI), :8554 (inter-node), :8555 (status pages)"
echo " • Build dependencies: git, curl, build tools, openssl headers"
echo " • Runtime dependencies: lxc, dnsmasq (binary), bridge-utils, qemu, socat, nfs-client, fuse3, s3fs"
if [ "$AGENT_MODE" = true ]; then
echo " • Agent mode: SPA disabled, but ALL runtime deps above still installed —"
echo " every node has to be able to actually run containers/VMs/storage."
fi
echo " • A cluster join token at /etc/wolfstack/join-token"
echo " • A package manifest log at /var/log/wolfstack/install-<timestamp>.log"
echo " • The uninstaller at /usr/local/bin/wolfstack-uninstall"
if [ "$WS_CONFLICT_FOUND" = true ]; then
echo ""
echo " ⚠ Conflicts above were detected. Read them carefully before continuing."
fi
# Prompt unless --yes was passed or stdin isn't a tty (curl|bash case)
# Default answer is No: anything other than y/yes aborts cleanly (exit 0).
if [ "$ASSUME_YES" != true ]; then
if [ -t 0 ] || [ -r /dev/tty ]; then
echo ""
printf " Proceed with install? [y/N] "
WS_REPLY=""
if [ -t 0 ]; then
read -r WS_REPLY
else
# stdin is a pipe (curl|bash) — read from /dev/tty if we have one
read -r WS_REPLY < /dev/tty 2>/dev/null || WS_REPLY=""
fi
case "$WS_REPLY" in
y|Y|yes|YES) ;;
*)
echo " Aborted. Re-run with --yes to skip this prompt for unattended installs."
exit 0
;;
esac
else
# No tty available at all (e.g. CI without --yes). Be conservative — abort.
# Note the asymmetry: with NO conflicts and no tty the install proceeds
# silently; only a detected conflict turns "no tty" into a hard stop.
if [ "$WS_CONFLICT_FOUND" = true ]; then
echo ""
echo " ✗ Conflicts detected and no terminal to confirm interactively."
echo " Re-run with --yes to acknowledge and proceed:"
echo " curl -sSL <url> | sudo bash -s -- --yes"
exit 1
fi
fi
fi
echo ""
# ─── Update system packages first ─────────────────────────────────────────
# Refresh the package index so dependency installs resolve cleanly, but do
# NOT upgrade existing packages — a full system upgrade is slow, can break
# things, and the user never asked for it. Refresh failures are tolerated
# (third-party repos frequently rot); installation continues regardless.
echo ""
echo "Refreshing package index..."
case "$PKG_MANAGER" in
apt)
if ! apt update -qq 2>/dev/null; then
echo " ⚠ Some repositories failed to update."
echo " This is usually caused by a third-party repo (e.g. Docker) that doesn't"
echo " support your distro version. WolfStack will still install, but you may"
echo " need to fix the broken repo afterwards:"
echo " sudo apt update (to see which repo is failing)"
echo " Check /etc/apt/sources.list.d/ for the problematic .list file"
echo " Continuing installation..."
fi
;;
dnf) dnf makecache -q 2>/dev/null || true ;;
zypper) zypper refresh -q 2>/dev/null || true ;;
pacman) pacman -Sy --noconfirm 2>/dev/null || true ;;
esac
echo "✓ Package index refreshed"
# ─── Detect Proxmox VE host ─────────────────────────────────────────────────
# PVE ships its own QEMU/LXC packaging (pve-qemu-kvm, lxc-pve); installing the
# stock Debian equivalents can drag APT into removing the proxmox-ve
# metapackage, so the dependency-install step below must know we are on PVE.
IS_PROXMOX=false
# Fixed: the original used `dpkg -l proxmox-ve`, which also succeeds for
# packages dpkg merely knows about (e.g. removed-but-not-purged), and carried
# a duplicated `2>&1`. `dpkg -s` succeeds only when the package is actually
# installed.
if command -v pveversion >/dev/null 2>&1 || [ -f /etc/pve/.version ] || dpkg -s proxmox-ve >/dev/null 2>&1; then
IS_PROXMOX=true
PVE_VER=$(pveversion 2>/dev/null || echo "unknown")
echo "✓ Detected Proxmox VE host ($PVE_VER)"
echo " Skipping packages already provided by Proxmox (QEMU, LXC)"
fi
# ─── Install manifest: snapshot package state BEFORE we install anything ───
# Goal: produce a record at /var/log/wolfstack/install-<timestamp>.log of
# every package added or upgraded by this run, so a user who needs to roll
# back has a precise list of what changed. We snapshot here (before any
# install commands run) and again right before the completion banner; the
# diff between the two snapshots is the manifest. This catches every
# package-manager call in the script without wrapping each one individually.
WS_MANIFEST_DIR="/var/log/wolfstack"
WS_MANIFEST_TS="$(date +%Y%m%d-%H%M%S)"
WS_MANIFEST_FILE="$WS_MANIFEST_DIR/install-$WS_MANIFEST_TS.log"
WS_PKG_BEFORE="$WS_MANIFEST_DIR/.pkg-before-$$"
WS_PKG_AFTER="$WS_MANIFEST_DIR/.pkg-after-$$"
mkdir -p "$WS_MANIFEST_DIR" 2>/dev/null || true
chmod 750 "$WS_MANIFEST_DIR" 2>/dev/null || true
# Write a sorted "name<TAB>version" listing of every installed package to
# the file named in $1. Best-effort: silently a no-op when the file can't
# be created or no known package database tool exists.
ws_snapshot_packages() {
local out="$1"
: > "$out" 2>/dev/null || return 0
if command -v dpkg-query >/dev/null 2>&1; then
dpkg-query -W -f='${Package}\t${Version}\n' 2>/dev/null | sort > "$out"
return
fi
if command -v rpm >/dev/null 2>&1; then
rpm -qa --qf '%{NAME}\t%{VERSION}-%{RELEASE}\n' 2>/dev/null | sort > "$out"
return
fi
if command -v pacman >/dev/null 2>&1; then
pacman -Q 2>/dev/null | tr ' ' '\t' | sort > "$out"
fi
}
ws_snapshot_packages "$WS_PKG_BEFORE"
# ─── Install system dependencies ────────────────────────────────────────────
echo ""
echo "Installing system dependencies..."
# Snapshot dnsmasq.service state BEFORE we run the package install.
# Several distros (Fedora/RHEL/CentOS/openSUSE/Arch) ship the full
# dnsmasq package with a systemd unit that auto-starts and binds
# 0.0.0.0:53 — which collides with systemd-resolved's stub listener
# on 127.0.0.53:53 and breaks host DNS resolution. We need the dnsmasq
# BINARY for LXC's lxcbr0 (lxc-net launches its own scoped dnsmasq),
# but we don't want the global service stomping on port 53.
#
# We only auto-disable the service when WE caused it to exist — i.e.
# when the unit wasn't on disk at all before we started. If the user
# had already installed and configured dnsmasq as their system
# resolver before running setup.sh, that state is recorded here and
# we leave it strictly alone.
# States recorded: "" (unit absent), "enabled", "active", "installed-disabled".
# NOTE(review): `systemctl list-unit-files <unit>` exit status for a missing
# unit differs across systemd versions — confirm the empty-state detection
# on the oldest supported distro.
DNSMASQ_PRE_STATE=""
if systemctl list-unit-files dnsmasq.service >/dev/null 2>&1; then
if systemctl is-enabled --quiet dnsmasq.service 2>/dev/null; then
DNSMASQ_PRE_STATE="enabled"
elif systemctl is-active --quiet dnsmasq.service 2>/dev/null; then
DNSMASQ_PRE_STATE="active"
else
DNSMASQ_PRE_STATE="installed-disabled"
fi
fi
if [ "$PKG_MANAGER" = "apt" ]; then
apt update -qq 2>/dev/null || true
# On Proxmox hosts, QEMU and LXC are already provided by pve-qemu-kvm and lxc-pve.
# Many Debian packages conflict with PVE equivalents, causing APT to try removing
# the proxmox-ve metapackage. We must be very conservative on PVE hosts.
if [ "$IS_PROXMOX" = true ]; then
# Only install build dependencies needed for compiling Rust/WolfStack.
# Proxmox already provides QEMU, LXC, socat, bridge-utils, etc.
apt install -y --no-install-recommends git curl build-essential pkg-config libssl-dev libcrypt-dev || {
echo "⚠ Some build dependencies failed to install. Trying individually..."
# One-by-one retry so a single broken package doesn't sink the rest.
for pkg in git curl build-essential pkg-config libssl-dev libcrypt-dev; do
dpkg -s "$pkg" >/dev/null 2>&1 || apt install -y --no-install-recommends "$pkg" 2>/dev/null || true
done
}
# Install optional runtime deps one-by-one — skip if already provided by PVE
for pkg in dnsmasq-base bridge-utils socat nfs-common fuse3; do
if dpkg -s "$pkg" >/dev/null 2>&1; then
echo " ✓ $pkg already installed"
else
echo " Installing $pkg..."
apt install -y --no-install-recommends "$pkg" 2>/dev/null || \
echo " ⚠ Could not install $pkg (may conflict with PVE) — skipping"
fi
done
# s3fs — try both package names (s3fs-fuse on Kali/some Debian, s3fs on Ubuntu/Proxmox)
if ! dpkg -s s3fs-fuse >/dev/null 2>&1 && ! dpkg -s s3fs >/dev/null 2>&1; then
apt install -y --no-install-recommends s3fs-fuse 2>/dev/null || \
apt install -y --no-install-recommends s3fs 2>/dev/null || \
echo " ⚠ s3fs not available — S3 mounts will use built-in sync"
fi
else
# Select architecture-appropriate QEMU package
ARCH=$(uname -m)
if [ "$ARCH" = "ppc64le" ] || [ "$ARCH" = "ppc64" ]; then
QEMU_PKG="qemu-system-ppc qemu-utils"
elif [ "$ARCH" = "aarch64" ]; then
QEMU_PKG="qemu-system-arm qemu-utils qemu-efi-aarch64"
else
QEMU_PKG="qemu-system-x86 qemu-utils"
fi
# $QEMU_PKG is deliberately unquoted: it holds multiple package names that
# must word-split into separate apt arguments.
apt install -y git curl build-essential pkg-config libssl-dev libcrypt-dev lxc lxc-templates dnsmasq-base bridge-utils $QEMU_PKG socat nfs-common fuse3
apt install -y s3fs-fuse 2>/dev/null || apt install -y s3fs 2>/dev/null || echo " ⚠ s3fs not available — S3 mounts will use built-in sync"
fi
elif [ "$PKG_MANAGER" = "dnf" ]; then
ARCH=$(uname -m)
if [ "$ARCH" = "aarch64" ]; then
QEMU_DNF="qemu-system-aarch64 qemu-img edk2-aarch64"
else
QEMU_DNF="qemu-kvm qemu-img"
fi
dnf install -y git curl gcc gcc-c++ make openssl-devel pkg-config libxcrypt-devel lxc lxc-templates lxc-extra dnsmasq bridge-utils $QEMU_DNF socat s3fs-fuse nfs-utils fuse3
elif [ "$PKG_MANAGER" = "yum" ]; then
ARCH=$(uname -m)
if [ "$ARCH" = "aarch64" ]; then
QEMU_YUM="qemu-system-aarch64 qemu-img"
else
QEMU_YUM="qemu-kvm qemu-img"
fi
yum install -y git curl gcc gcc-c++ make openssl-devel pkgconfig lxc lxc-templates lxc-extra dnsmasq bridge-utils $QEMU_YUM socat s3fs-fuse nfs-utils fuse
elif [ "$PKG_MANAGER" = "zypper" ]; then
ARCH=$(uname -m)
if [ "$ARCH" = "aarch64" ]; then
QEMU_ZYPP="qemu-arm qemu-tools qemu-uefi-aarch64"
else
QEMU_ZYPP="qemu-kvm qemu-tools"
fi
zypper install -y git curl gcc gcc-c++ make libopenssl-devel pkg-config lxc dnsmasq bridge-utils $QEMU_ZYPP socat s3fs nfs-client fuse3
elif [ "$PKG_MANAGER" = "pacman" ]; then
ARCH=$(uname -m)
if [ "$ARCH" = "aarch64" ]; then
QEMU_PAC="qemu-system-aarch64 qemu-img edk2-aarch64"
else
QEMU_PAC="qemu-full"
fi
# rustup is included here because Arch has no distro rust toolchain suitable
# for the later source build path.
pacman -Sy --noconfirm --needed git curl base-devel openssl pkg-config lxc dnsmasq $QEMU_PAC socat s3fs-fuse nfs-utils fuse3 rustup
fi
# Decide whether to disable the freshly-installed dnsmasq.service.
# We act only when ALL of these are true:
# 1. The unit didn't exist before we ran (DNSMASQ_PRE_STATE empty)
# — i.e. we caused it to come into existence. If the user had
# dnsmasq pre-installed/configured as their system resolver, we
# leave it alone.
# 2. The unit exists now (the package install actually placed it).
# 3. systemd-resolved is active. Without it, there's no port-53
# conflict to worry about — the user may be relying on dnsmasq
# or another resolver and we shouldn't second-guess.
# 4. The unit is now active (auto-started by the install).
# When all four hold, the install just created a port-53 collision
# that breaks host DNS — disable the service to clear it. The dnsmasq
# binary stays installed so LXC's lxcbr0 can launch its own scoped
# dnsmasq via lxc-net (--bind-interfaces --interface=lxcbr0).
if [ -z "$DNSMASQ_PRE_STATE" ] \
&& systemctl list-unit-files dnsmasq.service >/dev/null 2>&1 \
&& systemctl is-active --quiet systemd-resolved 2>/dev/null \
&& systemctl is-active --quiet dnsmasq.service 2>/dev/null; then
echo " Package install auto-started dnsmasq.service (binds 0.0.0.0:53)"
echo " systemd-resolved is also active (binds 127.0.0.53:53) — these will conflict and break host DNS."
echo " Disabling dnsmasq.service. LXC's lxcbr0 gets its own scoped dnsmasq via lxc-net (unaffected)."
echo " If you intended to use dnsmasq as your system resolver, run:"
echo " sudo systemctl disable --now systemd-resolved"
echo " sudo systemctl enable --now dnsmasq"
# Best-effort: a failure here must not abort the install (set -e).
systemctl disable --now dnsmasq.service 2>/dev/null || true
fi
echo "✓ System dependencies installed"
# ─── Install Proxmox Backup Client (optional, for PBS integration) ──────────
echo ""
echo "Installing Proxmox Backup Client..."
# Helper: detect the best PBS codename to use based on current OS.
# Prints one of the Debian codenames Proxmox publishes PBS packages for.
# Fixed: /etc/os-release is now sourced inside a command-substitution
# subshell, so its variables (ID, NAME, VERSION, PRETTY_NAME, ...) can no
# longer leak into — or clobber — this script's own globals.
pbs_detect_codename() {
local codename=""
if [ -r /etc/os-release ]; then
codename=$( . /etc/os-release; printf '%s' "${VERSION_CODENAME:-}" )
fi
# Fallback for systems without VERSION_CODENAME in os-release.
[ -z "$codename" ] && codename=$(lsb_release -sc 2>/dev/null || echo "")
case "$codename" in
# Debian — use as-is (proxmox publishes for these)
trixie|bookworm|bullseye) echo "$codename" ;;
# Ubuntu codenames mapped to closest Debian
noble|oracular|plucky) echo "trixie" ;; # 24.04+/25.04 → Debian 13
jammy|lunar|mantic) echo "bookworm" ;; # 22.04–23.10 → Debian 12
focal|impish) echo "bullseye" ;; # 20.04–21.10 → Debian 11
# Unknown/everything else — trixie is newest; extraction will fallback
*) echo "trixie" ;;
esac
}
# Helper: extract proxmox-backup-client binary from Debian .deb
# Used on non-Debian systems (Fedora, Arch fallback, openSUSE)
# $1 - Debian codename (trixie/bookworm/...); $2 - deb arch (default amd64).
# Installs to /usr/local/bin/proxmox-backup-client; returns 1 on any failure.
# NOTE(review): relies on GNU grep (-P) to scrape the repo's HTML directory
# listing, and fetches over plain HTTP with no signature check on the .deb —
# consider HTTPS + checksum verification.
pbs_extract_from_deb() {
local codename="$1"
local arch="${2:-amd64}"
local tmp
tmp=$(mktemp -d)
local base_url="http://download.proxmox.com/debian/pbs/dists/${codename}/pbs-no-subscription/binary-${arch}/"
local deb_name
# Newest non-debug client package in the listing (version-sorted).
deb_name=$(curl -fsSL "$base_url" 2>/dev/null | grep -oP 'proxmox-backup-client_[^"]+\.deb' | grep -v dbgsym | sort -V | tail -1)
if [ -z "$deb_name" ]; then
rm -rf "$tmp"
return 1
fi
echo " Downloading $deb_name (${codename}/${arch})..." >&2
if ! curl -fsSL "${base_url}${deb_name}" -o "${tmp}/${deb_name}" 2>/dev/null; then
rm -rf "$tmp"; return 1
fi
# A .deb is an ar archive containing data.tar.{zst,xz,gz}.
( cd "$tmp" && ar x "$deb_name" 2>/dev/null ) || { rm -rf "$tmp"; return 1; }
local data_tar
data_tar=$(ls "$tmp"/data.tar.* 2>/dev/null | head -1)
[ -z "$data_tar" ] && { rm -rf "$tmp"; return 1; }
# Decompress whichever format this .deb uses; an unknown extension falls
# through and the tar extraction below fails cleanly.
case "$data_tar" in
*.zst) zstd -d -q "$data_tar" -o "$tmp/data.tar" 2>/dev/null || { rm -rf "$tmp"; return 1; } ;;
*.xz) xz -d -k "$data_tar" 2>/dev/null || { rm -rf "$tmp"; return 1; } ;;
*.gz) gzip -dk "$data_tar" 2>/dev/null || { rm -rf "$tmp"; return 1; } ;;
esac
# Pull out just the one binary we need from the payload.
if ! tar -C "$tmp" -xf "$tmp/data.tar" ./usr/bin/proxmox-backup-client 2>/dev/null; then
rm -rf "$tmp"; return 1
fi
install -m 0755 "$tmp/usr/bin/proxmox-backup-client" /usr/local/bin/proxmox-backup-client
rm -rf "$tmp"
return 0
}
# Per-distro installation strategy for proxmox-backup-client. Order:
# already installed → apt repo → AUR → .deb extraction → generic fallback.
pbs_install_success=false
if command -v proxmox-backup-client >/dev/null 2>&1; then
echo "✓ proxmox-backup-client already installed ($(proxmox-backup-client --version 2>&1 | head -1))"
pbs_install_success=true
elif command -v apt-get >/dev/null 2>&1; then
# ─── Debian / Ubuntu / Proxmox VE ──────────────────────────────────────
CODENAME=$(pbs_detect_codename)
echo " Using Proxmox PBS repo for: $CODENAME"
mkdir -p /etc/apt/sources.list.d /etc/apt/trusted.gpg.d
echo "deb http://download.proxmox.com/debian/pbs $CODENAME pbs-no-subscription" > /etc/apt/sources.list.d/pbs-client.list
# Repo signing key; failure tolerated — apt update will then fail and we
# fall through to the bookworm retry below.
curl -fsSL "https://enterprise.proxmox.com/debian/proxmox-release-${CODENAME}.gpg" \
-o "/etc/apt/trusted.gpg.d/proxmox-release-${CODENAME}.gpg" 2>/dev/null || true
if apt-get update -qq 2>/dev/null && apt-get install -y proxmox-backup-client 2>/dev/null; then
echo "✓ proxmox-backup-client installed from $CODENAME repo"
pbs_install_success=true
elif [ "$CODENAME" != "bookworm" ]; then
# Retry once against the stable bookworm repo before giving up.
echo " ⚠ $CODENAME install failed — trying bookworm repo"
echo "deb http://download.proxmox.com/debian/pbs bookworm pbs-no-subscription" > /etc/apt/sources.list.d/pbs-client.list
curl -fsSL "https://enterprise.proxmox.com/debian/proxmox-release-bookworm.gpg" \
-o "/etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg" 2>/dev/null || true
if apt-get update -qq 2>/dev/null && apt-get install -y proxmox-backup-client 2>/dev/null; then
echo "✓ proxmox-backup-client installed from bookworm repo"
pbs_install_success=true
fi
fi
elif command -v pacman >/dev/null 2>&1; then
# ─── Arch / CachyOS / Manjaro ──────────────────────────────────────────
# Required libraries for the binary: libfuse3, openssl 3, acl, zstd
pacman -S --needed --noconfirm fuse3 openssl acl zstd 2>/dev/null || true
# Try AUR helper first (paru or yay) — builds clean Arch package
AUR_HELPER=""
for h in paru yay; do
if command -v "$h" >/dev/null 2>&1; then AUR_HELPER="$h"; break; fi
done
# AUR builds must not run as root — only attempt when we have a real
# invoking user to drop to via su.
if [ -n "$AUR_HELPER" ] && [ -n "$SUDO_USER" ] && [ "$SUDO_USER" != "root" ]; then
echo " Using $AUR_HELPER to build proxmox-backup-client-bin from AUR..."
if su - "$SUDO_USER" -c "$AUR_HELPER -S --needed --noconfirm proxmox-backup-client-bin" 2>/dev/null; then
pbs_install_success=true
fi
fi
if [ "$pbs_install_success" != "true" ]; then
echo " Falling back to Debian .deb extraction..."
if pbs_extract_from_deb trixie amd64 || pbs_extract_from_deb bookworm amd64; then
echo "✓ proxmox-backup-client installed to /usr/local/bin/"
pbs_install_success=true
fi
fi
elif command -v dnf >/dev/null 2>&1; then
# ─── Fedora / RHEL / Rocky / AlmaLinux ─────────────────────────────────
dnf install -y fuse3 openssl libacl zstd 2>/dev/null || true
# Map kernel arch names to Debian package arch names.
ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/')
if pbs_extract_from_deb trixie "$ARCH" || pbs_extract_from_deb bookworm "$ARCH"; then
echo "✓ proxmox-backup-client installed to /usr/local/bin/"
pbs_install_success=true
fi
elif command -v zypper >/dev/null 2>&1; then
# ─── openSUSE ──────────────────────────────────────────────────────────
zypper install -y fuse3 openssl libacl1 libzstd1 2>/dev/null || true
ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/')
if pbs_extract_from_deb trixie "$ARCH" || pbs_extract_from_deb bookworm "$ARCH"; then
echo "✓ proxmox-backup-client installed to /usr/local/bin/"
pbs_install_success=true
fi
else
# ─── Unknown distro — try generic .deb extract ─────────────────────────
ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/')
if pbs_extract_from_deb trixie "$ARCH" || pbs_extract_from_deb bookworm "$ARCH"; then
echo "✓ proxmox-backup-client installed to /usr/local/bin/"
pbs_install_success=true
fi
fi
# ─── Source-build fallback for architectures Proxmox doesn't ship binaries for ─
# Proxmox publishes proxmox-backup-client ONLY for amd64
# (http://download.proxmox.com/debian/pbs/dists/{bookworm,trixie}/pbs-no-subscription/
# only has binary-amd64/). Every other architecture — Raspberry Pi (aarch64),
# Apple Silicon Linux (aarch64), ARM servers (aarch64), armv7l Pis — silently
# fails the .deb extraction path above. This block builds the client crate from
# source via cargo so PBS backup destinations work on ARM hosts.
#
# Skip with --skip-pbs-build if you don't need PBS and don't want to wait
# 20-30 min on a Pi. Force a rebuild attempt with --build-pbs even on amd64.
pbs_build_from_source() {
    # Build proxmox-backup-client from the upstream Proxmox git source and
    # install it to /usr/local/bin. Used on architectures (aarch64, armv7l,
    # …) for which Proxmox publishes no binary packages.
    #
    # Globals (read): HOST_ARCH (optional), CUSTOM_INSTALL_DIR (optional)
    # Outputs:        progress/diagnostics on stdout
    # Returns:        0 when the binary was built and installed, 1 otherwise
    local target_arch="${HOST_ARCH:-$(uname -m)}"
    echo ""
    echo " Building proxmox-backup-client from source for $target_arch..."
    echo " This takes ~20-30 minutes on a Raspberry Pi 4. Re-run setup.sh"
    echo " with --skip-pbs-build to skip on next upgrade if you don't need PBS."
    if ! command -v cargo >/dev/null 2>&1; then
        echo " ✗ Cargo (Rust toolchain) not found — cannot build from source."
        echo " Install Rust first: https://rustup.rs/ then re-run setup.sh"
        return 1
    fi
    # Build dependencies. The proxmox-backup-client crate links libssl, libacl,
    # libfuse3 (for fuse-mount of pxar archives), and libsystemd. Names vary
    # by distro — best-effort install, build will fail loudly if any are missing.
    if command -v apt-get >/dev/null 2>&1; then
        apt-get install -y --no-install-recommends \
            git build-essential pkg-config clang \
            libssl-dev libacl1-dev libfuse3-dev libsystemd-dev uuid-dev 2>/dev/null \
            || echo " ⚠ Some build deps may be missing — continuing anyway"
    elif command -v dnf >/dev/null 2>&1; then
        dnf install -y git gcc gcc-c++ make pkgconf-pkg-config clang \
            openssl-devel libacl-devel fuse3-devel systemd-devel libuuid-devel 2>/dev/null \
            || echo " ⚠ Some build deps may be missing — continuing anyway"
    elif command -v pacman >/dev/null 2>&1; then
        pacman -S --needed --noconfirm git base-devel pkg-config clang \
            openssl acl fuse3 systemd-libs util-linux 2>/dev/null \
            || echo " ⚠ Some build deps may be missing — continuing anyway"
    elif command -v zypper >/dev/null 2>&1; then
        zypper install -y git gcc gcc-c++ make pkg-config clang \
            libopenssl-devel libacl-devel fuse3-devel systemd-devel libuuid-devel 2>/dev/null \
            || echo " ⚠ Some build deps may be missing — continuing anyway"
    fi
    local src="${CUSTOM_INSTALL_DIR:-/var/cache/wolfstack}/proxmox-backup-src"
    rm -rf "$src"
    mkdir -p "$(dirname "$src")"
    # BUGFIX: the original tested `! git clone … | tail -3`, but a pipeline's
    # exit status is the LAST command's (tail), which virtually always
    # succeeds — so clone failures were never detected and the cleanup
    # branch was dead code. Check PIPESTATUS[0] (git's status) instead.
    git clone --depth 1 https://git.proxmox.com/git/proxmox-backup.git "$src" 2>&1 | tail -3
    if [ "${PIPESTATUS[0]}" -ne 0 ]; then
        echo " ✗ Could not clone proxmox-backup source from git.proxmox.com"
        rm -rf "$src"
        return 1
    fi
    # Build only the client crate. The wider workspace pulls in PBS-server
    # crates (proxmox-backup-server, daemons, web UI assets) that need
    # extra build infrastructure we don't want to drag in for the client.
    # Re-use the swap created earlier in the wolfstack source-build block
    # if memory is tight (1GB Pi 3, 2GB Pi 4).
    local cargo_jobs=()   # array, not word-split scalar, so it expands safely
    local total_kb
    total_kb=$(grep MemTotal /proc/meminfo 2>/dev/null | awk '{print $2}')
    if [ -n "$total_kb" ] && [ "$total_kb" -lt 4000000 ]; then
        cargo_jobs=(-j 1)
        echo " Low memory detected — limiting build to one job"
    fi
    # Same PIPESTATUS fix as the clone: `tail -15` would otherwise mask a
    # failed cargo build and skip the diagnostic hints below.
    ( cd "$src" && cargo build --release "${cargo_jobs[@]}" --bin proxmox-backup-client ) 2>&1 | tail -15
    if [ "${PIPESTATUS[0]}" -ne 0 ]; then
        echo " ✗ cargo build failed — see output above"
        echo " The proxmox-backup workspace can be sensitive to system library"
        echo " versions. If openssl-sys, libfuse-sys, or proxmox-* crates"
        echo " failed, install the matching -devel/-dev packages and re-run."
        rm -rf "$src"
        return 1
    fi
    if [ -x "$src/target/release/proxmox-backup-client" ]; then
        install -m 0755 "$src/target/release/proxmox-backup-client" /usr/local/bin/proxmox-backup-client
        echo " ✓ proxmox-backup-client built and installed to /usr/local/bin/"
        rm -rf "$src"
        return 0
    fi
    # Build "succeeded" but produced no executable — treat as failure.
    rm -rf "$src"
    return 1
}
# Diagnose why the binary install failed and, where it makes sense, fall
# back to compiling the client from source (unless --skip-pbs-build).
# amd64 failures are almost always network/repo problems (official packages
# exist), whereas ARM hosts have no upstream binaries at all.
if [ "$pbs_install_success" != "true" ] && [ "$SKIP_PBS_BUILD" != "true" ]; then
    case "$HOST_ARCH" in
        x86_64)
            # Official packages exist for amd64 — a slow source build is
            # unlikely to help, so only report and suggest --build-pbs.
            printf '%s\n' \
                " ⚠ proxmox-backup-client install failed on x86_64 — unusual." \
                " Network or repo issue likely. Skipping source build (the apt" \
                " path should have worked). Re-run with --build-pbs to force" \
                " a from-source attempt anyway."
            ;;
        aarch64|arm64|armv7l|armv6l)
            # Known ARM variants: upstream ships amd64-only, so source
            # build is the only way to get a working client here.
            printf '%s\n' \
                "" \
                " ℹ Proxmox doesn't publish proxmox-backup-client binaries for $HOST_ARCH." \
                " The Debian PBS repo at download.proxmox.com only has binary-amd64/." \
                " Falling back to source build so PBS backup destinations work here."
            if pbs_build_from_source; then pbs_install_success=true; fi
            ;;
        *)
            # Anything else (riscv64, ppc64le, …): best effort via cargo.
            printf '%s\n' \
                " ℹ No upstream proxmox-backup-client binary for $HOST_ARCH." \
                " Attempting source build via cargo..."
            if pbs_build_from_source; then pbs_install_success=true; fi
            ;;
    esac
fi
# `--build-pbs` forces a source build even on amd64 (useful for users who
# want to track upstream master rather than the published bookworm/trixie
# .deb releases). When a binary is already installed we replace it but do
# not fail the install if the build breaks; otherwise a successful build
# also flips the success flag.
if [ "$FORCE_PBS_BUILD" = "true" ]; then
    if [ "$pbs_install_success" = "true" ]; then
        echo " --build-pbs requested — replacing installed binary with source build"
        pbs_build_from_source || true
    else
        pbs_build_from_source && pbs_install_success=true
    fi
fi
# Final status report: if we still have no PBS client, spell out the impact
# and list the backup destinations that keep working without it.
if [ "$pbs_install_success" != "true" ]; then
    printf '%s\n' \
        "" \
        " ⚠ proxmox-backup-client is NOT installed on this host." \
        " Impact: PBS backup destinations in WolfStack will not work." \
        " Workarounds — these backup destinations all work without PBS client:" \
        " • Local filesystem" \
        " • S3 / S3-compatible (Backblaze B2, MinIO, etc.)" \
        " • NFS mount" \
        " • SMB / CIFS share" \
        " • SSHFS to a remote WolfStack node" \
        " • WolfDisk replication" \
        " Manual install (if you really need PBS): https://pbs.proxmox.com/docs/backup-client.html"
fi
# Fix libfuse3 soname: proxmox-backup-client links against libfuse3.so.3,
# but some distros (CachyOS, rolling Arch) only ship soname 4
# (libfuse3.so.4). Point a .so.3 symlink at the real library so the
# dynamic linker can resolve it. Only relevant once the client exists.
if command -v proxmox-backup-client >/dev/null 2>&1; then
    for libdir in /usr/lib /usr/lib64 /usr/lib/x86_64-linux-gnu /usr/lib/aarch64-linux-gnu; do
        # Skip directories that already provide .so.3 or lack .so.4.
        [ -e "$libdir/libfuse3.so.3" ] && continue
        [ -e "$libdir/libfuse3.so.4" ] || continue
        FUSE3_REAL=$(readlink -f "$libdir/libfuse3.so.4")
        ln -sf "$FUSE3_REAL" "$libdir/libfuse3.so.3"
        echo " ✓ Created $libdir/libfuse3.so.3 symlink (soname compat)"
    done
fi
# ─── Configure FUSE for storage mounts ──────────────────────────────────────
# Enable allow_other in FUSE (needed for s3fs mounts accessible by
# containers). Append the directive only when the file exists and the
# option is not already present.
if [ -f /etc/fuse.conf ] && ! grep -q "^user_allow_other" /etc/fuse.conf; then
    echo "user_allow_other" >> /etc/fuse.conf
fi
# Create storage directories.
# rust-s3 syncs bucket contents to /var/cache/wolfstack/s3/<mount-id>/
mkdir -p /etc/wolfstack/s3 /etc/wolfstack/pbs /mnt/wolfstack /var/cache/wolfstack/s3
echo "✓ Storage directories configured"
# Lock down /etc/wolfstack — it holds the cluster secret, PVE API
# tokens inside nodes.json, the join-token, and license.key. Before
# v18.7.27 these files were world-readable (inherited process umask),
# which let any unprivileged local user impersonate a cluster member
# or siphon PVE credentials. The running binary also enforces this
# on startup (paths::harden_existing); tightening here too closes
# the window on very first install before wolfstack has started.
chmod 700 /etc/wolfstack 2>/dev/null || true
for secret_file in \
    /etc/wolfstack/custom-cluster-secret \
    /etc/wolfstack/cluster-secret \
    /etc/wolfstack/nodes.json \
    /etc/wolfstack/join-token \
    /etc/wolfstack/license.key \
    /etc/wolfstack/key.pem
do
    # Tighten to owner-only; ignore files not yet created on a fresh host.
    if [ -e "$secret_file" ]; then chmod 600 "$secret_file" 2>/dev/null || true; fi
done
# ─── Install Docker if missing ──────────────────────────────────────────────
# Installs Docker via the official convenience script, falling back to manual
# per-distro-family repo setup for derivatives the script doesn't recognize.
if ! command -v docker >/dev/null 2>&1; then
    echo ""
    echo "Installing Docker..."
    DOCKER_INSTALLED=false
    # Try the official convenience script first (works for Ubuntu, Debian, Fedora, CentOS, RHEL).
    # BUGFIX: the original piped curl straight into sh — a pipeline's exit
    # status is the last command's (sh), and sh exits 0 on the empty input a
    # failed download produces, so a network failure was reported as
    # "✓ Docker installed". Download to a temp file, run it, and verify the
    # docker binary actually exists before declaring success.
    GET_DOCKER_SCRIPT=$(mktemp)
    if curl -fsSL https://get.docker.com -o "$GET_DOCKER_SCRIPT" 2>/dev/null \
        && sh "$GET_DOCKER_SCRIPT" 2>/dev/null \
        && command -v docker >/dev/null 2>&1; then
        DOCKER_INSTALLED=true
    fi
    rm -f "$GET_DOCKER_SCRIPT"
    if [ "$DOCKER_INSTALLED" != "true" ]; then
        # Convenience script failed — likely a derivative distro (Nobara, Rocky, Alma, etc.)
        # Detect the distro family from /etc/os-release and set up Docker repo manually.
        # os-release is sourced in a subshell each time so its variables
        # don't leak into this script's namespace.
        echo " Convenience script failed — trying manual Docker repo setup..."
        DISTRO_ID=$(. /etc/os-release 2>/dev/null && echo "$ID")
        DISTRO_LIKE=$(. /etc/os-release 2>/dev/null && echo "$ID_LIKE")
        DISTRO_VERSION=$(. /etc/os-release 2>/dev/null && echo "$VERSION_ID")
        if echo "$DISTRO_ID $DISTRO_LIKE" | grep -qiE "fedora"; then
            # Fedora family (Nobara, Ultramarine, etc.) — use Fedora Docker repo.
            # Use the major Fedora version the derivative is based on.
            FEDORA_VER="$DISTRO_VERSION"
            # For derivatives, try to detect the Fedora base version from
            # PLATFORM_ID (looks like "platform:f40").
            if [ "$DISTRO_ID" != "fedora" ]; then
                PLATFORM_ID=$(. /etc/os-release 2>/dev/null && echo "$PLATFORM_ID")
                if echo "$PLATFORM_ID" | grep -q "fedora"; then
                    FEDORA_VER=$(echo "$PLATFORM_ID" | grep -oP 'f\K[0-9]+' || echo "$DISTRO_VERSION")
                fi
            fi
            echo " Detected Fedora family (${DISTRO_ID} based on Fedora ${FEDORA_VER})"
            dnf -y install dnf-plugins-core 2>/dev/null || true
            # dnf5 uses `addrepo`; older dnf4 uses `--add-repo` — try both.
            dnf config-manager addrepo --from-repofile=https://download.docker.com/linux/fedora/docker-ce.repo 2>/dev/null || \
                dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo 2>/dev/null || true
            # Override $releasever with actual Fedora version for derivatives
            # whose own VERSION_ID doesn't exist in Docker's repo tree.
            if [ "$DISTRO_ID" != "fedora" ] && [ -n "$FEDORA_VER" ]; then
                sed -i "s/\$releasever/${FEDORA_VER}/g" /etc/yum.repos.d/docker-ce.repo 2>/dev/null || true
            fi
            if dnf install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin; then
                DOCKER_INSTALLED=true
            fi
        elif echo "$DISTRO_ID $DISTRO_LIKE" | grep -qiE "rhel|centos"; then
            # RHEL family (Rocky, Alma, Oracle, etc.) — use CentOS Docker repo.
            echo " Detected RHEL/CentOS family (${DISTRO_ID})"
            yum install -y yum-utils 2>/dev/null || true
            yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo 2>/dev/null || \
                dnf config-manager addrepo --from-repofile=https://download.docker.com/linux/centos/docker-ce.repo 2>/dev/null || true
            if yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin 2>/dev/null || \
                dnf install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin 2>/dev/null; then
                DOCKER_INSTALLED=true
            fi
        elif echo "$DISTRO_ID $DISTRO_LIKE" | grep -qiE "suse|sles"; then
            # openSUSE/SLES — use distro docker packages.
            echo " Detected SUSE family (${DISTRO_ID})"
            if zypper install -y docker docker-compose 2>/dev/null; then
                DOCKER_INSTALLED=true
            fi
        elif echo "$DISTRO_ID $DISTRO_LIKE" | grep -qiE "debian|ubuntu"; then
            # Debian derivatives (Mint, Pop!_OS, etc.) — use Debian/Ubuntu Docker repo.
            # UBUNTU_CODENAME present ⇒ Ubuntu-based; else fall back to
            # VERSION_CODENAME, and to bookworm for rolling distros.
            UPSTREAM="debian"
            CODENAME=$(. /etc/os-release 2>/dev/null && echo "$UBUNTU_CODENAME")
            if [ -n "$CODENAME" ]; then
                UPSTREAM="ubuntu"
            else
                CODENAME=$(. /etc/os-release 2>/dev/null && echo "$VERSION_CODENAME")
                # For rolling/derivative distros (Kali, Parrot, etc.), Docker has no matching repo
                # Fall back to a known Debian stable codename
                if [ -z "$CODENAME" ] || echo "$CODENAME" | grep -qiE "rolling|sid|unstable"; then
                    CODENAME="bookworm"
                fi
            fi
            echo " Detected Debian family (${DISTRO_ID}, using ${UPSTREAM}/${CODENAME})"
            # Try distro's own docker package first (works on Kali, Parrot, ARM, etc.)
            if apt install -y docker.io 2>/dev/null; then
                echo " ✓ Installed docker.io from distro repos"
                DOCKER_INSTALLED=true
            else
                # Fall back to Docker's official repo: key, sources entry, install.
                apt install -y ca-certificates curl gnupg 2>/dev/null
                install -m 0755 -d /etc/apt/keyrings
                curl -fsSL "https://download.docker.com/linux/${UPSTREAM}/gpg" | gpg --dearmor -o /etc/apt/keyrings/docker.gpg 2>/dev/null
                chmod a+r /etc/apt/keyrings/docker.gpg
                echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/${UPSTREAM} ${CODENAME} stable" > /etc/apt/sources.list.d/docker.list
                apt update -qq 2>/dev/null
                if apt install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin 2>/dev/null; then
                    DOCKER_INSTALLED=true
                fi
            fi
        elif echo "$DISTRO_ID $DISTRO_LIKE" | grep -qiE "arch|manjaro"; then
            # Arch family — docker is in the official repos.
            echo " Detected Arch family (${DISTRO_ID})"
            if pacman -Sy --noconfirm docker docker-compose 2>/dev/null; then
                DOCKER_INSTALLED=true
            fi
        fi
    fi
    if [ "$DOCKER_INSTALLED" = true ]; then
        systemctl enable docker 2>/dev/null || true
        systemctl start docker 2>/dev/null || true
        echo "✓ Docker installed"
    else
        # Non-fatal: WolfStack's other features don't depend on Docker.
        # NOTE(review): PKG_MANAGER is set earlier in the file — confirm
        # its values match the strings compared here.
        echo ""
        echo " ⚠ Docker could not be installed automatically."
        echo " Your distro may not be directly supported by Docker's official repos."
        echo " WolfStack will still work for LXC containers, VMs, and server management."
        echo " To add Docker support, install it manually:"
        echo " https://docs.docker.com/engine/install/"
        if [ "$PKG_MANAGER" = "apt" ]; then
            echo " Or try: sudo apt install docker.io (community package, may be older)"
        elif [ "$PKG_MANAGER" = "dnf" ]; then
            echo " Or try: sudo dnf install docker (community package)"
        elif [ "$PKG_MANAGER" = "pacman" ]; then
            echo " Or try: sudo pacman -S docker (community package)"
        elif [ "$PKG_MANAGER" = "zypper" ]; then
            echo " Or try: sudo zypper install docker (community package)"
        fi
        echo ""
    fi
else
    echo "✓ Docker already installed"
fi
# ─── Install WolfNet (cluster network layer) ────────────────────────────────
# Helper: download prebuilt WolfNet binaries or build from source.
# Sets WOLFNET_BUILT=true on success.
# Requires WOLFNET_SRC_DIR to be set if building from source.
build_or_download_wolfnet() {
    # Try prebuilt first.
    # NOTE(review): download_prebuilt is defined earlier in the file —
    # appears to take <github repo> <asset name> <install path>; confirm.
    if download_prebuilt "wolfsoftwaresystemsltd/WolfNet" "wolfnet" "/usr/local/bin/wolfnet"; then
        # The ctl binary is optional — best-effort, failure is ignored.
        download_prebuilt "wolfsoftwaresystemsltd/WolfNet" "wolfnetctl" "/usr/local/bin/wolfnetctl" || true
        WOLFNET_BUILT=true
        return 0
    fi
    # Fall back to source build — requires a checked-out source tree.
    if [ -z "$WOLFNET_SRC_DIR" ] || [ ! -d "$WOLFNET_SRC_DIR" ]; then
        echo " ✗ WolfNet source not available and no prebuilt binary — skipping"
        WOLFNET_BUILT=false
        return 1
    fi
    # Put the (sudo) user's cargo toolchain on PATH for the build below.
    export PATH="${CARGO_HOME:-$REAL_HOME/.cargo}/bin:/usr/local/bin:/usr/bin:$PATH"
    if ! command -v cargo >/dev/null 2>&1; then