-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathsetup-coder.sh
More file actions
executable file
·1466 lines (1258 loc) · 55.1 KB
/
setup-coder.sh
File metadata and controls
executable file
·1466 lines (1258 loc) · 55.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env bash
# ===========================================================================
# setup-coder.sh — Provision a sandboxed user for AI coding agents
# ===========================================================================
#
# Idempotent — safe to run multiple times. Each step checks whether its
# work is already done before acting.
#
# Creates an isolated Linux user that can:
# - Run Claude Code, Codex, and other AI agents safely
# - Use Podman (rootless) for containers — NO Docker socket access
# - Use the homelab CLI to expose dev servers through Traefik
# - Access GitHub with a fine-grained PAT scoped to specific repos
#
# Security boundaries:
# - Primary user's home is chmod 700 — coder cannot read personal data
# - Coder is NOT in docker or sudo groups
# - Podman runs rootless (no daemon, no privilege escalation)
# - GitHub PAT is fine-grained, limited to target repos only
#
# Configuration (via environment variables):
# CODER_USER — username to create (default: coder)
# PRIMARY_USER — user whose home to protect (default: SUDO_USER)
# DOMAIN — homelab domain (default: read from /opt/homelab/.env)
# HOMELAB_DIR — homelab directory (default: /opt/homelab)
# AITOOLS_DIR — MCP tools source (default: $PRIMARY_HOME/Programming/AITOOLS)
#
# Usage:
# sudo bash setup-coder.sh [--yes]
# sudo CODER_USER=agent PRIMARY_USER=alice DOMAIN=mylab.local bash setup-coder.sh
#
# ===========================================================================
set -euo pipefail
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
# All knobs below are overridable via environment variables (see header).
CODER_USER="${CODER_USER:-coder}"
CODER_HOME="/home/${CODER_USER}"
HOMELAB_DIR="${HOMELAB_DIR:-/opt/homelab}"
ASSUME_YES=false  # flipped to true by --yes/-y in the arg loop below
# Primary user: the human account whose home directory we protect.
# Defaults to SUDO_USER (the user who ran sudo), fallback to logname.
PRIMARY_USER="${PRIMARY_USER:-${SUDO_USER:-$(logname 2>/dev/null || echo "")}}"
PRIMARY_HOME=""
if [[ -n "$PRIMARY_USER" ]]; then
  PRIMARY_HOME="/home/${PRIMARY_USER}"
fi
# Look up KEY in the homelab .env file; print its raw value on stdout.
# Returns non-zero when the file is missing or the key is absent.
_read_homelab_env() {
  local wanted="$1"
  local env_file="${HOMELAB_DIR}/.env"
  [[ -f "$env_file" ]] || return 1
  local entry
  entry="$(grep -m1 "^${wanted}=" "$env_file" 2>/dev/null || true)"
  if [[ -z "$entry" ]]; then
    return 1
  fi
  # Strip everything up to (and including) the first '='.
  printf '%s\n' "${entry#*=}"
}
# DOMAIN: explicit env var wins, then the homelab .env, then a default.
if [[ -z "${DOMAIN:-}" ]]; then
  DOMAIN="$(_read_homelab_env DOMAIN 2>/dev/null || echo "")"
fi
DOMAIN="${DOMAIN:-homelab.local}"
# LAN_IP: env var, then .env, then the source address of the default route.
LAN_IP="${LAN_IP:-$(_read_homelab_env LAN_IP 2>/dev/null || echo "")}"
if [[ -z "$LAN_IP" ]]; then
  LAN_IP="$(ip -4 route get 1.1.1.1 2>/dev/null | awk '/src/ {for (i=1;i<=NF;i++) if ($i=="src") print $(i+1)}' | head -n1 || true)"
fi
HOSTNAME_SHORT="$(hostname -s 2>/dev/null || echo "homelab")"
# AITOOLS: source directory for MCP tools (coder, qt-mcp, cratedex).
# Defaults to $PRIMARY_HOME/Programming/AITOOLS if it exists.
if [[ -z "${AITOOLS_DIR:-}" && -n "$PRIMARY_USER" ]]; then
  _candidate="/home/${PRIMARY_USER}/Programming/AITOOLS"
  [[ -d "$_candidate" ]] && AITOOLS_DIR="$_candidate"
fi
# Normalize to empty string so 'set -u' never trips on it later.
AITOOLS_DIR="${AITOOLS_DIR:-}"
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'

# _emit TAG MSG... — shared printer; echo -e renders the ANSI escapes in TAG.
_emit() {
  local tag="$1"
  shift
  echo -e "${tag} $*"
}
info() { _emit "${GREEN}[INFO]${NC}" "$@"; }
warn() { _emit "${YELLOW}[WARN]${NC}" "$@"; }
err()  { _emit "${RED}[ERR]${NC}" "$@" >&2; }
step() { echo -e "\n${CYAN}── $* ──${NC}"; }
# Ask a yes/no question; returns 0 on y/Y answers, 1 otherwise.
# With --yes (ASSUME_YES=true) the prompt is skipped and 0 is returned.
confirm() {
  local prompt="$1"
  [[ "${ASSUME_YES}" == "true" ]] && return 0
  echo -en "${YELLOW}${prompt} [y/N]: ${NC}"
  read -r ans
  case "$ans" in
    [Yy]*) return 0 ;;
    *)     return 1 ;;
  esac
}
run_as_coder() {
  # Run a single command string ($1) as $CODER_USER, starting from their $HOME.
  # stdin/stdout/stderr pass through, so callers can pipe into or out of it.
  #
  # Use -H (set HOME) + bash -lc (login shell) instead of sudo -i.
  # sudo -i re-escapes arguments for the login shell's -c option, which
  # breaks compound commands (&&) by splitting them across shell contexts.
  # With -H + bash -l, the -c argument is passed directly to bash by the
  # OS exec, bypassing sudo's escaping entirely.
  sudo -Hu "$CODER_USER" bash -lc "cd \"\$HOME\" && $1"
}
# Read/write env vars in coder's .bashrc (idempotent)
_coder_has_env() { grep -q "^export $1=" "${CODER_HOME}/.bashrc" 2>/dev/null; }

# _coder_set_env NAME VALUE — replace (or append) `export NAME=...` in
# coder's .bashrc.
# Fixes over the naive version:
#   - values are written with %q, so tokens containing quotes, spaces or `$`
#     survive sourcing verbatim instead of being expanded or breaking syntax;
#   - the file is touch-ed first, so `sed -i` cannot abort the script
#     (set -e) when .bashrc does not exist yet.
_coder_set_env() {
  local name="$1" value="$2"
  local bashrc="${CODER_HOME}/.bashrc"
  touch "$bashrc"
  # Drop any previous definition, then append the new one (idempotent).
  sed -i "/^export ${name}=/d" "$bashrc"
  printf 'export %s=%q\n' "$name" "$value" >> "$bashrc"
  chown "${CODER_USER}:${CODER_USER}" "$bashrc"
  chmod 600 "$bashrc"
}
# ---------------------------------------------------------------------------
# Args
# ---------------------------------------------------------------------------
# Only two flags: --yes/-y (non-interactive) and -h/--help; anything else
# is rejected so typos don't silently run the full provisioning.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --yes|-y) ASSUME_YES=true; shift ;;
    -h|--help)
      echo "Usage: sudo bash $0 [--yes]"
      echo ""
      echo "Environment variables:"
      echo " CODER_USER Username to create (default: coder)"
      echo " PRIMARY_USER User whose home to protect (default: \$SUDO_USER)"
      echo " DOMAIN Homelab domain (default: from /opt/homelab/.env)"
      echo " HOMELAB_DIR Homelab directory (default: /opt/homelab)"
      exit 0
      ;;
    *) err "Unknown argument: $1"; exit 1 ;;
  esac
done
# ---------------------------------------------------------------------------
# Pre-flight
# ---------------------------------------------------------------------------
step "Pre-flight checks"
# Root is required: we create users, chmod homes, and write under /etc.
if [[ $EUID -ne 0 ]]; then
  err "This script must be run as root. Try: sudo bash $0"
  exit 1
fi
info "Configuration:"
info " CODER_USER: ${CODER_USER}"
info " PRIMARY_USER: ${PRIMARY_USER:-"(none)"}"
info " DOMAIN: ${DOMAIN}"
info " LAN_IP: ${LAN_IP:-"(unknown)"}"
info " HOMELAB_DIR: ${HOMELAB_DIR}"
info " AITOOLS_DIR: ${AITOOLS_DIR:-"(not set)"}"
echo ""
# Summarize the plan so the operator can bail out before any mutation.
echo "This script will:"
echo " 1. Create the '${CODER_USER}' user with homelab group membership"
if [[ -n "$PRIMARY_HOME" ]]; then
  echo " 2. Lock down ${PRIMARY_HOME} (chmod 700)"
else
  echo " 2. (skip — no PRIMARY_USER set)"
fi
echo " 3. Install Podman + crun for rootless containers (with GPU ACLs if AMD GPU present)"
echo " 3.5 Install Rust ecosystem (rustup + cratedex)"
echo " 4. Install Node.js (via nvm)"
echo " 5. Install Python ecosystem (uv + ruff + qt-mcp)"
echo " 6. Configure gh CLI with a fine-grained PAT"
echo " 7. Install agent CLIs (Claude Code native, OpenAI Codex, Google Gemini)"
echo " 8. Set up MCP server config and CLAUDE.md"
echo " 9. Create workspace directory"
echo " 10. Configure SSH keys, API keys & authenticate agents"
echo " 10.6 Set up Ollama LLM inference (Podman + ROCm GPU)"
echo " 11. Run verification checks"
echo ""
# Interactive gate — auto-approved with --yes.
if ! confirm "Proceed?"; then
  info "Aborted."
  exit 0
fi
# ---------------------------------------------------------------------------
# Step 1: Create coder user
# ---------------------------------------------------------------------------
step "Creating '${CODER_USER}' user"
# Ensure homelab group exists (-f: no error if it already does)
groupadd -f homelab
if id "$CODER_USER" &>/dev/null; then
  info "User '${CODER_USER}' already exists"
  # Repair group membership on re-runs.
  if ! id -nG "$CODER_USER" | grep -qw homelab; then
    usermod -aG homelab "$CODER_USER"
    info "Added ${CODER_USER} to homelab group"
  fi
else
  useradd -m -s /bin/bash -G homelab "$CODER_USER"
  info "Created user '${CODER_USER}' (groups: homelab)"
fi
# Allow the primary user to read/navigate the coder home directory by adding
# them to the coder group. The coder home is 750 (rwxr-x---), so group members
# get read + execute. The reverse is NOT true — coder cannot read PRIMARY_HOME
# (chmod 700 in step 2).
if [[ -n "$PRIMARY_USER" ]]; then
  if id -nG "$PRIMARY_USER" 2>/dev/null | grep -qw "$CODER_USER"; then
    info "${PRIMARY_USER} already in ${CODER_USER} group"
  else
    usermod -aG "$CODER_USER" "$PRIMARY_USER"
    info "Added ${PRIMARY_USER} to ${CODER_USER} group (read access to ${CODER_HOME})"
  fi
fi
# Ensure coder home is 750 (owner: full, group: read+exec, others: none)
chmod 750 "${CODER_HOME}"
# Explicitly ensure coder is NOT in docker or sudo groups — membership in
# either would defeat the sandbox (docker group is effectively root).
for dangerous_group in docker sudo; do
  if id -nG "$CODER_USER" 2>/dev/null | grep -qw "$dangerous_group"; then
    gpasswd -d "$CODER_USER" "$dangerous_group" 2>/dev/null || true
    warn "Removed ${CODER_USER} from '${dangerous_group}' group (security policy)"
  fi
done
# ---------------------------------------------------------------------------
# Step 2: Lock down primary user's home directory
# ---------------------------------------------------------------------------
step "Locking down primary user home"
if [[ -n "$PRIMARY_HOME" && -d "$PRIMARY_HOME" ]]; then
  # Only chmod when needed so repeated runs are quiet no-ops.
  current_mode="$(stat -c '%a' "$PRIMARY_HOME" 2>/dev/null || true)"
  if [[ "$current_mode" == "700" ]]; then
    info "${PRIMARY_HOME} already mode 700"
  else
    chmod 700 "$PRIMARY_HOME"
    info "${PRIMARY_HOME} set to mode 700 (was ${current_mode})"
  fi
elif [[ -n "$PRIMARY_HOME" ]]; then
  warn "${PRIMARY_HOME} does not exist — skipping"
else
  info "No PRIMARY_USER set — skipping home lockdown"
fi
# ---------------------------------------------------------------------------
# Step 3: Install Podman
# ---------------------------------------------------------------------------
step "Installing Podman (rootless containers)"
export DEBIAN_FRONTEND=noninteractive
if ! command -v podman &>/dev/null; then
  apt-get update -y
  apt-get install -y --no-install-recommends podman slirp4netns uidmap
  info "Podman installed"
else
  info "Podman already installed: $(podman --version)"
fi
# Enable linger so coder's systemd user services survive logout
loginctl enable-linger "$CODER_USER"
info "Enabled loginctl linger for ${CODER_USER}"
# Install crun — required for --group-add keep-groups in rootless containers.
# runc does not support the run.oci.keep_original_groups annotation that
# preserves host supplementary groups inside user namespaces.
if ! command -v crun &>/dev/null; then
  apt-get install -y --no-install-recommends crun
  info "crun installed"
else
  info "crun already installed: $(crun --version | head -1)"
fi
# Configure Podman for coder user: crun runtime + cgroupfs cgroup manager.
# cgroupfs avoids the dependency on systemd --user which can be unreliable
# in SSH/sudo sessions. events_logger=file avoids journald permission issues.
_coder_containers_dir="${CODER_HOME}/.config/containers"
mkdir -p "$_coder_containers_dir"
cat > "${_coder_containers_dir}/containers.conf" << 'CONTAINERS_CONF'
[engine]
runtime = "crun"
cgroup_manager = "cgroupfs"
events_logger = "file"
CONTAINERS_CONF
chown -R "${CODER_USER}:${CODER_USER}" "${CODER_HOME}/.config"
info "Podman configured for ${CODER_USER} (crun + cgroupfs)"
# GPU access for rootless Podman (AMD ROCm).
# In rootless Podman, host GIDs (render, video) can't cross the user namespace
# boundary. Device ACLs grant access by UID instead, which works with
# --userns=keep-id since the container process retains the host UID.
if [[ -e /dev/kfd && -d /dev/dri ]]; then
  # /dev/kfd alone is not enough — require at least one render node too.
  _has_render_node=false
  for _rn in /dev/dri/renderD*; do
    [[ -e "$_rn" ]] && _has_render_node=true && break
  done
  if [[ "$_has_render_node" == "true" ]]; then
    info "AMD GPU detected — configuring device ACLs for ${CODER_USER}"
    if ! command -v setfacl &>/dev/null; then
      apt-get install -y --no-install-recommends acl
    fi
    # Set ACLs on GPU compute devices
    setfacl -m "u:${CODER_USER}:rw" /dev/kfd
    for _rn in /dev/dri/renderD*; do
      setfacl -m "u:${CODER_USER}:rw" "$_rn"
    done
    info "GPU device ACLs set for ${CODER_USER}"
    # Persist ACLs across reboots via udev rule
    _udev_rule="/etc/udev/rules.d/99-gpu-${CODER_USER}.rules"
    cat > "$_udev_rule" << UDEV_GPU
# Grant ${CODER_USER} access to AMD GPU for rootless Podman (ROCm)
KERNEL=="kfd", SUBSYSTEM=="kfd", RUN+="/usr/bin/setfacl -m u:${CODER_USER}:rw /dev/kfd"
KERNEL=="renderD[0-9]*", SUBSYSTEM=="drm", RUN+="/usr/bin/setfacl -m u:${CODER_USER}:rw /dev/dri/%k"
UDEV_GPU
    info "Udev rule installed: ${_udev_rule}"
    # Auto-detect HSA_OVERRIDE_GFX_VERSION from KFD topology
    if [[ -z "${HSA_OVERRIDE_GFX_VERSION:-}" ]]; then
      for _props in /sys/class/kfd/kfd/topology/nodes/*/properties; do
        [[ -f "$_props" ]] || continue
        _gfx_ver="$(awk '/^gfx_target_version/ {print $2}' "$_props" 2>/dev/null || true)"
        # Skip nodes without a GPU target (CPUs report 0).
        [[ -n "$_gfx_ver" && "$_gfx_ver" != "0" ]] || continue
        # gfx_target_version encodes major*10000 + minor*100 + patch.
        HSA_OVERRIDE_GFX_VERSION="$(printf '%d.%d.%d' \
          "$((_gfx_ver / 10000))" \
          "$(((_gfx_ver / 100) % 100))" \
          "$((_gfx_ver % 100))")"
        break
      done
    fi
    if [[ -n "${HSA_OVERRIDE_GFX_VERSION:-}" ]]; then
      info "HSA_OVERRIDE_GFX_VERSION=${HSA_OVERRIDE_GFX_VERSION} (auto-detected)"
    else
      warn "Could not detect HSA_OVERRIDE_GFX_VERSION — set manually if ROCm fails"
    fi
  fi
else
  info "No AMD GPU detected — skipping GPU ACL setup"
fi
# ---------------------------------------------------------------------------
# Step 3.5: Install Rust ecosystem (rustup + cratedex)
# ---------------------------------------------------------------------------
step "Installing Rust ecosystem for ${CODER_USER}"
# Ensure build dependencies are present (rustup needs curl, cratedex needs cc/pkg-config)
if ! command -v cc &>/dev/null; then
  apt-get install -y --no-install-recommends build-essential pkg-config libssl-dev
fi
# Install rustup + stable toolchain
if run_as_coder 'command -v rustup' &>/dev/null; then
  info "rustup already installed: $(run_as_coder 'rustup --version 2>/dev/null')"
else
  info "Installing rustup + stable toolchain..."
  # --no-modify-path: we manage .profile ourselves just below.
  run_as_coder 'curl --proto "=https" --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable --no-modify-path'
  # Ensure .cargo/env is sourced from .profile
  if ! grep -qs 'cargo/env' "${CODER_HOME}/.profile" 2>/dev/null; then
    cat >> "${CODER_HOME}/.profile" <<'EOF'
# Added by setup-coder.sh: Rust toolchain
if [ -f "$HOME/.cargo/env" ]; then
. "$HOME/.cargo/env"
fi
EOF
    chown "${CODER_USER}:${CODER_USER}" "${CODER_HOME}/.profile"
  fi
  info "rustup + stable toolchain installed"
fi
# Install cratedex (Rust docs MCP server)
if run_as_coder '. "$HOME/.cargo/env" && command -v cratedex' &>/dev/null; then
  info "cratedex already installed: $(run_as_coder '. "$HOME/.cargo/env" && cratedex --version 2>/dev/null' || echo 'unknown')"
else
  info "Installing cratedex (this may take a few minutes)..."
  run_as_coder '. "$HOME/.cargo/env" && cargo install cratedex'
  info "cratedex installed"
fi
# Install system-level cratedex.service via cratedex's built-in installer.
# This handles copying the binary, generating the unit file with correct paths
# (home dir, toolchain, etc.), and enabling the service.
if [[ -f /etc/systemd/system/cratedex.service ]] \
  && grep -q "User=${CODER_USER}" /etc/systemd/system/cratedex.service; then
  info "cratedex.service already installed (User=${CODER_USER})"
else
  info "Installing cratedex systemd service (User=${CODER_USER})..."
  "${CODER_HOME}/.cargo/bin/cratedex" install-service --system --run-as "${CODER_USER}"
fi
# Ensure it's enabled and running
systemctl enable cratedex.service 2>/dev/null || true
if systemctl is-active --quiet cratedex.service; then
  info "cratedex.service is running"
else
  systemctl start cratedex.service
  info "cratedex.service started"
fi
# ---------------------------------------------------------------------------
# Step 4: Install Node.js via nvm
# ---------------------------------------------------------------------------
step "Installing Node.js (nvm) for ${CODER_USER}"
NVM_VERSION="v0.40.1"
NVM_INSTALL_URL="https://raw.githubusercontent.com/nvm-sh/nvm/${NVM_VERSION}/install.sh"
# Ensure dependencies are present before we try to bootstrap nvm.
if ! command -v curl &>/dev/null; then
  apt-get install -y --no-install-recommends curl ca-certificates
fi
if ! command -v git &>/dev/null; then
  apt-get install -y --no-install-recommends git
fi
# Do not treat "node exists" as success; later steps assume nvm exists.
if run_as_coder 'export NVM_DIR="$HOME/.nvm" && [ -s "$NVM_DIR/nvm.sh" ]' &>/dev/null; then
  info "nvm already installed for ${CODER_USER}"
else
  info "Installing nvm (${NVM_VERSION})..."
  # nvm's install script may exit non-zero due to a sourcing issue in
  # non-interactive login shells. Tolerate that and verify separately.
  run_as_coder 'export NVM_DIR="$HOME/.nvm" && curl -fsSL "'"${NVM_INSTALL_URL}"'" | bash' || true
  # If ~/.nvm exists but is missing nvm.sh, the installer can think it is
  # "already installed" and skip cloning. Repair by doing a fresh clone.
  if ! run_as_coder 'export NVM_DIR="$HOME/.nvm" && [ -s "$NVM_DIR/nvm.sh" ]'; then
    warn "nvm appears partially installed for ${CODER_USER} — attempting repair"
    # Use absolute paths instead of run_as_coder to avoid sudo -i
    # escaping issues with compound commands (&&) in nested shells.
    rm -rf "${CODER_HOME}/.nvm"
    sudo -u "$CODER_USER" git clone https://github.com/nvm-sh/nvm.git "${CODER_HOME}/.nvm"
    sudo -u "$CODER_USER" git -C "${CODER_HOME}/.nvm" checkout "${NVM_VERSION}"
  fi
  # Final verification — bail out loudly with debug info if nvm is unusable.
  if ! run_as_coder 'export NVM_DIR="$HOME/.nvm" && [ -s "$NVM_DIR/nvm.sh" ]'; then
    err "nvm installation failed — nvm.sh not found at \$HOME/.nvm/nvm.sh for ${CODER_USER}"
    run_as_coder 'export NVM_DIR="$HOME/.nvm" && echo "Debug: HOME=$HOME NVM_DIR=$NVM_DIR" && ls -la "$NVM_DIR" 2>/dev/null || true' >&2 || true
    exit 1
  fi
fi
# Install the current LTS release if no node is resolvable via nvm yet.
if run_as_coder 'export NVM_DIR="$HOME/.nvm" && . "$NVM_DIR/nvm.sh" && command -v node' &>/dev/null; then
  info "Node.js already available for ${CODER_USER}: $(run_as_coder 'export NVM_DIR="$HOME/.nvm" && . "$NVM_DIR/nvm.sh" && node --version')"
else
  run_as_coder 'export NVM_DIR="$HOME/.nvm" && . "$NVM_DIR/nvm.sh" && nvm install --lts'
  info "Node.js installed: $(run_as_coder 'export NVM_DIR="$HOME/.nvm" && . "$NVM_DIR/nvm.sh" && node --version')"
fi
# ---------------------------------------------------------------------------
# Step 5: Install Python ecosystem (uv + ruff + qt-mcp)
# ---------------------------------------------------------------------------
step "Installing Python ecosystem for ${CODER_USER}"
if ! command -v python3 &>/dev/null; then
  apt-get install -y --no-install-recommends python3 python3-venv
  info "Python3 installed"
else
  info "Python3 already available: $(python3 --version)"
fi
# uv — Python package/tool manager (installer drops it in ~/.local/bin)
if run_as_coder 'command -v uv' &>/dev/null; then
  info "uv already available: $(run_as_coder 'uv --version')"
else
  run_as_coder 'curl -LsSf https://astral.sh/uv/install.sh | sh'
  info "uv installed"
fi
# ruff — fast Python linter/formatter
if run_as_coder '$HOME/.local/bin/uv tool list 2>/dev/null | grep -q ruff'; then
  info "ruff already installed"
else
  run_as_coder '$HOME/.local/bin/uv tool install ruff'
  info "ruff installed"
fi
# qt-mcp — Qt/PySide6 desktop app inspection (MCP server)
if run_as_coder '$HOME/.local/bin/uv tool list 2>/dev/null | grep -q qt-mcp'; then
  info "qt-mcp already installed"
else
  # Try PyPI first; fall back to AITOOLS source if available
  if run_as_coder '$HOME/.local/bin/uv tool install qt-mcp' 2>/dev/null; then
    info "qt-mcp installed (PyPI)"
  elif [[ -n "${AITOOLS_DIR:-}" && -d "${AITOOLS_DIR}/qt-mcp" ]]; then
    # Install from local source — copy to a temp dir readable by coder
    # (coder cannot read inside PRIMARY_HOME, where AITOOLS usually lives)
    _qtmcp_tmp="/tmp/qt-mcp-install-$$"
    cp -r "${AITOOLS_DIR}/qt-mcp" "$_qtmcp_tmp"
    chown -R "${CODER_USER}:${CODER_USER}" "$_qtmcp_tmp"
    run_as_coder "\$HOME/.local/bin/uv tool install '${_qtmcp_tmp}'"
    rm -rf "$_qtmcp_tmp"
    info "qt-mcp installed (from AITOOLS)"
  else
    warn "qt-mcp not available on PyPI yet and AITOOLS_DIR not set — install later: uv tool install qt-mcp"
  fi
fi
# ---------------------------------------------------------------------------
# Step 6: Configure gh CLI
# ---------------------------------------------------------------------------
step "Configuring GitHub CLI"
if ! command -v gh &>/dev/null; then
  info "Installing gh CLI..."
  if ! command -v curl &>/dev/null; then
    apt-get install -y --no-install-recommends curl
  fi
  # Add GitHub's apt repo: signing key + sources entry, then install.
  curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg \
    | dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg
  chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg
  echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" \
    > /etc/apt/sources.list.d/github-cli.list
  apt-get update -y
  apt-get install -y gh
  info "gh CLI installed"
else
  info "gh CLI already installed: $(gh --version | head -1)"
fi
if run_as_coder 'gh auth status' &>/dev/null 2>&1; then
  info "gh CLI already authenticated for ${CODER_USER}"
else
  echo ""
  echo -e "${YELLOW}GitHub authentication for the ${CODER_USER} user:${NC}"
  echo ""
  echo " Create a fine-grained PAT at: https://github.com/settings/personal-access-tokens/new"
  echo " Scope it to ONLY the repos the ${CODER_USER} user needs access to."
  echo " Required permissions: Contents (read/write), Pull requests (read/write)"
  echo ""
  if [[ "${ASSUME_YES}" == "true" ]]; then
    warn "Skipping gh auth in non-interactive mode. Run manually:"
    warn " sudo -u ${CODER_USER} -i gh auth login --with-token"
  else
    # Read the PAT silently (-s) so it never echoes to the terminal.
    echo -en "${YELLOW}Paste the fine-grained PAT (or press Enter to skip): ${NC}"
    read -r -s pat
    echo ""
    if [[ -n "$pat" ]]; then
      echo "$pat" | run_as_coder 'gh auth login --with-token'
      info "gh CLI authenticated for ${CODER_USER}"
    else
      warn "Skipped gh auth. Configure later: sudo -u ${CODER_USER} -i gh auth login"
    fi
  fi
fi
# Export GITHUB_PERSONAL_ACCESS_TOKEN so Podman can forward it to MCP containers
if run_as_coder 'gh auth status' &>/dev/null 2>&1; then
  _gh_token="$(run_as_coder 'gh auth token' 2>/dev/null || true)"
  if [[ -n "$_gh_token" ]]; then
    _coder_set_env GITHUB_PERSONAL_ACCESS_TOKEN "$_gh_token"
    info "GITHUB_PERSONAL_ACCESS_TOKEN set from gh auth token"
  fi
fi
# Pull GitHub MCP server image for Podman-based MCP
run_as_coder 'podman pull ghcr.io/github/github-mcp-server:latest'
info "GitHub MCP server image pulled"
# ---------------------------------------------------------------------------
# Step 7: Install Agent CLIs
# ---------------------------------------------------------------------------
# Installs Claude Code (native installer) plus Codex and Gemini (npm) for coder.
step "Installing agent CLIs"
# Make sure login shells for coder pick up ~/.local/bin, where Claude Code's
# native installer drops its binary. Idempotent via the grep guard.
_ensure_local_bin_on_path() {
  local rc="${CODER_HOME}/.profile"
  if [[ ! -f "$rc" ]]; then
    touch "$rc"
    chown "${CODER_USER}:${CODER_USER}" "$rc"
  fi
  # Already wired up — nothing to do.
  grep -qs 'HOME/.local/bin' "$rc" && return 0
  cat >> "$rc" <<'EOF'
# Added by setup-coder.sh: ensure user-local binaries are discoverable
if [ -d "$HOME/.local/bin" ]; then
PATH="$HOME/.local/bin:$PATH"
fi
EOF
  chown "${CODER_USER}:${CODER_USER}" "$rc"
}
# Install a global npm CLI package ($2) for coder unless its binary ($1)
# already resolves inside an nvm-enabled login shell. $3 is the display name.
_install_npm_cli() {
  local bin_name="$1" npm_pkg="$2" display="$3"
  local nvm_prelude='export NVM_DIR="$HOME/.nvm" && . "$NVM_DIR/nvm.sh"'
  if run_as_coder "${nvm_prelude} && command -v ${bin_name}" &>/dev/null; then
    info "${display} already installed"
    return 0
  fi
  run_as_coder "${nvm_prelude} && npm i -g ${npm_pkg}"
  info "${display} installed"
}
# Anthropic: Claude Code (native installer; npm installation is deprecated)
_ensure_local_bin_on_path
if run_as_coder 'command -v claude' &>/dev/null; then
  info "Claude Code CLI already installed"
else
  run_as_coder 'curl -fsSL https://claude.ai/install.sh | bash'
  # Verify the installer actually produced a usable binary before continuing.
  if run_as_coder 'command -v claude' &>/dev/null; then
    info "Claude Code CLI installed (native)"
  else
    err "Claude Code CLI install failed — 'claude' not found on PATH for ${CODER_USER}"
    run_as_coder 'ls -la "$HOME/.local/bin" 2>/dev/null || true' >&2 || true
    exit 1
  fi
fi
# OpenAI: Codex CLI (open source)
_install_npm_cli "codex" "@openai/codex" "OpenAI Codex CLI"
# Google: Gemini CLI (open source)
_install_npm_cli "gemini" "@google/gemini-cli" "Google Gemini CLI"
# ---------------------------------------------------------------------------
# Step 8: MCP server config + CLAUDE.md
# ---------------------------------------------------------------------------
step "Configuring MCP servers and CLAUDE.md"
mkdir -p "${CODER_HOME}/.claude"
chown "${CODER_USER}:${CODER_USER}" "${CODER_HOME}/.claude"
# MCP servers — written to ~/.claude.json (Claude Code's global config).
# Other top-level keys in an existing ~/.claude.json are preserved; the
# whole `mcpServers` object is replaced with the payload below.
# (The heredoc is parsed by json.loads, so its whitespace never reaches disk.)
_claude_json="${CODER_HOME}/.claude.json"
_mcp_payload=$(cat << 'MCP_JSON'
{
  "cratedex": {
    "type": "http",
    "url": "http://127.0.0.1:3737/mcp"
  },
  "coder": {
    "type": "stdio",
    "command": "node",
    "args": ["CODER_HOME_PLACEHOLDER/tools/coder/bin/coder-mcp.js"]
  },
  "qt-mcp": {
    "type": "stdio",
    "command": "CODER_HOME_PLACEHOLDER/.local/bin/uvx",
    "args": ["qt-mcp"]
  },
  "github": {
    "type": "stdio",
    "command": "podman",
    "args": ["run", "-i", "--rm", "-e", "GITHUB_PERSONAL_ACCESS_TOKEN", "ghcr.io/github/github-mcp-server"]
  }
}
MCP_JSON
)
# Substitute the real home directory into the stdio server paths.
_mcp_payload="${_mcp_payload//CODER_HOME_PLACEHOLDER/${CODER_HOME}}"
python3 -c "
import json, sys, os, tempfile
path = sys.argv[1]
servers = json.loads(sys.argv[2])
data = {}
if os.path.isfile(path):
    # A corrupt existing config used to abort the whole setup with a raw
    # traceback; fail with an actionable message instead of clobbering it.
    try:
        with open(path) as f:
            data = json.load(f)
    except (json.JSONDecodeError, UnicodeDecodeError) as e:
        sys.exit('error: %s is not valid JSON (%s) — fix or remove it, then re-run' % (path, e))
data['mcpServers'] = servers
# Atomic replace: write a temp file in the same directory, then rename, so
# a crash mid-write cannot leave a truncated ~/.claude.json behind.
fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.', prefix='.claude.json.')
with os.fdopen(fd, 'w') as f:
    json.dump(data, f, indent=2)
    f.write('\n')
os.replace(tmp, path)
" "$_claude_json" "$_mcp_payload"
chown "${CODER_USER}:${CODER_USER}" "$_claude_json"
info "MCP servers written to ${_claude_json}"
# Gemini CLI MCP settings — ~/.gemini/settings.json
mkdir -p "${CODER_HOME}/.gemini"
chown "${CODER_USER}:${CODER_USER}" "${CODER_HOME}/.gemini"
# Only the GitHub MCP server is registered for Gemini (the other servers
# configured above are written only into Claude's config). The GEMINI_MCP
# delimiter is unquoted, so $vars would expand — the payload contains none,
# so the JSON below is written verbatim.
cat > "${CODER_HOME}/.gemini/settings.json" << GEMINI_MCP
{
"mcpServers": {
"github": {
"command": "podman",
"args": ["run", "-i", "--rm", "-e", "GITHUB_PERSONAL_ACCESS_TOKEN", "ghcr.io/github/github-mcp-server"]
}
}
}
GEMINI_MCP
# File is created by root; hand ownership to the coder user.
chown "${CODER_USER}:${CODER_USER}" "${CODER_HOME}/.gemini/settings.json"
info "MCP server config written to ${CODER_HOME}/.gemini/settings.json"
# Codex CLI MCP settings — ~/.codex/config.toml
mkdir -p "${CODER_HOME}/.codex"
chown "${CODER_USER}:${CODER_USER}" "${CODER_HOME}/.codex"
# As with Gemini, only the GitHub MCP server is registered for Codex.
# NOTE(review): env_vars presumably forwards GITHUB_PERSONAL_ACCESS_TOKEN
# from the CLI's environment to the spawned podman process — confirm against
# the Codex CLI config reference.
cat > "${CODER_HOME}/.codex/config.toml" << CODEX_MCP
[mcp_servers.github]
command = "podman"
args = ["run", "-i", "--rm", "-e", "GITHUB_PERSONAL_ACCESS_TOKEN", "ghcr.io/github/github-mcp-server"]
env_vars = ["GITHUB_PERSONAL_ACCESS_TOKEN"]
CODEX_MCP
chown "${CODER_USER}:${CODER_USER}" "${CODER_HOME}/.codex/config.toml"
info "MCP server config written to ${CODER_HOME}/.codex/config.toml"
# Register MCP servers as discoverable services
# Each .svc file is a simple key=value descriptor — presumably consumed by
# the `homelab svc` CLI documented below. The SVC_* heredoc delimiters are
# deliberately unquoted so ${CODER_HOME} / ${CODER_USER} expand at write time.
if [[ -d "${HOMELAB_DIR}/services" ]]; then
cat > "${HOMELAB_DIR}/services/cargo-mcp.svc" << SVC_CARGO
name=cargo-mcp
type=systemd
unit=cratedex.service
port=3737
url=http://127.0.0.1:3737
description=Rust documentation & diagnostics (MCP)
SVC_CARGO
cat > "${HOMELAB_DIR}/services/qt-mcp.svc" << SVC_QT
name=qt-mcp
type=stdio
url=tcp://localhost:9142
description=Qt/PySide6 app inspection (MCP, stdio)
SVC_QT
cat > "${HOMELAB_DIR}/services/coder.svc" << SVC_CODER
name=coder
type=stdio
url=stdio://${CODER_HOME}/tools/coder/bin/coder-mcp.js
description=Multi-agent coding workflow (MCP, stdio)
SVC_CODER
cat > "${HOMELAB_DIR}/services/github-mcp.svc" << SVC_GITHUB
name=github-mcp
type=stdio
url=podman://ghcr.io/github/github-mcp-server
description=GitHub API tools (MCP, stdio via Podman)
SVC_GITHUB
cat > "${HOMELAB_DIR}/services/ollama.svc" << SVC_OLLAMA
name=ollama
type=systemd
unit=ollama.service
user=${CODER_USER}
port=11434
route=llm
url=http://127.0.0.1:11434
description=LLM inference with GPU (Podman Quadlet + ROCm)
SVC_OLLAMA
# root-owned but group-writable (664 + homelab group) so homelab members can
# edit or remove registrations without sudo.
chown root:homelab "${HOMELAB_DIR}/services"/{cargo-mcp,qt-mcp,coder,github-mcp,ollama}.svc
chmod 664 "${HOMELAB_DIR}/services"/{cargo-mcp,qt-mcp,coder,github-mcp,ollama}.svc
info "Registered MCP services in ${HOMELAB_DIR}/services/"
else
# Missing directory is non-fatal: registration is skipped, setup continues.
warn "Services directory not found — run setup-homelab.sh first"
fi
# ---------------------------------------------------------------------------
# Step 9: CLAUDE.md for coder user (templated with actual values)
# ---------------------------------------------------------------------------
# Unquoted CODER_CLAUDE_MD delimiter: ${CODER_USER}, ${DOMAIN}, ${LAN_IP:-…}
# etc. expand while the file is written. Backticks and literal dollars in the
# markdown are escaped (\` and \$) so they reach the file as-is instead of
# being executed/expanded by this heredoc.
cat > "${CODER_HOME}/.claude/CLAUDE.md" << CODER_CLAUDE_MD
# Coder Agent Environment
You are running as the \`${CODER_USER}\` user on ${HOSTNAME_SHORT} (${LAN_IP:-unknown}), an isolated
account for AI coding agents.
## Security Boundaries
- You do NOT have access to \`${PRIMARY_HOME:-/home/primary-user}\` (permission denied)
- You are NOT in the docker or sudo groups
- Use **Podman** (rootless) for containers, NOT Docker
- Your GitHub token is a fine-grained PAT scoped to specific repos only
- Do not attempt privilege escalation or access other users' data
## MCP Servers
### cargo-mcp — Rust documentation & diagnostics
- HTTP transport: \`127.0.0.1:3737\` (shared service, runs as ${PRIMARY_USER:-system})
- \`register_project\` first, then \`search_docs\`, \`get_diagnostics\`, \`list_crates\`
### coder — Multi-agent coding workflow
- stdio MCP: \`node ${CODER_HOME}/tools/coder/bin/coder-mcp.js\`
- Pipeline: list_issues -> draft_issue -> create_plan -> implement -> review_and_test -> create_pr
- \`coder_auto\` for autonomous batch processing
### qt-mcp — Qt/PySide6 desktop app inspection
- stdio MCP: \`uvx qt-mcp\`
- Target app must have probe installed: \`QT_MCP_PROBE=1 python -m your_app\`
- \`qt_snapshot\` first (get widget tree + refs), then interact via refs
### github — GitHub API tools
- stdio MCP via Podman: \`podman run -i --rm -e GITHUB_PERSONAL_ACCESS_TOKEN ghcr.io/github/github-mcp-server\`
- Tools: issues, pull_requests, repos, code search, users, and more
- Your token is a fine-grained PAT scoped to specific repos only
## Homelab CLI
Expose dev servers on the LAN through Traefik reverse proxy:
\`\`\`bash
homelab dev up <name> <port> --ttl 4h # route name.${DOMAIN} -> localhost:port
homelab dev down <name> # remove route
homelab dev ps # list active routes
homelab ls --json # JSON output for parsing
\`\`\`
**Always use \`homelab dev up\` with a \`--ttl\`** so routes auto-expire.
Do not use \`homelab add\` directly — prefer \`dev up\` which sets owner and TTL.
## Service Discovery
Discover registered services (Ollama, MCP servers, etc.):
\`\`\`bash
homelab svc ls # list services + live status
homelab svc ls --json # JSON output
homelab svc info <name> # full service metadata
homelab svc start <name> # start a service
homelab svc stop <name> # stop a service
homelab svc log <name> # follow service logs
\`\`\`
### Ollama (LLM inference)
Ollama provides local LLM inference with AMD ROCm GPU acceleration via Podman:
- **API**: \`http://localhost:11434\`
- **LAN**: \`https://llm.${DOMAIN}\`
- **Service**: \`systemctl status ollama\` / \`journalctl -u ollama -f\`
- **Models**: \`curl -s localhost:11434/api/tags | jq\`
- **Pull model**: \`curl -X POST localhost:11434/api/pull -d '{"name":"qwen2.5-coder:7b"}'\`
## Containers
Use Podman for any container needs:
\`\`\`bash
podman run --rm -p 3000:3000 my-image # rootless, no daemon
podman build -t my-image .
\`\`\`
Do NOT use \`docker\` commands — you don't have Docker access.
## Coding Conventions
- Working directory: \`~/workspace/\`
- Use conventional commits (feat:, fix:, refactor:, etc.)
- Run tests before creating PRs
- Keep branches short-lived, one feature per branch
CODER_CLAUDE_MD
chown "${CODER_USER}:${CODER_USER}" "${CODER_HOME}/.claude/CLAUDE.md"
info "CLAUDE.md written to ${CODER_HOME}/.claude/CLAUDE.md"
# ---------------------------------------------------------------------------
# Step 10: Create workspace directory
# ---------------------------------------------------------------------------
step "Creating workspace"
# Agent working tree (~/workspace) and tool installs (~/tools), both owned
# by the coder user.
for _ws_dir in "${CODER_HOME}/workspace" "${CODER_HOME}/tools"; do
  mkdir -p "${_ws_dir}"
  chown -R "${CODER_USER}:${CODER_USER}" "${_ws_dir}"
done
info "Created ${CODER_HOME}/workspace/ and ${CODER_HOME}/tools/"
# ---------------------------------------------------------------------------
# Step 10.5: Install coder-mcp (multi-agent workflow tool)
# ---------------------------------------------------------------------------
step "Installing coder-mcp"
# Guard with ${AITOOLS_DIR:-}: a bare ${AITOOLS_DIR} aborts the whole script
# under `set -u` when the variable is unset, instead of reaching the friendly
# warn branch below (which already uses the :- default).
if [[ -n "${AITOOLS_DIR:-}" && -d "${AITOOLS_DIR}/coder" ]]; then
  _coder_src="${AITOOLS_DIR}/coder"
  _coder_dst="${CODER_HOME}/tools/coder"
  # node_modules/.package-lock.json doubles as an "npm install completed"
  # marker, so a partially-installed tree is redone on the next run.
  if [[ -f "${_coder_dst}/bin/coder-mcp.js" && -f "${_coder_dst}/node_modules/.package-lock.json" ]]; then
    info "coder-mcp already installed at ${_coder_dst}"
  else
    info "Copying coder from ${_coder_src} → ${_coder_dst}"
    rm -rf "${_coder_dst}"
    mkdir -p "${_coder_dst}"
    # Copy source files (exclude node_modules, .git, test fixtures)
    # Use tar pipe to avoid rsync dependency
    tar -C "${_coder_src}" --exclude='node_modules' --exclude='.git' --exclude='test' \
      -cf - . | tar -C "${_coder_dst}" -xf -
    chown -R "${CODER_USER}:${CODER_USER}" "${_coder_dst}"
    info "Running npm install for coder-mcp..."
    # nvm is only wired up in interactive shells, so source it explicitly.
    run_as_coder 'export NVM_DIR="$HOME/.nvm" && . "$NVM_DIR/nvm.sh" && cd ~/tools/coder && npm install --omit=dev'
    info "coder-mcp installed"
  fi
else
  warn "AITOOLS_DIR not set or ${AITOOLS_DIR:-}/coder not found"
  warn "Set AITOOLS_DIR=/path/to/AITOOLS or copy coder manually to ${CODER_HOME}/tools/coder/"
fi
# ---------------------------------------------------------------------------
# Step 10.6: Ollama (LLM inference via Podman with GPU)
# ---------------------------------------------------------------------------
step "Setting up Ollama (Podman Quadlet + ROCm GPU)"
# Ensure the ROCm Ollama image exists in the coder user's rootless Podman
# storage before the Quadlet unit (written below) tries to start it.
if ! run_as_coder 'podman image exists ollama/ollama:rocm' &>/dev/null; then
  info "Pulling ollama:rocm image (this may take a while)..."
  run_as_coder 'podman pull docker.io/ollama/ollama:rocm'
  info "ollama:rocm image pulled"
else
  info "ollama:rocm image already available"
fi
# Model storage lives on the host and is bind-mounted into the container.
mkdir -p "${CODER_HOME}/.ollama/models"
chown -R "${CODER_USER}:${CODER_USER}" "${CODER_HOME}/.ollama"
# Build Quadlet GPU directives: pass /dev/kfd plus every DRM render node
# through to the container when the coder user has an rw ACL on /dev/kfd;
# otherwise fall back to a CPU-only configuration.
_ollama_devices=""
_ollama_podman_args=""
_ollama_gpu_env=""
if [[ -e /dev/kfd ]] && getfacl /dev/kfd 2>/dev/null | grep -q "user:${CODER_USER}:rw"; then
  _ollama_devices="AddDevice=/dev/kfd"
  # One AddDevice= line per render node; guard -e handles an unmatched glob.
  for _render_node in /dev/dri/renderD*; do
    if [[ -e "$_render_node" ]]; then
      _ollama_devices+=$'\n'"AddDevice=${_render_node}"
    fi
  done
  # keep-groups preserves the supplementary groups (video/render) that grant
  # GPU access inside the user namespace.
  _ollama_podman_args="PodmanArgs=--runtime=crun --group-add keep-groups --userns=keep-id"
  if [[ -n "${HSA_OVERRIDE_GFX_VERSION:-}" ]]; then
    _ollama_gpu_env="Environment=HSA_OVERRIDE_GFX_VERSION=${HSA_OVERRIDE_GFX_VERSION}"
  fi
  info "GPU passthrough enabled"
else
  _ollama_podman_args="PodmanArgs=--runtime=crun --userns=keep-id"
  warn "No GPU ACLs for ${CODER_USER} — Ollama will run CPU-only"
fi
# Remove old system-level ollama.service if present — the Quadlet unit
# written below owns the service now, so the legacy unit is stopped,
# disabled, and deleted (all best-effort).
if [[ -f /etc/systemd/system/ollama.service ]]; then
  for _verb in stop disable; do
    systemctl "${_verb}" ollama.service 2>/dev/null || true
  done
  rm -f /etc/systemd/system/ollama.service
  systemctl daemon-reload
  info "Removed old system-level ollama.service (migrated to Quadlet)"
fi
# Write Quadlet file for Ollama (standalone — not in the homelab pod)
# Unquoted OLLAMA_QUADLET delimiter: the ${_ollama_*} directive variables and
# the Volume/home paths expand as the unit file is written. Directive vars
# that were left empty above simply produce blank lines in the unit.
_quadlet_dir="${CODER_HOME}/.config/containers/systemd"
mkdir -p "$_quadlet_dir"
cat > "${_quadlet_dir}/ollama.container" << OLLAMA_QUADLET
[Unit]
Description=Ollama LLM inference (Podman, ROCm GPU)
After=network-online.target
Wants=network-online.target
[Container]
Image=docker.io/ollama/ollama:rocm
ContainerName=ollama
${_ollama_podman_args}
${_ollama_devices}
${_ollama_gpu_env}
Environment=OLLAMA_KEEP_ALIVE=5m
Environment=OLLAMA_MODELS=/ollama/models
Volume=${CODER_HOME}/.ollama:/ollama
PublishPort=127.0.0.1:11434:11434
[Install]
WantedBy=default.target
OLLAMA_QUADLET
# Quadlet files must belong to the coder user for their user manager to load.
chown -R "${CODER_USER}:${CODER_USER}" "${CODER_HOME}/.config"
info "ollama.container Quadlet written"
# Reload and start
# `systemctl --user -M user@` addresses the coder user's own systemd
# instance. NOTE(review): assumes a user manager exists for ${CODER_USER}
# (e.g. lingering enabled earlier in setup) — confirm.
systemctl --user -M "${CODER_USER}@" daemon-reload
# enable is best-effort (e.g. already enabled / generator-managed unit).
systemctl --user -M "${CODER_USER}@" enable ollama.service 2>/dev/null || true
if systemctl --user -M "${CODER_USER}@" is-active --quiet ollama.service 2>/dev/null; then
info "ollama.service is running"
else
# Check if port 11434 is already in use
if ss -tlnp 2>/dev/null | grep -q ':11434 '; then
warn "Port 11434 already in use — ollama.service enabled but not started"
warn "Stop the existing Ollama then: systemctl --user -M ${CODER_USER}@ start ollama.service"
else
systemctl --user -M "${CODER_USER}@" start ollama.service
sleep 3