# Exploit Lab — Container management
# All services run on an isolated Docker network with no internet access.
COMPOSE = docker compose -f docker-compose.lab.yml
PROJECT = exploit-lab
KIND_CLUSTER = exploit-lab-k8s
.PHONY: lab-up lab-down lab-status lab-logs lab-shell lab-restart \
	lab-k8s-up lab-k8s-down lab-k8s-status \
	lab-adcs-up lab-adcs-down lab-adcs-destroy \
	lab-llm-up lab-llm-down \
	lab-saml-up lab-saml-down \
	lab-databricks-up lab-databricks-down \
	lab-oidc-up lab-oidc-down \
	lab-sccm-up lab-sccm-down \
	lab-arc-up lab-arc-down

## Start the contained lab environment
lab-up:
	$(COMPOSE) up -d --build
	@echo ""
	@echo "=== Lab is running ==="
	@echo "  C2 server:      http://127.0.0.1:8443"
	@echo "  Operator API:   http://127.0.0.1:8443/api/sessions"
	@echo "  Exploit server: http://127.0.0.1:9090"
	@echo "  Target app 1:   http://127.0.0.1:8501"
	@echo "  Target app 2:   http://127.0.0.1:8502"
	@echo "  Mock Entra IdP: http://127.0.0.1:9100"
	@echo "  Mock IMDS:      http://127.0.0.1:9200"
	@echo ""
	@echo "  Dashboard:   python3 tools/dashboard/dashboard_cli.py --c2 http://127.0.0.1:8443"
	@echo "  K8s post-ex: make lab-k8s-up   (requires: kind, kubectl)"
	@echo ""
	@echo "Network is fully isolated (internal: true). No internet access."
## Stop and destroy the lab (removes containers, networks, volumes)
lab-down:
	$(COMPOSE) down -v --remove-orphans
	@echo "Lab destroyed."

## Show running lab services
lab-status:
	$(COMPOSE) ps
	@echo ""
	@curl -s http://127.0.0.1:8443/api/status 2>/dev/null | python3 -m json.tool || echo "(C2 server not reachable)"

## Tail logs from all lab services
lab-logs:
	$(COMPOSE) logs -f

## Open a shell in a lab container (usage: make lab-shell SVC=beacon-1)
lab-shell:
	$(COMPOSE) exec $(SVC) /bin/bash

## Restart a specific service (usage: make lab-restart SVC=c2-server)
lab-restart:
	$(COMPOSE) restart $(SVC)
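
## Valid SVC values come from docker-compose.lab.yml; to list them:
##   docker compose -f docker-compose.lab.yml config --services
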
## ── K8s post-exploitation lab (WS3) ──────────────────────────────────────────
## Requires: kind (https://kind.sigs.k8s.io/), kubectl
## Creates a local kind cluster with deliberately misconfigured RBAC,
## a simulated Databricks Apps pod, and a mock IMDS service.
## Create and configure the kind cluster for K8s post-ex demos
lab-k8s-up:
	@command -v kind >/dev/null 2>&1 || { echo "ERROR: kind not found. Install: https://kind.sigs.k8s.io/docs/user/quick-start/"; exit 1; }
	@command -v kubectl >/dev/null 2>&1 || { echo "ERROR: kubectl not found. Install: https://kubernetes.io/docs/tasks/tools/"; exit 1; }
	@echo "==> Creating kind cluster '$(KIND_CLUSTER)'..."
	kind create cluster --name $(KIND_CLUSTER) --config infra/lab/kind-cluster/kind-config.yaml || \
		echo "Cluster may already exist — continuing."
	@echo "==> Applying lab namespaces and RBAC..."
	kubectl --context kind-$(KIND_CLUSTER) apply -f infra/lab/kind-cluster/lab-namespaces.yaml
	kubectl --context kind-$(KIND_CLUSTER) apply -f infra/lab/kind-cluster/rbac-vulnerable.yaml
	@echo "==> Deploying mock IMDS service..."
	kubectl --context kind-$(KIND_CLUSTER) apply -f infra/lab/kind-cluster/mock-imds-service.yaml
	@echo "==> Deploying simulated Databricks app pod..."
	kubectl --context kind-$(KIND_CLUSTER) apply -f infra/lab/kind-cluster/databricks-app-pod.yaml
	@echo ""
	@echo "=== K8s lab is running ==="
	@echo "  Cluster: kind-$(KIND_CLUSTER)"
	@echo "  Context: kind-$(KIND_CLUSTER)"
	@echo ""
	@echo "  Run k8s_recon module:"
	@echo "    kubectl --context kind-$(KIND_CLUSTER) exec -it databricks-app -- \\"
	@echo "      python3 /tools/post-exploit-staging/commands/k8s_recon/run.py --output report"
	@echo ""
	@echo "  Or from host, after merging the kubeconfig into the default location:"
	@echo "    kind export kubeconfig --name $(KIND_CLUSTER)"
	@echo "    python3 tools/post-exploit-staging/commands/k8s_recon/run.py --output report"
## Destroy the kind cluster
lab-k8s-down:
	kind delete cluster --name $(KIND_CLUSTER) || true
	@echo "K8s lab cluster deleted."
## Show kind cluster status and pod listing
lab-k8s-status:
	@if ! kind get clusters 2>/dev/null | grep -q $(KIND_CLUSTER); then \
		echo "K8s lab not running. Run: make lab-k8s-up"; \
	else \
		echo "=== K8s cluster: $(KIND_CLUSTER) ==="; \
		kubectl --context kind-$(KIND_CLUSTER) get pods --all-namespaces; \
		echo ""; \
		kubectl --context kind-$(KIND_CLUSTER) get svc --all-namespaces; \
	fi

## ── AD CS lab (WS-C) ─────────────────────────────────────────────────────────
## Requires: vagrant, VirtualBox, and the vagrant-reload plugin
##   (install with: vagrant plugin install vagrant-reload)
## Stands up dc01 (DC + Enterprise CA) + ws01 + ws02 (workstations)
## Domain: corp.lab.local    Network: 192.168.56.0/24 (host-only, no internet)
## Create and provision the AD CS Vagrant lab (dc01 + ws01 + ws02)
lab-adcs-up:
	@command -v vagrant >/dev/null 2>&1 || { echo "ERROR: vagrant not found. Install: https://www.vagrantup.com/"; exit 1; }
	@command -v VBoxManage >/dev/null 2>&1 || { echo "ERROR: VirtualBox not found. Install: https://www.virtualbox.org/"; exit 1; }
	@echo "==> Starting AD CS lab..."
	cd infra/lab/ad-cs && vagrant up
	@echo ""
	@echo "=== AD CS Lab is running ==="
	@echo "  dc01 (DC + CA) : 192.168.56.10   domain: corp.lab.local"
	@echo "  ws01           : 192.168.56.11"
	@echo "  ws02           : 192.168.56.12"
	@echo ""
	@echo "  Enumerate templates:"
	@echo "    EXPLOIT_LAB_ACTIVE=1 EXPLOIT_LAB_OFFLINE_VM=1 EXPLOIT_FIXTURE_ROOT=/tmp/lab \\"
	@echo "      python tools/ad-cs/enum/enum.py --domain corp.lab.local --dc-ip 192.168.56.10 \\"
	@echo "      --username alice --password 'AlicePass!1' --output /tmp/lab/findings.json"
	@echo ""
	@echo "  Run ESC1 exploit:"
	@echo "    EXPLOIT_LAB_ACTIVE=1 EXPLOIT_LAB_OFFLINE_VM=1 EXPLOIT_FIXTURE_ROOT=/tmp/lab \\"
	@echo "      python tools/ad-cs/exploit/esc01/exploit.py --domain corp.lab.local \\"
	@echo "      --dc-ip 192.168.56.10 --username alice --password 'AlicePass!1' \\"
	@echo "      --target-user administrator --output-dir /tmp/lab/esc01-out"
## Stop (halt) the AD CS Vagrant lab VMs
lab-adcs-down:
	cd infra/lab/ad-cs && vagrant halt
	@echo "AD CS lab halted."

## Destroy the AD CS Vagrant lab VMs (removes all VMs and disks)
lab-adcs-destroy:
	cd infra/lab/ad-cs && vagrant destroy -f
	@echo "AD CS lab destroyed."

## ── LLM/Agent attack lab (WS-E) ─────────────────────────────────────────────
## Requires: docker, docker compose, ~5GB for Ollama model download
## Stands up: Ollama (port 11434) + copilot Flask app (port 8080)
## Internal network only — no internet access.
## Start the LLM target lab (Ollama + enterprise copilot app)
lab-llm-up:
	docker compose -f infra/lab/llm-target/docker-compose.yml up -d --build
	@echo ""
	@echo "=== LLM Lab is running ==="
	@echo "  Copilot app: http://127.0.0.1:8080"
	@echo "  Ollama API:  http://127.0.0.1:11434"
	@echo ""
	@echo "  Pull model (first run): docker exec ollama ollama pull llama3.1:8b"
	@echo "  Run injection eval: EXPLOIT_LAB_ACTIVE=1 python tools/llm-attacks/indirect-injection/eval_injection.py --target http://127.0.0.1:8080"
## Stop the LLM target lab
lab-llm-down:
	docker compose -f infra/lab/llm-target/docker-compose.yml down -v --remove-orphans
	@echo "LLM lab stopped."

## ── Mock SAML lab (WS-D) ──────────────────────────────────────────────────────
## Stands up a SimpleSAMLphp-style Flask SAML SP on port 9400
## Start the mock SAML SP
lab-saml-up:
	docker build -t mock-saml infra/lab/mock-saml/
	docker run -d --name mock-saml --network exploit-lab_internal -p 9400:9400 \
		-e LAB_SAML_TRUST_ALL=1 mock-saml || \
	$(COMPOSE) up -d mock-saml 2>/dev/null || \
	docker run -d --name mock-saml -p 9400:9400 -e LAB_SAML_TRUST_ALL=1 mock-saml
	@echo "Mock SAML SP: http://127.0.0.1:9400"
## Stop the mock SAML SP
lab-saml-down:
	docker stop mock-saml && docker rm mock-saml || true
	@echo "Mock SAML stopped."

## ── Mock Databricks lab (WS-D) ───────────────────────────────────────────────
## Stands up mock Databricks Apps OAuth endpoint on port 9500
## Start the mock Databricks Apps OAuth endpoint
lab-databricks-up:
	docker build -t mock-databricks infra/lab/mock-databricks/
	docker run -d --name mock-databricks -p 9500:9500 mock-databricks
	@echo "Mock Databricks: http://127.0.0.1:9500"
	@echo "  OAuth token: POST http://127.0.0.1:9500/oidc/v1/token"
	@echo "  OBO flow:    POST http://127.0.0.1:9500/oidc/v1/token (grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer)"
## Stop the mock Databricks Apps OAuth endpoint
lab-databricks-down:
	docker stop mock-databricks && docker rm mock-databricks || true
	@echo "Mock Databricks stopped."

## ── Mock OIDC Issuer lab (WS-D) ──────────────────────────────────────────────
## Stands up GitHub Actions OIDC issuer simulation on port 9300
## Start the mock OIDC issuer (GitHub Actions simulation)
lab-oidc-up:
	@echo "Starting mock OIDC issuer on 127.0.0.1:9300..."
	EXPLOIT_LAB_ACTIVE=1 python3 tools/cloud-identity/wif/mock_oidc_issuer.py &
	@echo "Mock OIDC issuer: http://127.0.0.1:9300"
	@echo "  OIDC config: http://127.0.0.1:9300/.well-known/openid-configuration"
## Stop the mock OIDC issuer
lab-oidc-down:
	pkill -f mock_oidc_issuer.py || true
	@echo "Mock OIDC issuer stopped."

## ── Mock SCCM lab (v5) ───────────────────────────────────────────────────────
## Stands up mock SCCM management point on port 9600 for ELEVATE1/ELEVATE2 demos
## Start the mock SCCM service
lab-sccm-up:
	docker build -t mock-sccm infra/lab/mock-sccm/
	docker run -d --name mock-sccm -p 9600:9600 \
		-e EXPLOIT_LAB_ACTIVE=1 mock-sccm
	@echo "Mock SCCM management point: http://127.0.0.1:9600"
	@echo "  Enumerate: EXPLOIT_LAB_ACTIVE=1 EXPLOIT_LAB_OFFLINE_VM=1 EXPLOIT_FIXTURE_ROOT=/tmp/lab \\"
	@echo "    python tools/lateral-movement/sccm-abuse/enumerate.py --target 127.0.0.1:9600"
	@echo "  ELEVATE1:  EXPLOIT_LAB_ACTIVE=1 EXPLOIT_LAB_OFFLINE_VM=1 EXPLOIT_FIXTURE_ROOT=/tmp/lab \\"
	@echo "    python tools/lateral-movement/sccm-abuse/elevate.py --target 127.0.0.1:9600 --technique ELEVATE1"
## Stop the mock SCCM service
lab-sccm-down:
	docker stop mock-sccm && docker rm mock-sccm || true
	@echo "Mock SCCM stopped."

## ── Mock Azure Arc lab (v5) ──────────────────────────────────────────────────
## Extends mock-imds (port 9200) with Azure Arc MSI endpoint for arc_pivot.py demos
## Start the Azure Arc IMDS extension (adds /metadata/identity endpoint to mock-imds)
lab-arc-up:
	@echo "Starting Azure Arc MSI pivot lab..."
	@echo "  This lab reuses mock-imds (port 9200) with the Arc MSI endpoint enabled."
	docker build -t mock-imds-arc --build-arg ARC_MSI=1 infra/lab/mock-imds/ 2>/dev/null && \
		docker run -d --name mock-imds-arc -p 9200:9200 mock-imds-arc || \
		{ EXPLOIT_LAB_ACTIVE=1 python3 infra/lab/mock-imds/mock_imds.py --arc-msi & }
	@echo "Mock IMDS (Azure Arc mode): http://127.0.0.1:9200"
	@echo "  Arc MSI token: GET http://127.0.0.1:9200/metadata/identity/oauth2/token?api-version=2020-06-01"
	@echo "  Arc pivot:     EXPLOIT_LAB_ACTIVE=1 ENTRA_LAB_TENANT_ID=lab-tenant-001 \\"
	@echo "    python tools/lateral-movement/azure-arc/arc_pivot.py"
## Stop the Azure Arc IMDS extension
lab-arc-down:
	pkill -f mock_imds.py || true
	docker stop mock-imds-arc 2>/dev/null && docker rm mock-imds-arc 2>/dev/null || true
	@echo "Azure Arc lab stopped."