diff --git a/skills.json b/skills.json
index a2ad60ae..b04d0343 100644
--- a/skills.json
+++ b/skills.json
@@ -100,54 +100,6 @@
"large"
]
},
- {
- "id": "yolo-detection-2026-coral-tpu",
- "name": "YOLO 2026 Coral TPU",
- "description": "Google Coral Edge TPU — real-time object detection with LiteRT (INT8, ~4ms inference at 320×320).",
- "version": "2.0.0",
- "category": "detection",
- "path": "skills/detection/yolo-detection-2026-coral-tpu",
- "tags": [
- "detection",
- "yolo",
- "coral",
- "edge-tpu",
- "litert",
- "real-time",
- "coco"
- ],
- "platforms": [
- "linux-x64",
- "linux-arm64",
- "darwin-arm64",
- "darwin-x64",
- "win-x64"
- ],
- "requirements": {
- "python": ">=3.9",
- "system": "libedgetpu",
- "hardware": "Google Coral USB Accelerator"
- },
- "capabilities": [
- "live_detection",
- "bbox_overlay"
- ],
- "ui_unlocks": [
- "detection_overlay",
- "detection_results"
- ],
- "fps_presets": [
- 0.2,
- 0.5,
- 1,
- 3,
- 5,
- 15
- ],
- "model_sizes": [
- "nano"
- ]
- },
{
"id": "camera-claw",
"name": "Camera Claw",
@@ -157,16 +109,46 @@
"url": "https://github.com/SharpAI/CameraClaw",
"repo_url": "https://github.com/SharpAI/CameraClaw",
"code_structure": [
- { "path": "SKILL.md", "desc": "Aegis skill manifest (11 params)" },
- { "path": "package.json", "desc": "Node.js dependencies" },
- { "path": "config.yaml", "desc": "Default params" },
- { "path": "deploy.sh", "desc": "Node.js + Docker bootstrapper" },
- { "path": "deploy.bat", "desc": "Windows bootstrapper" },
- { "path": "scripts/monitor.js", "desc": "Main entry — Docker orchestrator + JSONL protocol" },
- { "path": "scripts/health-check.js", "desc": "Container health checker" },
- { "path": "docs/aegis_openclaw_note.md", "desc": "Aegis integration requirements" }
+ {
+ "path": "SKILL.md",
+ "desc": "Aegis skill manifest (11 params)"
+ },
+ {
+ "path": "package.json",
+ "desc": "Node.js dependencies"
+ },
+ {
+ "path": "config.yaml",
+ "desc": "Default params"
+ },
+ {
+ "path": "deploy.sh",
+ "desc": "Node.js + Docker bootstrapper"
+ },
+ {
+ "path": "deploy.bat",
+ "desc": "Windows bootstrapper"
+ },
+ {
+ "path": "scripts/monitor.js",
+ "desc": "Main entry — Docker orchestrator + JSONL protocol"
+ },
+ {
+ "path": "scripts/health-check.js",
+ "desc": "Container health checker"
+ },
+ {
+ "path": "docs/aegis_openclaw_note.md",
+ "desc": "Aegis integration requirements"
+ }
+ ],
+ "tags": [
+ "security",
+ "sandbox",
+ "monitoring",
+ "openclaw",
+ "ai-agent"
],
- "tags": ["security", "sandbox", "monitoring", "openclaw", "ai-agent"],
"platforms": [
"linux-x64",
"linux-arm64",
@@ -310,6 +292,40 @@
"ui_unlocks": [
"annotation_studio"
]
+ },
+ {
+ "id": "yolo-detection-2026-coral-tpu-macos",
+ "name": "YOLO 2026 Coral TPU (macOS)",
+ "description": "Real-time object detection on the Google Coral Edge TPU, running natively via ai-edge-litert on macOS",
+ "category": "detection",
+ "path": "skills/detection/yolo-detection-2026-coral-tpu-macos",
+ "tags": [
+ "detection",
+ "yolo",
+ "coral",
+ "edge-tpu"
+ ],
+ "platforms": [
+ "darwin-arm64",
+ "darwin-x64"
+ ]
+ },
+ {
+ "id": "yolo-detection-2026-coral-tpu-win-wsl",
+ "name": "YOLO 2026 Coral TPU (Windows/WSL)",
+ "description": "Real-time object detection on the Google Coral Edge TPU, with the USB device mapped into WSL2",
+ "category": "detection",
+ "path": "skills/detection/yolo-detection-2026-coral-tpu-win-wsl",
+ "tags": [
+ "detection",
+ "yolo",
+ "coral",
+ "edge-tpu",
+ "wsl"
+ ],
+ "platforms": [
+ "win-x64"
+ ]
}
]
}
\ No newline at end of file
diff --git a/skills/analysis/home-security-benchmark/scripts/generate-report.cjs b/skills/analysis/home-security-benchmark/scripts/generate-report.cjs
index d5dda66d..9bddd296 100644
--- a/skills/analysis/home-security-benchmark/scripts/generate-report.cjs
+++ b/skills/analysis/home-security-benchmark/scripts/generate-report.cjs
@@ -109,6 +109,7 @@ function buildHTML(allResults, fixtureImages, { liveMode = false, liveStatus = n
tokens: r.tokens || r.data?.tokenTotals?.total,
perfSummary: r.perfSummary || r.data?.perfSummary || null,
system: r.data?.system || {},
+ serverParams: r.data?.serverParams || {},
tokenTotals: r.data?.tokenTotals || {},
suites: (r.data?.suites || []).map(s => ({
name: s.name,
@@ -492,6 +493,15 @@ function renderPerformance() {
let html = '
';
+ if (run.serverParams && typeof run.serverParams === 'object' && Object.keys(run.serverParams).length > 0) {
+ let paramStr = '';
+ for (const k in run.serverParams) {
+ if (paramStr) paramStr += ' | ';
+ paramStr += '' + esc(k) + ': ' + esc(String(run.serverParams[k]));
+ }
+ html += '[Server Params] ' + paramStr + '
';
+ }
+
// Hero cards
html += '';
const ttftAvg = perf?.ttft?.avgMs;
diff --git a/skills/analysis/home-security-benchmark/scripts/run-benchmark.cjs b/skills/analysis/home-security-benchmark/scripts/run-benchmark.cjs
index bf7969d8..e6f3b0b0 100644
--- a/skills/analysis/home-security-benchmark/scripts/run-benchmark.cjs
+++ b/skills/analysis/home-security-benchmark/scripts/run-benchmark.cjs
@@ -230,10 +230,14 @@ function suite(name, fn) {
suites.push({ name, fn, tests: [] });
}
+let targetServerParams = {};
try { targetServerParams = JSON.parse(process.env.AEGIS_SERVER_PARAMS || '{}'); } catch { /* deliberate: malformed AEGIS_SERVER_PARAMS falls back to {} */ }
+
const results = {
timestamp: new Date().toISOString(),
gateway: GATEWAY_URL,
vlm: VLM_URL || null,
+ serverParams: targetServerParams,
system: {},
model: {},
suites: [],
@@ -333,6 +337,9 @@ async function llmCall(messages, opts = {}) {
...(model && { model }),
...(temperature !== undefined && { temperature }),
...(opts.expectJSON && { top_p: 0.8 }),
+ // For JSON-expected tests on local servers, enable server-side JSON mode
+ // which activates prefix buffering to strip hallucinated artifacts
+ ...(opts.expectJSON && !isCloudApi && { response_format: { type: 'json_object' } }),
...(opts.tools && { tools: opts.tools }),
// Model-family-specific params (e.g. reasoning_effort:'none' for Mistral).
// These are merged last so they take precedence over defaults.
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/.gitignore b/skills/detection/yolo-detection-2026-coral-tpu-macos/.gitignore
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/.gitignore
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/.gitignore
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/.travis.yml b/skills/detection/yolo-detection-2026-coral-tpu-macos/.travis.yml
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/.travis.yml
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/.travis.yml
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/CODE_OF_CONDUCT.md b/skills/detection/yolo-detection-2026-coral-tpu-macos/CODE_OF_CONDUCT.md
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/CODE_OF_CONDUCT.md
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/CODE_OF_CONDUCT.md
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/CONTRIBUTING.md b/skills/detection/yolo-detection-2026-coral-tpu-macos/CONTRIBUTING.md
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/CONTRIBUTING.md
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/CONTRIBUTING.md
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/Contributions.md b/skills/detection/yolo-detection-2026-coral-tpu-macos/Contributions.md
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/Contributions.md
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/Contributions.md
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/LICENSE b/skills/detection/yolo-detection-2026-coral-tpu-macos/LICENSE
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/LICENSE
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/LICENSE
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/README.md b/skills/detection/yolo-detection-2026-coral-tpu-macos/README.md
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/README.md
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/README.md
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/SKILL.md b/skills/detection/yolo-detection-2026-coral-tpu-macos/SKILL.md
similarity index 89%
rename from skills/detection/yolo-detection-2026-coral-tpu/SKILL.md
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/SKILL.md
index 32a3d17c..e99b4a90 100644
--- a/skills/detection/yolo-detection-2026-coral-tpu/SKILL.md
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/SKILL.md
@@ -1,14 +1,16 @@
---
-name: yolo-detection-2026-coral-tpu
-description: "Google Coral Edge TPU — real-time object detection natively via local Python environment"
+name: yolo-detection-2026-coral-tpu-macos
+description: "Google Coral Edge TPU — real-time object detection running natively on macOS / Linux"
version: 1.0.0
icon: assets/icon.png
entry: scripts/detect.py
-deploy: deploy.sh
+deploy:
+ linux: deploy.sh
+ macos: deploy.sh
runtime: python
requirements:
- platforms: ["linux", "macos", "windows"]
+ platforms: ["linux", "macos"]
@@ -83,10 +85,7 @@ Real-time object detection natively utilizing the Google Coral Edge TPU accelera
## Requirements
-- **Google Coral USB Accelerator** (USB 3.0 port recommended)
-- **libusb** framework (installed automatically on Linux/macOS)
-- Python 3 with the native `pycoral` environment
-- Adequate cooling for sustained inference
+- Python 3.9–3.13
## How It Works
@@ -125,12 +124,6 @@ Real-time object detection natively utilizing the Google Coral Edge TPU accelera
./deploy.sh
```
-### Windows
-```powershell
-# Installs directly to the Microsoft runtime
-.\deploy.bat
-```
-
> **Important Deployment Notice**: The updated `deploy.sh` script will natively halt execution and prompt you securely for your OS `sudo` password to securely register the USB drivers (`libedgetpu`) system-wide. If you refuse the prompt, it gracefully outputs the exact terminal instructions for you to configure it manually.
## Performance
@@ -158,8 +151,9 @@ Same JSONL as `yolo-detection-2026`:
## Installation
+### Linux / macOS
```bash
./deploy.sh
```
-The deployer builds the local native Python virtual environment inline with global TPU hooks. No Docker containers or abstract container-bindings are used.
+The deployer builds the local Python virtual environment and installs the Edge TPU runtime. No Docker required.
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/_config.yml b/skills/detection/yolo-detection-2026-coral-tpu-macos/_config.yml
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/_config.yml
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/_config.yml
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/config.yaml b/skills/detection/yolo-detection-2026-coral-tpu-macos/config.yaml
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/config.yaml
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/config.yaml
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/deploy-linux.sh b/skills/detection/yolo-detection-2026-coral-tpu-macos/deploy-linux.sh
old mode 100755
new mode 100644
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/deploy-linux.sh
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/deploy-linux.sh
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/deploy-macos.sh b/skills/detection/yolo-detection-2026-coral-tpu-macos/deploy-macos.sh
old mode 100755
new mode 100644
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/deploy-macos.sh
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/deploy-macos.sh
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/deploy.sh b/skills/detection/yolo-detection-2026-coral-tpu-macos/deploy.sh
old mode 100755
new mode 100644
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/deploy.sh
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/deploy.sh
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/docker-compose.yml b/skills/detection/yolo-detection-2026-coral-tpu-macos/docker-compose.yml
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/docker-compose.yml
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/docker-compose.yml
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/docker/Dockerfile b/skills/detection/yolo-detection-2026-coral-tpu-macos/docker/Dockerfile
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/docker/Dockerfile
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/docker/Dockerfile
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/docker/README.md b/skills/detection/yolo-detection-2026-coral-tpu-macos/docker/README.md
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/docker/README.md
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/docker/README.md
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/docker/compile.sh b/skills/detection/yolo-detection-2026-coral-tpu-macos/docker/compile.sh
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/docker/compile.sh
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/docker/compile.sh
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/docker/docker-compose.yml b/skills/detection/yolo-detection-2026-coral-tpu-macos/docker/docker-compose.yml
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/docker/docker-compose.yml
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/docker/docker-compose.yml
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/docker_out.log b/skills/detection/yolo-detection-2026-coral-tpu-macos/docker_out.log
new file mode 100644
index 00000000..5da2adda
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/docker_out.log
@@ -0,0 +1,129 @@
+Collecting tflite-runtime==2.14.0
+ Downloading tflite_runtime-2.14.0-cp39-cp39-manylinux2014_x86_64.whl (2.4 MB)
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.4/2.4 MB 16.7 MB/s eta 0:00:00
+Collecting pillow
+ Downloading pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl (6.6 MB)
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.6/6.6 MB 39.5 MB/s eta 0:00:00
+Collecting numpy
+ Downloading numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (19.5 MB)
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 19.5/19.5 MB 33.4 MB/s eta 0:00:00
+Installing collected packages: pillow, numpy, tflite-runtime
+Successfully installed numpy-2.0.2 pillow-11.3.0 tflite-runtime-2.14.0
+WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
+
+[notice] A new release of pip is available: 23.0.1 -> 26.0.1
+[notice] To update, run: pip install --upgrade pip
+Get:1 http://deb.debian.org/debian trixie InRelease [140 kB]
+Get:2 http://deb.debian.org/debian trixie-updates InRelease [47.3 kB]
+Get:3 http://deb.debian.org/debian-security trixie-security InRelease [43.4 kB]
+Get:4 http://deb.debian.org/debian trixie/main amd64 Packages [9671 kB]
+Get:5 http://deb.debian.org/debian trixie-updates/main amd64 Packages [5412 B]
+Get:6 http://deb.debian.org/debian-security trixie-security/main amd64 Packages [114 kB]
+Fetched 10.0 MB in 1s (10.4 MB/s)
+Reading package lists...
+Reading package lists...
+Building dependency tree...
+Reading state information...
+The following additional packages will be installed:
+ libgnutls30t64 libidn2-0 libp11-kit0 libpsl5t64 libtasn1-6 libunistring5
+ publicsuffix
+Suggested packages:
+ gnutls-bin
+The following NEW packages will be installed:
+ libgnutls30t64 libidn2-0 libp11-kit0 libpsl5t64 libtasn1-6 libunistring5
+ libusb-1.0-0 publicsuffix wget
+0 upgraded, 9 newly installed, 0 to remove and 14 not upgraded.
+Need to get 3926 kB of archives.
+After this operation, 13.0 MB of additional disk space will be used.
+Get:1 http://deb.debian.org/debian trixie/main amd64 libunistring5 amd64 1.3-2 [477 kB]
+Get:2 http://deb.debian.org/debian trixie/main amd64 libidn2-0 amd64 2.3.8-2 [109 kB]
+Get:3 http://deb.debian.org/debian trixie/main amd64 libp11-kit0 amd64 0.25.5-3 [425 kB]
+Get:4 http://deb.debian.org/debian trixie/main amd64 libtasn1-6 amd64 4.20.0-2 [49.9 kB]
+Get:5 http://deb.debian.org/debian trixie/main amd64 libgnutls30t64 amd64 3.8.9-3+deb13u2 [1468 kB]
+Get:6 http://deb.debian.org/debian trixie/main amd64 libpsl5t64 amd64 0.21.2-1.1+b1 [57.2 kB]
+Get:7 http://deb.debian.org/debian trixie/main amd64 wget amd64 1.25.0-2 [984 kB]
+Get:8 http://deb.debian.org/debian trixie/main amd64 libusb-1.0-0 amd64 2:1.0.28-1 [59.6 kB]
+Get:9 http://deb.debian.org/debian trixie/main amd64 publicsuffix all 20250328.1952-0.1 [296 kB]
+debconf: unable to initialize frontend: Dialog
+debconf: (TERM is not set, so the dialog frontend is not usable.)
+debconf: falling back to frontend: Readline
+debconf: unable to initialize frontend: Readline
+debconf: (Can't locate Term/ReadLine.pm in @INC (you may need to install the Term::ReadLine module) (@INC entries checked: /etc/perl /usr/local/lib/x86_64-linux-gnu/perl/5.40.1 /usr/local/share/perl/5.40.1 /usr/lib/x86_64-linux-gnu/perl5/5.40 /usr/share/perl5 /usr/lib/x86_64-linux-gnu/perl-base /usr/lib/x86_64-linux-gnu/perl/5.40 /usr/share/perl/5.40 /usr/local/lib/site_perl) at /usr/share/perl5/Debconf/FrontEnd/Readline.pm line 8, line 9.)
+debconf: falling back to frontend: Teletype
+debconf: unable to initialize frontend: Teletype
+debconf: (This frontend requires a controlling tty.)
+debconf: falling back to frontend: Noninteractive
+Fetched 3926 kB in 0s (13.3 MB/s)
+Selecting previously unselected package libunistring5:amd64.
+(Reading database ...
(Reading database ... 5%
(Reading database ... 10%
(Reading database ... 15%
(Reading database ... 20%
(Reading database ... 25%
(Reading database ... 30%
(Reading database ... 35%
(Reading database ... 40%
(Reading database ... 45%
(Reading database ... 50%
(Reading database ... 55%
(Reading database ... 60%
(Reading database ... 65%
(Reading database ... 70%
(Reading database ... 75%
(Reading database ... 80%
(Reading database ... 85%
(Reading database ... 90%
(Reading database ... 95%
(Reading database ... 100%
(Reading database ... 5644 files and directories currently installed.)
+Preparing to unpack .../0-libunistring5_1.3-2_amd64.deb ...
+Unpacking libunistring5:amd64 (1.3-2) ...
+Selecting previously unselected package libidn2-0:amd64.
+Preparing to unpack .../1-libidn2-0_2.3.8-2_amd64.deb ...
+Unpacking libidn2-0:amd64 (2.3.8-2) ...
+Selecting previously unselected package libp11-kit0:amd64.
+Preparing to unpack .../2-libp11-kit0_0.25.5-3_amd64.deb ...
+Unpacking libp11-kit0:amd64 (0.25.5-3) ...
+Selecting previously unselected package libtasn1-6:amd64.
+Preparing to unpack .../3-libtasn1-6_4.20.0-2_amd64.deb ...
+Unpacking libtasn1-6:amd64 (4.20.0-2) ...
+Selecting previously unselected package libgnutls30t64:amd64.
+Preparing to unpack .../4-libgnutls30t64_3.8.9-3+deb13u2_amd64.deb ...
+Unpacking libgnutls30t64:amd64 (3.8.9-3+deb13u2) ...
+Selecting previously unselected package libpsl5t64:amd64.
+Preparing to unpack .../5-libpsl5t64_0.21.2-1.1+b1_amd64.deb ...
+Unpacking libpsl5t64:amd64 (0.21.2-1.1+b1) ...
+Selecting previously unselected package wget.
+Preparing to unpack .../6-wget_1.25.0-2_amd64.deb ...
+Unpacking wget (1.25.0-2) ...
+Selecting previously unselected package libusb-1.0-0:amd64.
+Preparing to unpack .../7-libusb-1.0-0_2%3a1.0.28-1_amd64.deb ...
+Unpacking libusb-1.0-0:amd64 (2:1.0.28-1) ...
+Selecting previously unselected package publicsuffix.
+Preparing to unpack .../8-publicsuffix_20250328.1952-0.1_all.deb ...
+Unpacking publicsuffix (20250328.1952-0.1) ...
+Setting up libp11-kit0:amd64 (0.25.5-3) ...
+Setting up libunistring5:amd64 (1.3-2) ...
+Setting up libtasn1-6:amd64 (4.20.0-2) ...
+Setting up libusb-1.0-0:amd64 (2:1.0.28-1) ...
+Setting up publicsuffix (20250328.1952-0.1) ...
+Setting up libidn2-0:amd64 (2.3.8-2) ...
+Setting up libgnutls30t64:amd64 (3.8.9-3+deb13u2) ...
+Setting up libpsl5t64:amd64 (0.21.2-1.1+b1) ...
+Setting up wget (1.25.0-2) ...
+Processing triggers for libc-bin (2.41-12) ...
+--2026-03-31 19:47:38-- https://packages.cloud.google.com/apt/pool/coral-edgetpu-stable/libedgetpu1-max_16.0_amd64_0ac21f1924dd4b125d5cfc5f6d0e4a5e.deb
+Resolving packages.cloud.google.com (packages.cloud.google.com)... 142.251.218.142, 2607:f8b0:4005:801::200e
+Connecting to packages.cloud.google.com (packages.cloud.google.com)|142.251.218.142|:443... connected.
+HTTP request sent, awaiting response... 200 OK
+Length: 387960 (379K) [application/vnd.debian.binary-package]
+Saving to: ‘libedgetpu.deb’
+
+ 0K .......... .......... .......... .......... .......... 13% 1.50M 0s
+ 50K .......... .......... .......... .......... .......... 26% 2.72M 0s
+ 100K .......... .......... .......... .......... .......... 39% 3.45M 0s
+ 150K .......... .......... .......... .......... .......... 52% 5.38M 0s
+ 200K .......... .......... .......... .......... .......... 65% 6.17M 0s
+ 250K .......... .......... .......... .......... .......... 79% 5.45M 0s
+ 300K .......... .......... .......... .......... .......... 92% 7.87M 0s
+ 350K .......... .......... ........ 100% 7.11M=0.1s
+
+2026-03-31 19:47:39 (3.67 MB/s) - ‘libedgetpu.deb’ saved [387960/387960]
+
+Traceback (most recent call last):
+ File "/app/wsl_test.py", line 9, in
+ delegate = tflite.load_delegate("libedgetpu.so.1")
+ File "/usr/local/lib/python3.9/site-packages/tflite_runtime/interpreter.py", line 166, in load_delegate
+ delegate = Delegate(library, options)
+ File "/usr/local/lib/python3.9/site-packages/tflite_runtime/interpreter.py", line 73, in __init__
+ self._library = ctypes.pydll.LoadLibrary(library)
+ File "/usr/local/lib/python3.9/ctypes/__init__.py", line 452, in LoadLibrary
+ return self._dlltype(name)
+ File "/usr/local/lib/python3.9/ctypes/__init__.py", line 374, in __init__
+ self._handle = _dlopen(self._name, mode)
+OSError: libedgetpu.so.1: cannot open shared object file: No such file or directory
+Exception ignored in:
+Traceback (most recent call last):
+ File "/usr/local/lib/python3.9/site-packages/tflite_runtime/interpreter.py", line 109, in __del__
+ if self._library is not None:
+AttributeError: 'Delegate' object has no attribute '_library'
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/driver/UsbDk_1.0.22_x64.msi b/skills/detection/yolo-detection-2026-coral-tpu-macos/driver/UsbDk_1.0.22_x64.msi
new file mode 100644
index 00000000..b95c5b95
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/driver/UsbDk_1.0.22_x64.msi differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/driver/edgetpu.dll b/skills/detection/yolo-detection-2026-coral-tpu-macos/driver/edgetpu.dll
new file mode 100644
index 00000000..606b1e8e
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/driver/edgetpu.dll differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/driver/libusb-1.0.dll b/skills/detection/yolo-detection-2026-coral-tpu-macos/driver/libusb-1.0.dll
new file mode 100644
index 00000000..137897ca
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/driver/libusb-1.0.dll differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/lib/udev/rules.d/60-libedgetpu1-max.rules b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/lib/udev/rules.d/60-libedgetpu1-max.rules
new file mode 100644
index 00000000..67b9f472
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/lib/udev/rules.d/60-libedgetpu1-max.rules
@@ -0,0 +1,2 @@
+SUBSYSTEM=="usb",ATTRS{idVendor}=="1a6e",ATTRS{idProduct}=="089a",GROUP="plugdev"
+SUBSYSTEM=="usb",ATTRS{idVendor}=="18d1",ATTRS{idProduct}=="9302",GROUP="plugdev"
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/lib/x86_64-linux-gnu/libedgetpu.so.1 b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/lib/x86_64-linux-gnu/libedgetpu.so.1
new file mode 100644
index 00000000..774993bb
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/lib/x86_64-linux-gnu/libedgetpu.so.1 differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/lib/x86_64-linux-gnu/libedgetpu.so.1.0 b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/lib/x86_64-linux-gnu/libedgetpu.so.1.0
new file mode 100644
index 00000000..774993bb
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/lib/x86_64-linux-gnu/libedgetpu.so.1.0 differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/share/doc/libedgetpu1-max/changelog.gz b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/share/doc/libedgetpu1-max/changelog.gz
new file mode 100644
index 00000000..2dab41aa
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/share/doc/libedgetpu1-max/changelog.gz differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/share/doc/libedgetpu1-max/copyright b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/share/doc/libedgetpu1-max/copyright
new file mode 100644
index 00000000..699aad16
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/share/doc/libedgetpu1-max/copyright
@@ -0,0 +1,6 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Source: https://github.com/google-coral/libedgetpu
+
+Files: *
+Copyright: Copyright 2018 Google, LLC
+License: Apache-2.0
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/share/lintian/overrides/libedgetpu1-max b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/share/lintian/overrides/libedgetpu1-max
new file mode 100644
index 00000000..3591ed22
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/ext/usr/share/lintian/overrides/libedgetpu1-max
@@ -0,0 +1,4 @@
+# We provide two conflicting package variants with the same soname inside.
+libedgetpu1-max: package-name-doesnt-match-sonames libedgetpu1
+libedgetpu1-max: missing-debconf-dependency-for-preinst
+libedgetpu1-max: too-long-short-description-in-templates libedgetpu/accepted-eula
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/input.json b/skills/detection/yolo-detection-2026-coral-tpu-macos/input.json
new file mode 100644
index 00000000..0db108a2
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/input.json
@@ -0,0 +1 @@
+{"event":"frame","frame_path":"test2.jpg","frame_id":"test2","camera_id":"test2","timestamp":"123"}
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/edgetpu.dll b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/edgetpu.dll
new file mode 100644
index 00000000..606b1e8e
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/edgetpu.dll differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/libedgetpu.so.1 b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/libedgetpu.so.1
new file mode 100644
index 00000000..774993bb
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/libedgetpu.so.1 differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/libedgetpu1-max_16.0_amd64_0ac21f1924dd4b125d5cfc5f6d0e4a5e.deb b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/libedgetpu1-max_16.0_amd64_0ac21f1924dd4b125d5cfc5f6d0e4a5e.deb
new file mode 100644
index 00000000..7ff32511
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/libedgetpu1-max_16.0_amd64_0ac21f1924dd4b125d5cfc5f6d0e4a5e.deb differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/libusb-1.0.dll b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/libusb-1.0.dll
new file mode 100644
index 00000000..26808bb2
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/libusb-1.0.dll differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/lib/udev/rules.d/60-libedgetpu1-max.rules b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/lib/udev/rules.d/60-libedgetpu1-max.rules
new file mode 100644
index 00000000..67b9f472
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/lib/udev/rules.d/60-libedgetpu1-max.rules
@@ -0,0 +1,2 @@
+SUBSYSTEM=="usb",ATTRS{idVendor}=="1a6e",ATTRS{idProduct}=="089a",GROUP="plugdev"
+SUBSYSTEM=="usb",ATTRS{idVendor}=="18d1",ATTRS{idProduct}=="9302",GROUP="plugdev"
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/lib/x86_64-linux-gnu/libedgetpu.so.1 b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/lib/x86_64-linux-gnu/libedgetpu.so.1
new file mode 100644
index 00000000..774993bb
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/lib/x86_64-linux-gnu/libedgetpu.so.1 differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/lib/x86_64-linux-gnu/libedgetpu.so.1.0 b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/lib/x86_64-linux-gnu/libedgetpu.so.1.0
new file mode 100644
index 00000000..774993bb
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/lib/x86_64-linux-gnu/libedgetpu.so.1.0 differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/share/doc/libedgetpu1-max/changelog.gz b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/share/doc/libedgetpu1-max/changelog.gz
new file mode 100644
index 00000000..2dab41aa
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/share/doc/libedgetpu1-max/changelog.gz differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/share/doc/libedgetpu1-max/copyright b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/share/doc/libedgetpu1-max/copyright
new file mode 100644
index 00000000..699aad16
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/share/doc/libedgetpu1-max/copyright
@@ -0,0 +1,6 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Source: https://github.com/google-coral/libedgetpu
+
+Files: *
+Copyright: Copyright 2018 Google, LLC
+License: Apache-2.0
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/share/lintian/overrides/libedgetpu1-max b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/share/lintian/overrides/libedgetpu1-max
new file mode 100644
index 00000000..3591ed22
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/lib/local_deb/usr/share/lintian/overrides/libedgetpu1-max
@@ -0,0 +1,4 @@
+# We provide two conflicting package variants with the same soname inside.
+libedgetpu1-max: package-name-doesnt-match-sonames libedgetpu1
+libedgetpu1-max: missing-debconf-dependency-for-preinst
+libedgetpu1-max: too-long-short-description-in-templates libedgetpu/accepted-eula
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/libedgetpu.deb b/skills/detection/yolo-detection-2026-coral-tpu-macos/libedgetpu.deb
new file mode 100644
index 00000000..7ff32511
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/libedgetpu.deb differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/libedgetpu.so.1 b/skills/detection/yolo-detection-2026-coral-tpu-macos/libedgetpu.so.1
new file mode 100644
index 00000000..774993bb
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/libedgetpu.so.1 differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/models/README.md b/skills/detection/yolo-detection-2026-coral-tpu-macos/models/README.md
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/models/README.md
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/models/README.md
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/output.txt b/skills/detection/yolo-detection-2026-coral-tpu-macos/output.txt
new file mode 100644
index 00000000..cfb40203
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/output.txt
@@ -0,0 +1,8 @@
+[coral-detect] Starting with params: {}
+[coral-detect] Edge TPU delegate not available: Failed to load delegate from C:\Users\work\.aegis-ai\skills\yolo-detection-2026-coral-tpu\lib\edgetpu.dll
+
+[coral-detect] Falling back to CPU inference
+[coral-detect] Falling back to universal SSD MobileNet CPU model
+[coral-detect] Loaded model on CPU: C:\Users\work\.aegis-ai\skills\yolo-detection-2026-coral-tpu\models\ssd_mobilenet_v2_coco_quant_postprocess.tflite
+{"event": "ready", "model": "yolo26n_edgetpu", "device": "cpu", "format": "edgetpu_tflite", "runtime": "ai-edge-litert", "tpu_count": 0, "classes": 80, "input_size": 320, "fps": 5}
+[coral-detect] Ready waiting for frame events on stdin
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/requirements.txt b/skills/detection/yolo-detection-2026-coral-tpu-macos/requirements.txt
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/requirements.txt
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/requirements.txt
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/scripts/compile_model.py b/skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/compile_model.py
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/scripts/compile_model.py
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/compile_model.py
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/scripts/compile_model_colab.py b/skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/compile_model_colab.py
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/scripts/compile_model_colab.py
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/compile_model_colab.py
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/detect.py b/skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/detect.py
new file mode 100644
index 00000000..cc9633a2
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/detect.py
@@ -0,0 +1,569 @@
+#!/usr/bin/env python3
+"""
+Coral TPU Object Detection — JSONL stdin/stdout protocol
+Uses ai-edge-litert (LiteRT) with Edge TPU delegate for hardware acceleration.
+Same protocol as yolo-detection-2026/scripts/detect.py.
+
+Communication:
+ stdin: {"event": "frame", "frame_id": N, "frame_path": "...", ...}
+ stdout: {"event": "detections", "frame_id": N, "objects": [...]}
+ stderr: Debug logs (ignored by Aegis parser)
+"""
+
+import json
+import os
+import sys
+import time
+import signal
+import threading
+from pathlib import Path
+from typing import Optional, List, Dict, Any, Tuple
+
+# ─── Windows DLL search path fix (MUST happen before any native import) ───────
+_LIB_DIR = Path(__file__).parent.parent / "lib"
+if sys.platform == "win32" and _LIB_DIR.exists():
+ os.add_dll_directory(str(_LIB_DIR))
+ os.environ["PATH"] = str(_LIB_DIR) + os.pathsep + os.environ.get("PATH", "")
+
+import numpy as np
+from PIL import Image
+
+# ─── LiteRT imports ────────────────────────────────────────────────────────────
+HAS_LITERT = False
+
+try:
+ import tflite_runtime.interpreter as litert # interpreter as litert
+ HAS_LITERT = True
+except ImportError:
+ sys.stderr.write("[coral-detect] WARNING: ai-edge-litert not installed\n")
+
+
+def log(message: str) -> None:
+ sys.stderr.write(f"[coral-detect] {message}\n")
+ sys.stderr.flush()
+
+
+def emit_json(payload: Dict[str, Any]) -> None:
+ sys.stdout.write(json.dumps(payload, ensure_ascii=False) + "\n")
+ sys.stdout.flush()
+
+
+def _edgetpu_lib_name():
+ """Return the platform-specific libedgetpu shared library name."""
+ import platform
+ system = platform.system()
+ if system == "Linux":
+ return "libedgetpu.so.1"
+ elif system == "Darwin":
+ return "libedgetpu.1.dylib"
+ elif system == "Windows":
+ local_dll = _LIB_DIR / "edgetpu.dll"
+ if local_dll.exists():
+ return str(local_dll.resolve())
+ return "edgetpu.dll"
+ return "libedgetpu.so.1"
+
+
+# ─── COCO class names (80 classes) ────────────────────────────────────────────
+COCO_CLASSES = [
+ "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
+ "truck", "boat", "traffic light", "fire hydrant", "stop sign",
+ "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep",
+ "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
+ "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
+ "sports ball", "kite", "baseball bat", "baseball glove", "skateboard",
+ "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork",
+ "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
+ "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
+ "couch", "potted plant", "bed", "dining table", "toilet", "tv",
+ "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave",
+ "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
+ "scissors", "teddy bear", "hair drier", "toothbrush"
+]
+
+
+class PerfTracker:
+ """Tracks per-frame timing and emits aggregate stats."""
+
+ def __init__(self, emit_interval=50):
+ self.emit_interval = emit_interval
+ self.timings: List[Dict[str, float]] = []
+ self.total_frames = 0
+
+ def record(self, timing_dict: Dict[str, float]) -> None:
+ self.timings.append(timing_dict)
+ self.total_frames += 1
+
+ def should_emit(self) -> bool:
+ return len(self.timings) >= self.emit_interval
+
+ def emit_and_reset(self) -> Optional[Dict[str, Any]]:
+ if not self.timings:
+ return None
+
+ stats = {"event": "perf_stats", "total_frames": len(self.timings), "timings_ms": {}}
+ for key in self.timings[0]:
+ values = sorted([t[key] for t in self.timings])
+ n = len(values)
+ p95_idx = min(n - 1, int(n * 0.95))
+ p99_idx = min(n - 1, int(n * 0.99))
+ stats["timings_ms"][key] = {
+ "avg": round(sum(values) / n, 2),
+ "p50": round(values[n // 2], 2),
+ "p95": round(values[p95_idx], 2),
+ "p99": round(values[p99_idx], 2),
+ }
+ self.timings = []
+ return stats
+
+
+class TPUHealthWatchdog:
+ """
+ Detects two distinct TPU failure modes:
+
+ 1. Inference hang: interpreter.invoke() takes longer than `invoke_timeout_s`.
+ 2. Silent stall: consecutive empty results after previous successful detections.
+ """
+
+ def __init__(self, invoke_timeout_s=10, stall_frames=30, min_active_frames=5):
+ self.invoke_timeout_s = invoke_timeout_s
+ self.stall_frames = stall_frames
+ self.min_active_frames = min_active_frames
+
+ self._consecutive_zero = 0
+ self._total_frames_with_detections = 0
+ self._invoke_exception: Optional[Exception] = None
+
+ def run_invoke(self, interpreter) -> None:
+ """Run interpreter.invoke() with a hard timeout."""
+ self._invoke_exception = None
+ completed = [False]
+
+ def _invoke():
+ try:
+ interpreter.invoke()
+ completed[0] = True
+ except Exception as e:
+ self._invoke_exception = e
+
+ t = threading.Thread(target=_invoke, daemon=True)
+ t.start()
+ t.join(timeout=self.invoke_timeout_s)
+
+ if t.is_alive():
+ raise RuntimeError(
+ f"TPU invoke() timed out after {self.invoke_timeout_s}s — "
+ "USB connection may be lost or TPU is locked up"
+ )
+
+ if self._invoke_exception is not None:
+ raise self._invoke_exception
+
+ def record(self, n_detections: int) -> Optional[str]:
+ if n_detections > 0:
+ self._total_frames_with_detections += 1
+ self._consecutive_zero = 0
+ return None
+
+ self._consecutive_zero += 1
+ if (
+ self._total_frames_with_detections >= self.min_active_frames
+ and self._consecutive_zero >= self.stall_frames
+ ):
+ return "stall"
+
+ return None
+
+ def reset_stall(self) -> None:
+ self._consecutive_zero = 0
+
+
+class CoralDetector:
+ """Edge TPU object detector using ai-edge-litert with libedgetpu delegate."""
+
+ def __init__(self, params: Dict[str, Any]):
+ self.params = params
+ self.confidence = float(params.get("confidence", 0.5))
+ self.input_size = int(params.get("input_size", 320))
+ self.interpreter = None
+ self.tpu_count = 0
+ self.device_name = "unknown"
+ self.watchdog = TPUHealthWatchdog(
+ invoke_timeout_s=10,
+ stall_frames=30,
+ min_active_frames=5,
+ )
+
+ classes_str = params.get("classes", "person,car,dog,cat")
+ self.target_classes = set(c.strip().lower() for c in classes_str.split(",") if c.strip())
+
+ self._load_model()
+
+ def _find_model_path(self) -> Optional[str]:
+ """Find the compiled Edge TPU model."""
+ candidates = [
+ Path("/app/models"),
+ Path(__file__).parent.parent / "models",
+ ]
+
+ for d in candidates:
+ if not d.exists():
+ continue
+ for pattern in [
+ "*_full_integer_quant_edgetpu.tflite",
+ "*_edgetpu.tflite",
+ "*.tflite",
+ ]:
+ matches = sorted(d.glob(pattern))
+ if matches:
+ return str(matches[0])
+
+ return None
+
+ def _load_model(self) -> None:
+ """Load model onto Edge TPU (or CPU fallback)."""
+ if not HAS_LITERT:
+ log("FATAL: ai-edge-litert not available. pip install ai-edge-litert")
+ emit_json({"event": "error", "message": "ai-edge-litert not installed", "retriable": False})
+ sys.exit(1)
+
+ model_path = self._find_model_path()
+ if not model_path:
+ log("ERROR: No .tflite model found in models/")
+ emit_json({"event": "error", "message": "No Edge TPU model found", "retriable": False})
+ sys.exit(1)
+
+ edgetpu_lib = _edgetpu_lib_name()
+ try:
+ if hasattr(litert, "Delegate"):
+ original_del = getattr(litert.Delegate, "__del__", None)
+ if original_del and not hasattr(litert.Delegate, "_patched_del"):
+ def safe_del(self):
+ try:
+ original_del(self)
+ except AttributeError:
+ pass
+ litert.Delegate.__del__ = safe_del
+ litert.Delegate._patched_del = True
+
+ delegate = litert.load_delegate(edgetpu_lib)
+ self.interpreter = litert.Interpreter(
+ model_path=model_path,
+ experimental_delegates=[delegate],
+ )
+ self.interpreter.allocate_tensors()
+ self.device_name = "coral"
+ self.tpu_count = 1
+ log(f"Loaded model on Edge TPU: {model_path}")
+ except (ValueError, OSError) as e:
+ log(f"Edge TPU delegate not available: {e}")
+ log("Falling back to CPU inference")
+ self._load_cpu_fallback(model_path)
+
+ def _load_cpu_fallback(self, model_path: str) -> None:
+ """Fallback to CPU-only LiteRT interpreter."""
+ cpu_path = model_path.replace("_edgetpu.tflite", ".tflite")
+ if not os.path.exists(cpu_path):
+ universal_fallback = os.path.join(
+ os.path.dirname(model_path),
+ "ssd_mobilenet_v2_coco_quant_postprocess.tflite",
+ )
+ if os.path.exists(universal_fallback):
+ log("Falling back to universal SSD MobileNet CPU model")
+ cpu_path = universal_fallback
+ elif "edgetpu" in model_path.lower():
+ log("FATAL: Cannot load Edge TPU compiled model on pure CPU, and no fallback model exists.")
+ emit_json({
+ "event": "error",
+ "message": "No Edge TPU plugged in and no pure-CPU fallback model found.",
+ "retriable": False,
+ })
+ sys.exit(1)
+ else:
+ cpu_path = model_path
+
+ try:
+ self.interpreter = litert.Interpreter(model_path=cpu_path)
+ self.interpreter.allocate_tensors()
+ self.device_name = "cpu"
+ log(f"Loaded model on CPU: {cpu_path}")
+ except Exception as e:
+ log(f"FATAL: Cannot load model: {e}")
+ emit_json({"event": "error", "message": f"Cannot load model: {e}", "retriable": False})
+ sys.exit(1)
+
+ def _prepare_input_tensor(self, img_resized: Image.Image, input_details: Dict[str, Any]) -> np.ndarray:
+ """
+ Prepare input tensor matching model dtype and quantization parameters.
+
+ Fixes crash where INT8 models were being fed UINT8 tensors.
+ """
+ req_dtype = input_details["dtype"]
+ input_shape = input_details["shape"]
+ quant = input_details.get("quantization", (0.0, 0))
+ scale, zero_point = quant if quant is not None else (0.0, 0)
+
+ img_np = np.asarray(img_resized)
+
+ if req_dtype == np.uint8:
+ tensor = img_np.astype(np.uint8)
+
+ elif req_dtype == np.int8:
+ # Quantize from float pixel domain using model input quantization.
+ # If scale metadata is missing/zero, fall back to common full-int8 image mapping.
+ if scale and scale > 0:
+ tensor_f = img_np.astype(np.float32) / scale + zero_point
+ tensor = np.clip(np.round(tensor_f), -128, 127).astype(np.int8)
+ else:
+ tensor = (img_np.astype(np.int16) - 128).clip(-128, 127).astype(np.int8)
+
+ elif req_dtype == np.float32:
+ tensor = img_np.astype(np.float32)
+ if scale and scale > 0:
+ tensor = (tensor - zero_point) * scale
+ else:
+ tensor /= 255.0
+
+ else:
+ tensor = img_np.astype(req_dtype)
+
+ return np.expand_dims(tensor, axis=0).reshape(input_shape)
+
+ def _dequantize_output(self, arr: np.ndarray, detail: Dict[str, Any]) -> np.ndarray:
+ """Convert quantized output tensor to float if needed."""
+ if np.issubdtype(arr.dtype, np.floating):
+ return arr.astype(np.float32)
+
+ scale, zero_point = detail.get("quantization", (0.0, 0))
+ if scale and scale > 0:
+ return (arr.astype(np.float32) - zero_point) * scale
+ return arr.astype(np.float32)
+
+ def _parse_ssd_outputs(
+ self,
+ output_details: List[Dict[str, Any]],
+ orig_w: int,
+ orig_h: int,
+ ) -> List[Dict[str, Any]]:
+ """Parse SSD MobileNet-style outputs: boxes, classes, scores, count."""
+ boxes = self._dequantize_output(
+ self.interpreter.get_tensor(output_details[0]["index"]),
+ output_details[0],
+ )[0]
+ classes = self._dequantize_output(
+ self.interpreter.get_tensor(output_details[1]["index"]),
+ output_details[1],
+ )[0]
+ scores = self._dequantize_output(
+ self.interpreter.get_tensor(output_details[2]["index"]),
+ output_details[2],
+ )[0]
+ count_tensor = self.interpreter.get_tensor(output_details[3]["index"])
+ count = int(np.array(count_tensor).flatten()[0])
+
+ objects: List[Dict[str, Any]] = []
+ for i in range(min(count, len(scores), 100)):
+ score = float(scores[i])
+ if score < self.confidence:
+ continue
+
+ class_id = int(classes[i])
+ class_name = COCO_CLASSES[class_id] if 0 <= class_id < len(COCO_CLASSES) else f"class_{class_id}"
+
+ if self.target_classes and class_name.lower() not in self.target_classes:
+ continue
+
+ y1, x1, y2, x2 = [float(v) for v in boxes[i]]
+ x1 = max(0.0, min(1.0, x1))
+ y1 = max(0.0, min(1.0, y1))
+ x2 = max(0.0, min(1.0, x2))
+ y2 = max(0.0, min(1.0, y2))
+
+ objects.append({
+ "label": class_name,
+ "confidence": round(score, 4),
+ "bbox": {
+ "x": round(x1 * orig_w, 1),
+ "y": round(y1 * orig_h, 1),
+ "width": round((x2 - x1) * orig_w, 1),
+ "height": round((y2 - y1) * orig_h, 1),
+ },
+ })
+
+ return objects
+
+ def detect_frame(self, frame_path: str) -> Tuple[List[Dict[str, Any]], Dict[str, float], Optional[str]]:
+ """Run detection on a single frame."""
+ t0 = time.perf_counter()
+
+ try:
+ img = Image.open(frame_path).convert("RGB")
+ except Exception as e:
+ log(f"ERROR reading frame: {e}")
+ return [], {}, None
+
+ t_read = time.perf_counter()
+
+ input_details = self.interpreter.get_input_details()[0]
+ input_shape = input_details["shape"]
+ h, w = int(input_shape[1]), int(input_shape[2])
+ orig_w, orig_h = img.size
+ img_resized = img.resize((w, h), Image.LANCZOS)
+
+ try:
+ input_data = self._prepare_input_tensor(img_resized, input_details)
+ self.interpreter.set_tensor(input_details["index"], input_data)
+ except Exception as e:
+ log(f"ERROR preparing input tensor: dtype={input_details.get('dtype')} quant={input_details.get('quantization')} err={e}")
+ return [], {}, "input_error"
+
+ t_pre = time.perf_counter()
+
+ try:
+ self.watchdog.run_invoke(self.interpreter)
+ except RuntimeError as e:
+ log(f"TPU invoke() failed: {e}")
+ return [], {}, "hang"
+ except Exception as e:
+ log(f"Inference failed: {e}")
+ return [], {}, "invoke_error"
+
+ t_infer = time.perf_counter()
+
+ objects: List[Dict[str, Any]] = []
+ output_details = self.interpreter.get_output_details()
+
+ try:
+ if len(output_details) >= 4:
+ objects = self._parse_ssd_outputs(output_details, orig_w, orig_h)
+ else:
+ log(f"Unsupported model output layout: {len(output_details)} tensors")
+ except Exception as e:
+ log(f"ERROR parsing outputs: {e}")
+ return [], {}, "parse_error"
+
+ t_post = time.perf_counter()
+
+ timings = {
+ "read": round((t_read - t0) * 1000.0, 2),
+ "preprocess": round((t_pre - t_read) * 1000.0, 2),
+ "infer": round((t_infer - t_pre) * 1000.0, 2),
+ "postprocess": round((t_post - t_infer) * 1000.0, 2),
+ "total": round((t_post - t0) * 1000.0, 2),
+ }
+
+ health = self.watchdog.record(len(objects))
+ return objects, timings, health
+
+
+_shutdown = False
+
+
+def _handle_signal(signum, frame):
+ global _shutdown
+ _shutdown = True
+ log(f"Received signal {signum}, shutting down...")
+
+
+def main() -> None:
+ signal.signal(signal.SIGINT, _handle_signal)
+ signal.signal(signal.SIGTERM, _handle_signal)
+
+ raw_params = os.environ.get("AEGIS_SKILL_PARAMS", "{}")
+ try:
+ params = json.loads(raw_params)
+ except Exception:
+ params = {}
+
+ log(f"Starting with params: {json.dumps(params)}")
+
+ detector = CoralDetector(params)
+ perf = PerfTracker(emit_interval=50)
+
+ emit_json({
+ "event": "ready",
+ "model": "yolo26n_edgetpu",
+ "device": detector.device_name,
+ "format": "edgetpu_tflite",
+ "runtime": "ai-edge-litert",
+ "tpu_count": detector.tpu_count,
+ "classes": len(COCO_CLASSES),
+ "input_size": detector.input_size,
+ "fps": int(params.get("fps", 5)),
+ })
+ log("Ready — waiting for frame events on stdin")
+
+ while not _shutdown:
+ line = sys.stdin.readline()
+ if not line:
+ break
+
+ line = line.strip()
+ if not line:
+ continue
+
+ try:
+ msg = json.loads(line)
+ except json.JSONDecodeError:
+ log(f"Ignoring invalid JSON line: {line[:200]}")
+ continue
+
+ event = msg.get("event")
+ if event == "shutdown":
+ break
+
+ if event != "frame":
+ continue
+
+ frame_path = msg.get("frame_path")
+ frame_id = msg.get("frame_id")
+ camera_id = msg.get("camera_id")
+ timestamp = msg.get("timestamp")
+
+ if not frame_path:
+ emit_json({
+ "event": "error",
+ "frame_id": frame_id,
+ "message": "Missing frame_path",
+ "retriable": True,
+ })
+ continue
+
+ objects, timings, health = detector.detect_frame(frame_path)
+
+ emit_json({
+ "event": "detections",
+ "frame_id": frame_id,
+ "camera_id": camera_id,
+ "timestamp": timestamp,
+ "objects": objects,
+ })
+
+ if timings:
+ perf.record(timings)
+ if perf.should_emit():
+ stats = perf.emit_and_reset()
+ if stats:
+ emit_json(stats)
+
+ if health == "hang":
+ emit_json({
+ "event": "error",
+ "frame_id": frame_id,
+ "message": "TPU inference hang detected",
+ "retriable": True,
+ })
+ elif health == "stall":
+ emit_json({
+ "event": "warning",
+ "frame_id": frame_id,
+ "message": "TPU may be stalled: too many consecutive empty detections",
+ })
+
+ stats = perf.emit_and_reset()
+ if stats:
+ emit_json(stats)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/scripts/detect.py b/skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/detect.py.bak
similarity index 72%
rename from skills/detection/yolo-detection-2026-coral-tpu/scripts/detect.py
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/detect.py.bak
index 7a9422a9..d2aecd0a 100644
--- a/skills/detection/yolo-detection-2026-coral-tpu/scripts/detect.py
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/detect.py.bak
@@ -19,6 +19,17 @@
from pathlib import Path
from typing import Optional, Tuple, List, Dict, Any
+# ─── Windows DLL search path fix (MUST happen before any native import) ───────
+# Python 3.8+ no longer searches PATH for DLLs loaded by native C extensions.
+# We must register our local lib/ directory so that when ai_edge_litert loads
+# edgetpu.dll, Windows can also find libusb-1.0.dll in the same folder.
+# Native delegates loaded via C++ LoadLibrary also bypass Python's DLL directory,
+# so we must append it to the system PATH environment variable as well.
+_LIB_DIR = Path(__file__).parent.parent / "lib"
+if sys.platform == "win32" and _LIB_DIR.exists():
+ os.add_dll_directory(str(_LIB_DIR))
+ os.environ["PATH"] = str(_LIB_DIR) + os.pathsep + os.environ.get("PATH", "")
+
import numpy as np
from PIL import Image
@@ -36,7 +47,7 @@
except ImportError:
sys.stderr.write("[coral-detect] WARNING: ai-edge-litert not installed\n")
-# Determine the correct delegate library name per platform
+
def _edgetpu_lib_name():
"""Return the platform-specific libedgetpu shared library name."""
import platform
@@ -46,6 +57,13 @@ def _edgetpu_lib_name():
elif system == "Darwin":
return "libedgetpu.1.dylib"
elif system == "Windows":
+ # os.add_dll_directory() already registered our lib/ folder above,
+ # so Windows can resolve all transitive dependencies (libusb-1.0.dll etc.).
+ # Use just the bare name so load_delegate() finds it through the
+ # registered DLL directories rather than trying to parse a full path.
+ local_dll = _LIB_DIR / "edgetpu.dll"
+ if local_dll.exists():
+ return str(local_dll.resolve())
return "edgetpu.dll"
return "libedgetpu.so.1"
@@ -263,7 +281,16 @@ def _load_cpu_fallback(self, model_path):
# Use a non-edgetpu model if available
cpu_path = model_path.replace("_edgetpu.tflite", ".tflite")
if not os.path.exists(cpu_path):
- cpu_path = model_path
+ universal_fallback = os.path.join(os.path.dirname(model_path), "ssd_mobilenet_v2_coco_quant_postprocess.tflite")
+ if os.path.exists(universal_fallback):
+ log("Falling back to universal SSD MobileNet CPU model")
+ cpu_path = universal_fallback
+ elif "edgetpu" in model_path.lower():
+ log("FATAL: Cannot load Edge TPU compiled model on pure CPU, and no fallback model exists.")
+ emit_json({"event": "error", "message": "No Edge TPU plugged in and no pure-CPU fallback model found.", "retriable": False})
+ sys.exit(1)
+ else:
+ cpu_path = model_path
try:
self.interpreter = litert.Interpreter(model_path=cpu_path)
@@ -295,8 +322,15 @@ def detect_frame(self, frame_path):
orig_w, orig_h = img.size
img_resized = img.resize((w, h), Image.LANCZOS)
- # Set input tensor
- input_data = np.expand_dims(np.array(img_resized, dtype=np.uint8), axis=0)
+ # Prepare input data according to required dtype
+ req_dtype = input_details["dtype"]
+ raw_img = np.array(img_resized, dtype=np.float32)
+ if req_dtype == np.int8:
+ # Shift 0-255 to -128-127 for INT8 models
+ input_data = np.expand_dims((raw_img - 128).astype(np.int8), axis=0)
+ else:
+ input_data = np.expand_dims(raw_img.astype(np.uint8), axis=0)
+
self.interpreter.set_tensor(input_details["index"], input_data)
# Run inference with hard timeout via watchdog
@@ -342,9 +376,111 @@ def detect_frame(self, frame_path):
]
})
elif len(output_details) >= 1:
- # YOLO-style output: single tensor with [N, 6] or similar
+ # YOLO-style output: single tensor with [1, num_classes + 4, num_anchors]
output = self.interpreter.get_tensor(output_details[0]["index"])
- log(f"Single-output model, shape: {output.shape}")
+ out_dtype = output_details[0]["dtype"]
+ q_scale, q_zero = output_details[0]["quantization"]
+
+ # Dequantize if returning INT8
+ if out_dtype == np.int8 and q_scale > 0:
+ output = (output.astype(np.float32) - q_zero) * q_scale
+
+ pred = output[0] # Shape: e.g. (84, 2100) or (2100, 84)
+
+ # Ultralytics tends to output (num_classes + 4, num_anchors), so transpose if so
+ if pred.shape[0] == len(COCO_CLASSES) + 4:
+ pred = pred.transpose() # Now (num_anchors, 84)
+
+ # Parse boxes and scores
+ boxes_cx = pred[:, 0]
+ boxes_cy = pred[:, 1]
+ boxes_w = pred[:, 2]
+ boxes_h = pred[:, 3]
+
+ # Scores for all classes
+ class_scores = pred[:, 4:]
+ max_scores = np.max(class_scores, axis=1)
+ class_ids = np.argmax(class_scores, axis=1)
+
+ # Filter by confidence
+ mask = max_scores >= self.confidence
+
+ boxes_cx = boxes_cx[mask]
+ boxes_cy = boxes_cy[mask]
+ boxes_w = boxes_w[mask]
+ boxes_h = boxes_h[mask]
+ class_ids = class_ids[mask]
+ max_scores = max_scores[mask]
+
+ # Convert cx, cy, w, h -> y1, x1, y2, x2
+ x1 = boxes_cx - boxes_w / 2
+ y1 = boxes_cy - boxes_h / 2
+ x2 = boxes_cx + boxes_w / 2
+ y2 = boxes_cy + boxes_h / 2
+
+ # Very basic NMS
+ # First, filter to target classes
+ filtered_indices = []
+ for i in range(len(max_scores)):
+ cid = class_ids[i]
+ cname = COCO_CLASSES[cid] if cid < len(COCO_CLASSES) else f"class_{cid}"
+ if self.target_classes and cname not in self.target_classes:
+ continue
+ filtered_indices.append(i)
+
+ if len(filtered_indices) > 0:
+ x1_f = x1[filtered_indices]
+ y1_f = y1[filtered_indices]
+ x2_f = x2[filtered_indices]
+ y2_f = y2[filtered_indices]
+ scores_f = max_scores[filtered_indices]
+ class_ids_f = class_ids[filtered_indices]
+
+ # Numpy NMS implementation
+ areas = (x2_f - x1_f) * (y2_f - y1_f)
+ order = scores_f.argsort()[::-1]
+
+ keep = []
+ # Keep top 25 at most
+ while order.size > 0 and len(keep) < 25:
+ i = order[0]
+ keep.append(i)
+ if order.size == 1:
+ break
+
+ xx1 = np.maximum(x1_f[i], x1_f[order[1:]])
+ yy1 = np.maximum(y1_f[i], y1_f[order[1:]])
+ xx2 = np.minimum(x2_f[i], x2_f[order[1:]])
+ yy2 = np.minimum(y2_f[i], y2_f[order[1:]])
+
+ w = np.maximum(0.0, xx2 - xx1)
+ h = np.maximum(0.0, yy2 - yy1)
+ inter = w * h
+ iou = inter / (areas[i] + areas[order[1:]] - inter)
+
+ inds = np.where(iou <= 0.45)[0] # NMS threshold
+ order = order[inds + 1]
+
+ for k in keep:
+ cid = class_ids_f[k]
+ cname = COCO_CLASSES[cid] if cid < len(COCO_CLASSES) else f"class_{cid}"
+
+ # Normalizing against original input resolution is done at UI or here
+ # Typical YOLOv8 models output values scaled by input resolution (e.g. 0 to 320)
+ # We normalize it to 0.0 - 1.0!
+ nx1 = float(x1_f[k]) / self.input_size
+ ny1 = float(y1_f[k]) / self.input_size
+ nx2 = float(x2_f[k]) / self.input_size
+ ny2 = float(y2_f[k]) / self.input_size
+
+ objects.append({
+ "class": cname,
+ "confidence": float(round(scores_f[k], 3)),
+ "bbox": [
+ int(nx1 * orig_w), int(ny1 * orig_h),
+ int(nx2 * orig_w), int(ny2 * orig_h)
+ ]
+ })
t_post = time.perf_counter()
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/scripts/install_pycoral.py b/skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/install_pycoral.py
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/scripts/install_pycoral.py
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/install_pycoral.py
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/scripts/tpu_probe.py b/skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/tpu_probe.py
similarity index 85%
rename from skills/detection/yolo-detection-2026-coral-tpu/scripts/tpu_probe.py
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/tpu_probe.py
index 3f208a40..ded1a397 100644
--- a/skills/detection/yolo-detection-2026-coral-tpu/scripts/tpu_probe.py
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/scripts/tpu_probe.py
@@ -11,18 +11,29 @@
"""
import json
-import platform
import sys
+from pathlib import Path
+# ─── Windows DLL search path fix (MUST happen before any native import) ───────
+# Python 3.8+ no longer searches PATH for DLLs loaded by native C extensions.
+_LIB_DIR = Path(__file__).parent.parent / "lib"
+if sys.platform == "win32" and _LIB_DIR.exists():
+ import os
+ os.add_dll_directory(str(_LIB_DIR))
def _edgetpu_lib_name():
"""Return the platform-specific libedgetpu shared library name."""
+ import platform
+ from pathlib import Path
system = platform.system()
if system == "Linux":
return "libedgetpu.so.1"
elif system == "Darwin":
return "libedgetpu.1.dylib"
elif system == "Windows":
+ local_dll = _LIB_DIR / "edgetpu.dll"
+ if local_dll.exists():
+ return str(local_dll.resolve())
return "edgetpu.dll"
return "libedgetpu.so.1"
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/skills.json b/skills/detection/yolo-detection-2026-coral-tpu-macos/skills.json
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/skills.json
rename to skills/detection/yolo-detection-2026-coral-tpu-macos/skills.json
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/test2.jpg b/skills/detection/yolo-detection-2026-coral-tpu-macos/test2.jpg
new file mode 100644
index 00000000..6d0b17e9
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-macos/test2.jpg differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-macos/wsl_test.py b/skills/detection/yolo-detection-2026-coral-tpu-macos/wsl_test.py
new file mode 100644
index 00000000..f4e342a4
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-macos/wsl_test.py
@@ -0,0 +1,15 @@
+import sys
+import numpy as np
+
+try:
+ import tflite_runtime.interpreter as tflite
+except ImportError:
+ import ai_edge_litert.interpreter as tflite
+
+delegate = tflite.load_delegate("libedgetpu.so.1")
+interpreter = tflite.Interpreter("models/yolo26n_full_integer_quant_edgetpu.tflite", experimental_delegates=[delegate])
+interpreter.allocate_tensors()
+inp = interpreter.get_input_details()[0]
+interpreter.set_tensor(inp["index"], np.zeros(inp["shape"], dtype=inp["dtype"]))
+interpreter.invoke()
+print("SUCCESSFULLY INVOKED EDGE TPU ON ZERO TENSOR IN WSL!")
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/.gitignore b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/.gitignore
new file mode 100644
index 00000000..4f36dbf4
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/.gitignore
@@ -0,0 +1,53 @@
+# ─── Python ───────────────────────────────────────────
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+*.egg-info/
+*.egg
+dist/
+.eggs/
+.venv/
+venv/
+env/
+
+# ─── ML Models & Data ────────────────────────────────
+*.pt
+*.pth
+*.onnx
+*.tflite
+*.pb
+*.h5
+*.safetensors
+model/
+model.tgz
+*.tgz
+weights/
+runs/
+
+# ─── Build ────────────────────────────────────────────
+build/build/*
+build/dist/*
+build/runtime_arch
+build/*
+runtime
+
+# ─── Node ─────────────────────────────────────────────
+node_modules/
+
+# ─── Docker ───────────────────────────────────────────
+docker/db
+volumes
+
+# ─── IDE & OS ─────────────────────────────────────────
+.DS_Store
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+.env.local
+
+# ─── Project Specific ────────────────────────────────
+src/yolov7_reid/src/models/mgn_R50-ibn.onnx
+*.tflite
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/.travis.yml b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/.travis.yml
new file mode 100644
index 00000000..aa84e4b6
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/.travis.yml
@@ -0,0 +1,34 @@
+arch:
+ - arm64
+os: linux
+dist: xenial
+language: shell
+services:
+ - docker
+env:
+ global:
+ - DOCKER_CACHE_FILE=/home/travis/docker/cache.tar.gz
+before_script:
+ # Every 30 seconds, look for the build log file. If it exists, then
+ # start watching its contents and printing them to stdout as they
+ # change. This has two effects:
+ # 1. it avoids Travis timing out because the build outputs nothing
+ # 2. it makes it more obvious what part of the build, if any, gets stuck
+ - while sleep 30; do tail $TRAVIS_BUILD_DIR/log -f ; done &
+script:
+ #- cd $TRAVIS_BUILD_DIR/docker/build/tensorflow && docker build -f Dockerfile.arm64v8 -t shareai/tensorflow:arm64v8_latest .
+ #- cd $TRAVIS_BUILD_DIR/docker/build/od && docker build -f Dockerfile.arm64v8 -t shareai/od:arm64v8_latest .
+ - "travis_wait 50 sleep 3000 &"
+ - docker-compose -f $TRAVIS_BUILD_DIR/docker/build/docker-compose-arm64v8.yml build > $TRAVIS_BUILD_DIR/log
+after_success:
+ - docker login --username shareai --password $DOCKER_HUB_TOKEN
+ - docker push shareai/tensorflow:arm64v8_latest
+ #- docker push shareai/od:arm64v8_latest
+ - cd $TRAVIS_BUILD_DIR/docker/build && docker-compose -f docker-compose-arm64v8.yml push
+cache:
+ directories:
+ - /home/travis/docker/
+before_install:
+ - if [ -f ${DOCKER_CACHE_FILE} ]; then gunzip -c ${DOCKER_CACHE_FILE} | docker load; fi
+before_cache:
+ - if [[ ${TRAVIS_BRANCH} == "master" ]] && [[ ${TRAVIS_PULL_REQUEST} == "false" ]]; then docker save $(docker images -a -q) | gzip > ${DOCKER_CACHE_FILE}; fi
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/CODE_OF_CONDUCT.md b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..a3d85af5
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/CODE_OF_CONDUCT.md
@@ -0,0 +1,49 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the overall
+ community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or advances of
+ any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the project maintainers. All complaints will be reviewed and
+investigated promptly and fairly.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org),
+version 2.1, available at
+https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/CONTRIBUTING.md b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/CONTRIBUTING.md
new file mode 100644
index 00000000..217958b5
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/CONTRIBUTING.md
@@ -0,0 +1,54 @@
+# Contributing to DeepCamera
+
+Thank you for your interest in contributing to DeepCamera! This project is evolving into an open-source AI skill platform for [SharpAI Aegis](https://sharpai.org).
+
+## How to Contribute
+
+### 🛠️ Build a New Skill
+
+The best way to contribute is by building a new skill. Each skill is a self-contained folder under `skills/` with:
+
+1. **`SKILL.md`** — declares parameters (rendered as UI in Aegis) and capabilities
+2. **`requirements.txt`** — Python dependencies
+3. **`scripts/`** — entry point using JSON-lines stdin/stdout protocol
+
+See [`skills/detection/yolo-detection-2026/`](skills/detection/yolo-detection-2026/) for a complete reference implementation.
+
+### 📋 Skill Ideas We Need
+
+- Camera providers: Eufy, Reolink, Tapo, Ring
+- Messaging channels: Matrix, LINE, Signal
+- Automation triggers: MQTT, webhooks
+- AI models: VLM scene analysis, SAM2 segmentation, depth estimation
+
+### 🐛 Report Issues
+
+- Use [GitHub Issues](https://github.com/SharpAI/DeepCamera/issues)
+- Include your platform, Python version, and steps to reproduce
+
+### 📝 Improve Documentation
+
+- Fix typos, improve clarity, add examples
+- Add platform-specific setup guides under `docs/`
+
+## Development Setup
+
+```bash
+git clone https://github.com/SharpAI/DeepCamera.git
+cd DeepCamera
+
+# Work on a skill
+cd skills/detection/yolo-detection-2026
+python3 -m venv .venv && source .venv/bin/activate
+pip install -r requirements.txt
+```
+
+## Code Style
+
+- Python: follow PEP 8
+- Use type hints where practical
+- Add docstrings to public functions
+
+## License
+
+By contributing, you agree that your contributions will be licensed under the [MIT License](LICENSE).
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/Contributions.md b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/Contributions.md
new file mode 100644
index 00000000..39ad280f
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/Contributions.md
@@ -0,0 +1,23 @@
+# Third-Party Licenses & Acknowledgments
+
+This project uses or was inspired by the following open-source projects:
+
+## AI & ML Frameworks
+* [Ultralytics](https://github.com/ultralytics/ultralytics) — YOLOv8/v10/v11 (AGPL-3.0)
+* [Insightface](https://github.com/deepinsight/insightface) — Face recognition (MIT)
+* [FastReID](https://github.com/JDAI-CV/fast-reid) — Person re-identification (Apache-2.0)
+
+## Legacy Dependencies (src/)
+* [TensorFlow](https://github.com/tensorflow/tensorflow) — Apache License 2.0
+* [MXNet](https://github.com/apache/incubator-mxnet) — Apache License 2.0
+* [TVM](https://github.com/dmlc/tvm) — Apache License 2.0
+
+## Infrastructure
+* [Milvus](https://github.com/milvus-io/milvus) — Vector database (Apache-2.0)
+* [go2rtc](https://github.com/AlexxIT/go2rtc) — RTSP/WebRTC streaming (MIT)
+* [Node.js](https://nodejs.org) — MIT
+* [Python](https://www.python.org) — PSF License
+
+## Historical
+* Shinobi — https://gitlab.com/Shinobi-Systems/Shinobi/
+* Termux — https://github.com/termux/termux-app (GPLv3)
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/LICENSE b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/LICENSE
new file mode 100644
index 00000000..e22482f4
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 SharpAI Dev Team
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/README.md b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/README.md
new file mode 100644
index 00000000..63609d6b
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/README.md
@@ -0,0 +1,67 @@
+# YOLO 2026 Coral TPU — Real-Time Object Detection
+
+This DeepCamera skill executes real-time object detection natively utilizing the Google Coral Edge TPU USB Accelerator. By executing localized inference on tensor processing hardware, it provides exceptional detection speeds (upwards of ~4ms on 320x320 models) while maintaining complete privacy and functioning entirely offline without relying on cloud providers.
+
+## Architecture & Data Flow
+
+When executing inside Aegis-AI, this skill is deployed as an entirely isolated local process.
+
+```mermaid
+flowchart TD
+ %% Define components
+ subgraph Aegis[Aegis-AI Engine]
+ A[Camera Stream] -->|Extracts frame| B(Workspace Memory)
+ B -->|Saves JPEG| C{/tmp/aegis_detection/}
+ end
+
+ subgraph Native_Process[Python Virtual Env]
+ D[detect.py Script]
+ E(ai-edge-litert)
+ F[yolo26n_edgetpu.tflite]
+ W[TPU Watchdog]
+ end
+
+ subgraph Hardware
+ G[Google Coral Edge TPU]
+ end
+
+ %% Define communication lines
+ Aegis -->|stdin JSONL 'frame' event| Native_Process
+ D -.->|Loads| F
+ C -.->|Reads JPEG| D
+ D <-->|Monitors| W
+ D -->|Hands off tensors| E
+ E <==>|USB 3.0 Inferencing| G
+ Native_Process -->|stdout JSONL 'detections'| Aegis
+ Native_Process -.->|stdout JSONL 'tpu_error'| Aegis
+```
+
+### Flow Breakdown
+
+1. **Deployment Phase**:
+ The OS-specific deployment scripts (`deploy-linux.sh`, `deploy-macos.sh`, `deploy.bat`) provision essential C-libraries (`libusb`), native Google driver binaries (`libedgetpu`), and create an isolated `python3 -m venv` sandbox securely.
+ * **Dynamic Models:** Instead of tracking massive raw binary files in git, the deployment script dynamically downloads the `.tflite` compiled models directly from Google's master Edge TPU repository using `curl` / `Invoke-WebRequest`.
+2. **Inference Loop**:
+ - The host system (Aegis-AI) continuously records frames and saves a snapshot to `/tmp/aegis_detection/` memory cache.
+ - Using standard input (`stdin`), Aegis-AI sends a brief JSON control sequence instructing the Python watcher script (`detect.py`) to process the frame.
+ - The Edge TPU fetches the tensor, performs native hardware execution using `libusb`, and instantly evaluates bounding box predictions without triggering CPU payload spikes.
+ - Results are streamed synchronously over standard output (`stdout`) to Aegis-AI.
+
+## Resilience & Auto-Recovery
+
+The integration features a native **TPU Health Watchdog** built directly into `detect.py`:
+* **Hang Detection**: If the USB connection fails and the `ai-edge-litert` delegate locks up, the invocation thread will hard-timeout after 10 seconds. The skill emits an `invoke_timeout` error and exits, prompting Aegis-AI to autonomously restart the sidecar process.
+* **Stall Detection**: If the TPU thermally throttles and begins silently returning zero objects for 30 consecutive frames (despite functioning perfectly seconds prior), the watchdog emits a `stall` telemetry event back to Aegis-AI.
+
+## Platform Differences
+
+* **Linux**: Inherits Python dependencies securely and provisions Google's `libedgetpu1-max` driver via `apt-get` on Debian/Ubuntu systems. Uses `ai-edge-litert` to support modern Python (3.9 - 3.13) without requiring legacy `pycoral` or `pyenv` locking.
+* **macOS**: Fully supports Apple Silicon natively. Uses the `feranick/libedgetpu` community fork to allow Edge TPU execution on ARM64 chips without relying on Rosetta 2.
+* **Windows**: Relies on PowerShell bootstrapping and transparent UAC elevation logs to quietly execute Google's `install.bat`. Falls back to community wheels gracefully.
+
+## Configuration Options
+
+Configure these inside the Aegis-AI UI:
+* **Input Resolution:** `320` is highly recommended. It perfectly fits into the Edge TPU's internal SRAM cache and executes fully on the co-processor. Scaling up to `640` pushes memory limits and offloads chunks to the host CPU, slowing things significantly.
+* **FPS:** Caps execution speeds to prevent thermal starvation and USB saturation on shared bus systems.
+* **Clock Speed:** Standard is safe. Max draws more power and produces thermal heat; it should only be used if there is a heatsink fan actively installed on the TPU.
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/SKILL.md b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/SKILL.md
new file mode 100644
index 00000000..d5c366d3
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/SKILL.md
@@ -0,0 +1,130 @@
+---
+name: yolo-detection-2026-coral-tpu-win-wsl
+description: "Google Coral Edge TPU — real-time object detection natively via Windows WSL"
+version: 1.0.0
+icon: assets/icon.png
+entry: scripts/wsl_wrapper.cjs
+deploy:
+ windows: deploy.bat
+runtime: wsl-python
+
+requirements:
+ platforms: ["windows"]
+
+
+
+parameters:
+ - name: auto_start
+ label: "Auto Start"
+ type: boolean
+ default: false
+ description: "Start this skill automatically when Aegis launches"
+ group: Lifecycle
+
+ - name: confidence
+ label: "Confidence Threshold"
+ type: number
+ min: 0.1
+ max: 1.0
+ default: 0.5
+ description: "Minimum detection confidence — lower than GPU models due to INT8 quantization"
+ group: Model
+
+ - name: classes
+ label: "Detect Classes"
+ type: string
+ default: "person,car,dog,cat"
+ description: "Comma-separated COCO class names (80 classes available)"
+ group: Model
+
+ - name: fps
+ label: "Processing FPS"
+ type: select
+ options: [0.2, 0.5, 1, 3, 5, 15]
+ default: 5
+ description: "Frames per second — Edge TPU handles 15+ FPS easily"
+ group: Performance
+
+ - name: input_size
+ label: "Input Resolution"
+ type: select
+ options: [320, 640]
+ default: 320
+ description: "320 fits fully on TPU (~4ms), 640 partially on CPU (~20ms)"
+ group: Performance
+
+ - name: tpu_device
+ label: "TPU Device"
+ type: select
+ options: ["auto", "0", "1", "2", "3"]
+ default: "auto"
+ description: "Which Edge TPU to use — auto selects first available"
+ group: Performance
+
+ - name: clock_speed
+ label: "TPU Clock Speed"
+ type: select
+ options: ["standard", "max"]
+ default: "standard"
+ description: "Max is faster but runs hotter — needs active cooling for sustained use"
+ group: Performance
+
+capabilities:
+ live_detection:
+ script: scripts/detect.py
+ description: "Real-time object detection on live camera frames via Edge TPU inside WSL"
+
+category: detection
+mutex: detection
+---
+
+# Coral TPU Object Detection (Windows WSL)
+
+Real-time object detection natively utilizing the Google Coral Edge TPU accelerator on your local hardware via Windows Subsystem for Linux (WSL). Detects 80 COCO classes (person, car, dog, cat, etc.) with ~4ms inference on 320x320 input.
+
+## Requirements
+
+- **Google Coral USB Accelerator** (USB 3.0 port recommended)
+- **WSL2** installed and running on Windows
+- `usbipd-win` installed on the Windows host
+
+## How It Works
+
+```
+┌─────────────────────────────────────────────────────┐
+│ Host (Aegis-AI on Windows) │
+│ frame.jpg → /tmp/aegis_detection/ │
+│ stdin ──→ ┌──────────────────────────────┐ │
+│ │ WSL Container / Environment │ │
+│ │ detect.py │ │
+│ │ ├─ loads _edgetpu.tflite │ │
+│ │ ├─ reads frame from disk │ │
+│ │ └─ runs inference on TPU │ │
+│ stdout ←── │ → JSONL detections │ │
+│ └──────────────────────────────┘ │
+│ USB ──→ usbipd-win bridge to WSL │
+└─────────────────────────────────────────────────────┘
+```
+
+1. Aegis writes camera frame JPEG to shared `/tmp/aegis_detection/` workspace
+2. Sends `frame` event via stdin JSONL to the WSL Python instance
+3. `detect.py` invokes the LiteRT (`ai-edge-litert`) interpreter and executes natively on the mapped USB Edge TPU inside Linux
+4. Returns `detections` event via stdout JSONL back to Windows Host
+
+## Performance
+
+| Input Size | Inference | On-chip | Notes |
+|-----------|-----------|---------|-------|
+| 320x320 | ~4ms | 100% | Fully on TPU, best for real-time |
+| 640x640 | ~20ms | Partial | Some layers on CPU (model segmented) |
+
+> **Cooling**: The USB Accelerator aluminum case acts as a heatsink. If too hot to touch during continuous inference, it will thermal-throttle. Consider active cooling or `clock_speed: standard`.
+
+## Installation
+
+### Windows (WSL)
+Run `deploy.bat` — this will:
+1. Verify `usbipd` is installed and bind the `18d1:9302` and `1a6e:089a` Edge TPU hardware IDs.
+2. Setup a Python virtual environment exclusively within WSL.
+3. Install the Edge TPU libraries and dependencies within the WSL boundary.
+4. Auto-attach the device using `usbipd` seamlessly during invocation.
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/_config.yml b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/_config.yml
new file mode 100644
index 00000000..c4192631
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/_config.yml
@@ -0,0 +1 @@
+theme: jekyll-theme-cayman
\ No newline at end of file
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/config.yaml b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/config.yaml
new file mode 100644
index 00000000..c5fc163f
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/config.yaml
@@ -0,0 +1,65 @@
+# Coral TPU Detection Skill — Configuration Schema
+# Parsed by Aegis skill-registry-service.cjs → parseConfigYaml()
+# Format: params[] with key, type, label, default, description, options
+
+params:
+ - key: auto_start
+ label: Auto Start
+ type: boolean
+ default: false
+ description: "Start this skill automatically when Aegis launches"
+
+ - key: confidence
+ label: Confidence Threshold
+ type: number
+ default: 0.5
+ description: "Minimum detection confidence (0.1–1.0). Lower than GPU YOLO — Edge TPU INT8 quantization produces softer scores"
+
+ - key: fps
+ label: Frame Rate
+ type: select
+ default: 5
+ description: "Detection processing rate — Edge TPU is fast enough for real-time"
+ options:
+ - { value: 0.2, label: "Ultra Low (0.2 FPS)" }
+ - { value: 0.5, label: "Low (0.5 FPS)" }
+ - { value: 1, label: "Normal (1 FPS)" }
+ - { value: 3, label: "Active (3 FPS)" }
+ - { value: 5, label: "High (5 FPS)" }
+ - { value: 15, label: "Real-time (15 FPS)" }
+
+ - key: classes
+ label: Detection Classes
+ type: string
+ default: "person,car,dog,cat"
+ description: "Comma-separated COCO class names to detect"
+
+ - key: input_size
+ label: Input Resolution
+ type: select
+ default: 320
+ description: "Image size for inference — 320 fits fully on TPU (~4ms), 640 partially runs on CPU (~20ms)"
+ options:
+ - { value: 320, label: "320×320 (fastest, fully on-chip)" }
+ - { value: 640, label: "640×640 (more accurate, partially CPU)" }
+
+ - key: tpu_device
+ label: TPU Device
+ type: select
+ default: auto
+ description: "Which Edge TPU to use — 'auto' selects the first available device"
+ options:
+ - { value: auto, label: "Auto-detect (first available)" }
+ - { value: "0", label: "TPU 0" }
+ - { value: "1", label: "TPU 1" }
+ - { value: "2", label: "TPU 2" }
+ - { value: "3", label: "TPU 3" }
+
+ - key: clock_speed
+ label: TPU Clock Speed
+ type: select
+ default: standard
+ description: "Edge TPU operating frequency — 'max' is faster but runs hotter and needs cooling"
+ options:
+ - { value: standard, label: "Standard (lower power, cooler)" }
+ - { value: max, label: "Maximum (faster inference, needs cooling)" }
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/deploy.bat b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/deploy.bat
new file mode 100644
index 00000000..b7df110d
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/deploy.bat
@@ -0,0 +1,88 @@
+@echo off
+setlocal enabledelayedexpansion
+title Aegis-AI WSL Coral TPU Deployer
+
+echo ===================================================
+echo Aegis-AI Windows WSL Coral TPU Deployment
+echo ===================================================
+echo.
+echo This script will install the Edge TPU dependencies
+echo natively inside the Windows Subsystem for Linux (WSL).
+echo It utilizes usbipd-win to map the Coral USB to the
+echo Linux Kernel, ensuring maximum stability.
+echo.
+
+:: 1. Verify wsl exists
+where wsl >nul 2>nul
+if %errorlevel% neq 0 (
+    echo [AEGIS_PAUSE_MODAL] file=install_wsl.bat; msg=Windows Subsystem for Linux ^(WSL^) is required to run the Coral TPU natively. Please click 'Launch Installer' ^(requires Admin^) to install it, then verify your machine restarts. Once back, click 'Done'.
+ set /p DUMMY="Waiting for user to install WSL and click Done..."
+
+ where wsl >nul 2>nul
+    if !errorlevel! neq 0 (
+ echo ERROR: WSL is still not installed. Aborting deployment.
+ exit /b 1
+ )
+)
+
+:: 2. Verify usbipd exists
+where usbipd >nul 2>nul
+if %errorlevel% neq 0 (
+ echo [AEGIS_PAUSE_MODAL] file=install_usbipd.bat; msg=usbipd-win is required to pass the Coral TPU to WSL. Please click 'Launch Installer' to install it via winget, then click 'Done'.
+ set /p DUMMY="Waiting for user to install usbipd and click Done..."
+
+ where usbipd >nul 2>nul
+ if !errorlevel! neq 0 (
+ if exist "C:\Program Files\usbipd-win\usbipd.exe" (
+ set "PATH=%PATH%;C:\Program Files\usbipd-win\"
+ ) else (
+ echo ERROR: usbipd is still not installed. Aborting deployment.
+ exit /b 1
+ )
+ )
+)
+
+:: 3. Inform about hardware binding
+echo [1/4] Ensuring hardware is bound...
+echo Note: Hardware IDs 18d1:9302 and 1a6e:089a must be bound to usbipd.
+echo If they are not bound yet, please run 'usbipd bind' as Administrator.
+
+:: 4. Get the WSL path to the current directory
+set "DIR_PATH=%~dp0"
+set "DIR_PATH=%DIR_PATH:\=/%"
+set "DIR_PATH=%DIR_PATH:C:=/mnt/c%"
+set "DIR_PATH=%DIR_PATH:~0,-1%"
+
+:: 5. Install Dependencies inside WSL
+echo.
+echo [2/4] Initializing WSL Python 3.9 environment...
+wsl -u root -e bash -c "apt-get update && apt-get install -y software-properties-common curl wget libusb-1.0-0 && add-apt-repository -y ppa:deadsnakes/ppa && apt-get update && apt-get install -y python3.9 python3.9-venv python3.9-distutils"
+if %errorlevel% neq 0 (
+ echo ERROR: Failed to install Python 3.9 in WSL. Ensure you have internet access and WSL is running Ubuntu.
+ exit /b 1
+)
+
+:: 6. Create Virtual Env
+echo.
+echo [3/4] Creating Virtual Environment...
+wsl -e bash -c "cd '%DIR_PATH%' && python3.9 -m venv wsl_venv"
+if %errorlevel% neq 0 (
+ echo ERROR: Failed to create venv.
+ exit /b 1
+)
+
+:: 7. Install Python Packages and EdgeTPU Lib
+echo.
+echo [4/4] Installing Python requirements and Coral TPU drivers...
+wsl -e bash -c "cd '%DIR_PATH%' && source wsl_venv/bin/activate && curl -sS https://bootstrap.pypa.io/get-pip.py | python3.9 && python3.9 -m pip install -r requirements.txt"
+wsl -u root -e bash -c "cd '%DIR_PATH%' && wget -qO libedgetpu.deb https://packages.cloud.google.com/apt/pool/coral-edgetpu-stable/libedgetpu1-max_16.0_amd64_0ac21f1924dd4b125d5cfc5f6d0e4a5e.deb && dpkg -x libedgetpu.deb ext && cp ext/usr/lib/x86_64-linux-gnu/libedgetpu.so.1.0 libedgetpu.so.1 && rm -rf ext libedgetpu.deb"
+:: Install libedgetpu into the real WSL Linux filesystem so dlopen() works (NTFS /mnt/c/ lacks exec bit)
+wsl -u root -e bash -c "cp '%DIR_PATH%/libedgetpu.so.1' /usr/local/lib/libedgetpu.so.1 && ldconfig"
+
+echo.
+echo.
+echo SUCCESS: Windows WSL Deployment Complete!
+echo.
+echo Aegis-AI is ready to trigger the detection node natively on WSL!
+echo You can safely close this terminal.
+exit /b 0
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker-compose.yml b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker-compose.yml
new file mode 100644
index 00000000..ef17cba8
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker-compose.yml
@@ -0,0 +1,38 @@
+# Coral TPU Detection — Docker Compose for manual testing
+#
+# Usage:
+# Linux: docker compose up
+# macOS: Docker Desktop 4.35+ handles USB/IP automatically
+# Windows: Docker Desktop 4.35+ with USB/IP or WSL2 backend
+#
+# Interactive mode (for JSONL stdin/stdout testing):
+# docker compose run --rm coral-detect
+
+services:
+ coral-detect:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ image: aegis-coral-tpu:latest
+ stdin_open: true # Keep stdin open for JSONL input
+ tty: false # No pseudo-TTY — raw pipe mode
+ devices:
+ - /dev/bus/usb:/dev/bus/usb # Linux USB passthrough
+ volumes:
+ - /tmp/aegis_detection:/tmp/aegis_detection # Shared frame exchange
+ environment:
+ - PYTHONUNBUFFERED=1
+      - AEGIS_SKILL_ID=yolo-detection-2026-coral-tpu-win-wsl
+ - AEGIS_SKILL_PARAMS={"confidence":0.5,"classes":"person,car,dog,cat","fps":5,"input_size":320,"tpu_device":"auto","clock_speed":"standard"}
+ restart: "no"
+
+ # Utility: probe connected TPU devices
+ tpu-probe:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ devices:
+ - /dev/bus/usb:/dev/bus/usb
+ entrypoint: ["python3", "scripts/tpu_probe.py"]
+ profiles:
+ - tools # Only runs with: docker compose --profile tools run tpu-probe
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/Dockerfile b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/Dockerfile
new file mode 100644
index 00000000..5f349b83
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/Dockerfile
@@ -0,0 +1,56 @@
+# syntax=docker/dockerfile:1
+# ─────────────────────────────────────────────────────────────────────────────
+# Coral Edge TPU Model Compiler
+#
+# Compiles YOLO 2026 nano to Google Coral Edge TPU .tflite format using
+# ultralytics' built-in format="edgetpu" export pipeline.
+#
+# Per https://docs.ultralytics.com/guides/coral-edge-tpu-on-raspberry-pi/:
+# model.export(format="edgetpu") handles the full pipeline:
+# .pt → ONNX → onnx2tf SavedModel → TFLite INT8 → edgetpu_compiler
+#
+# MUST run on linux/amd64 — edgetpu_compiler is x86_64 Linux only.
+# On Apple Silicon or Windows, Docker Desktop handles QEMU emulation.
+# ─────────────────────────────────────────────────────────────────────────────
+
+FROM --platform=linux/amd64 python:3.11-slim
+
+LABEL maintainer="Aegis-AI / DeepCamera"
+LABEL description="Compiles YOLO 2026 to Google Coral Edge TPU .tflite"
+
+# ── System deps ───────────────────────────────────────────────────────────────
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ curl \
+ gnupg \
+ apt-transport-https \
+ ca-certificates \
+ libgl1 \
+ libglib2.0-0 \
+ && rm -rf /var/lib/apt/lists/*
+
+# ── edgetpu_compiler from Google Coral apt (x86_64 only) ─────────────────────
+RUN curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg \
+ | gpg --dearmor -o /usr/share/keyrings/coral-edgetpu.gpg \
+ && echo "deb [signed-by=/usr/share/keyrings/coral-edgetpu.gpg] \
+ https://packages.cloud.google.com/apt coral-edgetpu-stable main" \
+ > /etc/apt/sources.list.d/coral-edgetpu.list \
+ && apt-get update \
+ && apt-get install -y --no-install-recommends edgetpu-compiler \
+ && rm -rf /var/lib/apt/lists/*
+
+# ── Python: ultralytics handles all TF/onnx2tf version management ─────────────
+# ultralytics auto-installs: onnx2tf, tensorflow-cpu, onnxslim, etc.
+RUN pip install --no-cache-dir \
+ "ultralytics>=8.3.0" \
+ "numpy>=1.24.0,<2.0"
+
+# ── Copy compile entrypoint ───────────────────────────────────────────────────
+WORKDIR /compile
+COPY scripts/compile_model.py /compile/compile_model.py
+
+# ── Output volume (mount skill's models/ directory here) ──────────────────────
+VOLUME ["/compile/output"]
+
+# ── Entrypoint ────────────────────────────────────────────────────────────────
+ENTRYPOINT ["python", "/compile/compile_model.py"]
+CMD ["--model", "yolo26n", "--size", "320", "--output", "/compile/output"]
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/README.md b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/README.md
new file mode 100644
index 00000000..dd4b1a90
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/README.md
@@ -0,0 +1,84 @@
+# Coral Edge TPU Model Compiler — Docker
+
+Converts the YOLO 2026 nano model to Google Coral Edge TPU format using
+a Docker container (so `edgetpu_compiler` runs on Linux x86_64, even from
+Windows or Apple Silicon machines).
+
+## Pipeline
+
+```
+yolo26n.pt → [ultralytics export INT8] → yolo26n_int8.tflite
+ → [edgetpu_compiler] → yolo26n_int8_edgetpu.tflite
+```
+
+The compiled `.tflite` file is written to `../models/` and then committed
+to the git repository so `deploy.bat` / `deploy.sh` can pick it up without
+needing to compile again.
+
+## Requirements
+
+- Docker Desktop (Windows / macOS) or Docker Engine (Linux)
+- Internet access on first run (downloads `yolo26n.pt` from ultralytics + base image)
+- On Windows: Docker Desktop with WSL2 backend recommended
+
+## Quick Start
+
+### Option A — Shell script (Linux / macOS / Git Bash on Windows)
+
+```bash
+bash docker/compile.sh
+```
+
+### Option B — Docker Compose
+
+```bash
+# From the skill root (yolo-detection-2026-coral-tpu/)
+docker compose -f docker/docker-compose.yml run --rm coral-compiler
+```
+
+### Option C — Raw Docker commands
+
+```bash
+# Build
+docker build --platform linux/amd64 -t coral-tpu-compiler -f docker/Dockerfile .
+
+# Run (mounts models/ as output)
+docker run --rm --platform linux/amd64 \
+ -v "$(pwd)/models:/compile/output" \
+ coral-tpu-compiler \
+ --model yolo26n --size 320 --output /compile/output
+```
+
+## Output Files
+
+After compilation, `models/` will contain:
+
+| File | Size | Notes |
+|------|------|-------|
+| `yolo26n_int8.tflite` | ~3–4 MB | Full-integer quantized (CPU fallback) |
+| `yolo26n_int8_edgetpu.tflite` | ~3–4 MB | Compiled for Edge TPU (primary model) |
+
+> **Note**: `edgetpu_compiler` may warn that some YOLO operations are not mapped
+> to the Edge TPU and will fall back to CPU. This is expected for larger YOLO
+> architectures with complex postprocessing. The 320×320 nano model achieves
+> ~100% on-chip mapping.
+
+## Committing the Model
+
+```bash
+git add models/*.tflite
+git commit -m "feat(coral-tpu): add compiled yolo26n edgetpu model (320x320 INT8)"
+git push
+```
+
+## Recompiling
+
+If you update the YOLO model or want a 640×640 version:
+
+```bash
+# 640×640 version
+bash docker/compile.sh --model yolo26n --size 640
+
+# Small model
+bash docker/compile.sh --model yolo26s --size 320
+```
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/compile.sh b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/compile.sh
new file mode 100644
index 00000000..88f035db
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/compile.sh
@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+# compile.sh — Build and run the Coral EdgeTPU model compiler Docker image.
+#
+# Converts yolo26n.pt → TFLite INT8 → yolo26n_edgetpu.tflite
+# Output lands in ../models/ (tracked in the git repo).
+#
+# Usage:
+# bash docker/compile.sh # 320×320 nano (default)
+# bash docker/compile.sh --size 640 # 640×640 nano
+# bash docker/compile.sh --model yolo26s # small model
+#
+# Requirements:
+# - Docker with buildx / multi-platform support
+# - Internet access (downloads yolo26n.pt from ultralytics on first run)
+#
+# On Apple Silicon or Windows, Docker Desktop handles linux/amd64 emulation
+# via Rosetta / QEMU automatically. First run will be slower (emulation).
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+SKILL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
+MODELS_DIR="$SKILL_DIR/models"
+IMAGE_NAME="coral-tpu-compiler"
+
+# ── Parse args (pass-through to compile_model.py) ────────────────────────────
+COMPILE_ARGS=("$@")
+if [[ ${#COMPILE_ARGS[@]} -eq 0 ]]; then
+ COMPILE_ARGS=(--model yolo26n --size 320 --output /compile/output)
+fi
+
+log() { echo "[compile.sh] $*" >&2; }
+
+log "Skill dir : $SKILL_DIR"
+log "Models out: $MODELS_DIR"
+log "Args : ${COMPILE_ARGS[*]}"
+
+# ── Ensure models dir exists ──────────────────────────────────────────────────
+mkdir -p "$MODELS_DIR"
+
+# ── Build image (linux/amd64 required for edgetpu_compiler) ──────────────────
+log "Building Docker image: $IMAGE_NAME (linux/amd64)..."
+docker build \
+ --platform linux/amd64 \
+ --tag "$IMAGE_NAME:latest" \
+ --file "$SCRIPT_DIR/Dockerfile" \
+ "$SKILL_DIR"
+
+log "Build complete. Running model compiler..."
+
+# ── Run compiler, mount models/ as output volume ──────────────────────────────
+docker run --rm \
+ --platform linux/amd64 \
+ --name coral-tpu-compile-run \
+ -v "$MODELS_DIR:/compile/output" \
+ "$IMAGE_NAME:latest" \
+ "${COMPILE_ARGS[@]}"
+
+echo ""
+log "✓ Compilation complete. Output files in: $MODELS_DIR"
+log ""
+log "Files produced:"
+ls -lh "$MODELS_DIR"/*.tflite 2>/dev/null || log " (no .tflite files yet — check compile output above)"
+
+echo ""
+log "Next steps:"
+log " 1. Verify the model: ls -lh $MODELS_DIR/*_edgetpu.tflite"
+log " 2. Commit to git: git -C '$SKILL_DIR' add models/*.tflite && git commit -m 'feat(coral-tpu): add compiled yolo26n edgetpu model'"
+log " 3. Run deploy.bat on your Windows machine to install the skill."
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/docker-compose.yml b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/docker-compose.yml
new file mode 100644
index 00000000..fc53b40f
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker/docker-compose.yml
@@ -0,0 +1,35 @@
+services:
+ # ─────────────────────────────────────────────────────────────────────────
+ # coral-compiler
+ #
+ # Compiles YOLO 2026 nano (.pt) → TFLite INT8 → Edge TPU (.tflite).
+ # Output files land in the skill's models/ directory.
+ #
+ # Usage:
+ # docker compose run --rm coral-compiler # default 320×320
+ # docker compose run --rm coral-compiler --size 640 # 640×640
+ # docker compose run --rm coral-compiler --model yolo26s # small model
+ #
+ # Build only:
+ # docker compose build coral-compiler
+ # ─────────────────────────────────────────────────────────────────────────
+ coral-compiler:
+ build:
+ context: ..
+ dockerfile: docker/Dockerfile
+ platforms:
+ - linux/amd64
+ platform: linux/amd64
+ image: coral-tpu-compiler:latest
+ volumes:
+ # Mount the skill's models/ directory as the compiler output
+ - ../models:/compile/output
+ command:
+ - --model
+ - yolo26n
+ - --size
+ - "320"
+ - --output
+ - /compile/output
+ # Don't restart — this is a one-shot build job
+ restart: "no"
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker_out.log b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker_out.log
new file mode 100644
index 00000000..5da2adda
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/docker_out.log
@@ -0,0 +1,129 @@
+Collecting tflite-runtime==2.14.0
+ Downloading tflite_runtime-2.14.0-cp39-cp39-manylinux2014_x86_64.whl (2.4 MB)
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.4/2.4 MB 16.7 MB/s eta 0:00:00
+Collecting pillow
+ Downloading pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl (6.6 MB)
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.6/6.6 MB 39.5 MB/s eta 0:00:00
+Collecting numpy
+ Downloading numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (19.5 MB)
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 19.5/19.5 MB 33.4 MB/s eta 0:00:00
+Installing collected packages: pillow, numpy, tflite-runtime
+Successfully installed numpy-2.0.2 pillow-11.3.0 tflite-runtime-2.14.0
+WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
+
+[notice] A new release of pip is available: 23.0.1 -> 26.0.1
+[notice] To update, run: pip install --upgrade pip
+Get:1 http://deb.debian.org/debian trixie InRelease [140 kB]
+Get:2 http://deb.debian.org/debian trixie-updates InRelease [47.3 kB]
+Get:3 http://deb.debian.org/debian-security trixie-security InRelease [43.4 kB]
+Get:4 http://deb.debian.org/debian trixie/main amd64 Packages [9671 kB]
+Get:5 http://deb.debian.org/debian trixie-updates/main amd64 Packages [5412 B]
+Get:6 http://deb.debian.org/debian-security trixie-security/main amd64 Packages [114 kB]
+Fetched 10.0 MB in 1s (10.4 MB/s)
+Reading package lists...
+Reading package lists...
+Building dependency tree...
+Reading state information...
+The following additional packages will be installed:
+ libgnutls30t64 libidn2-0 libp11-kit0 libpsl5t64 libtasn1-6 libunistring5
+ publicsuffix
+Suggested packages:
+ gnutls-bin
+The following NEW packages will be installed:
+ libgnutls30t64 libidn2-0 libp11-kit0 libpsl5t64 libtasn1-6 libunistring5
+ libusb-1.0-0 publicsuffix wget
+0 upgraded, 9 newly installed, 0 to remove and 14 not upgraded.
+Need to get 3926 kB of archives.
+After this operation, 13.0 MB of additional disk space will be used.
+Get:1 http://deb.debian.org/debian trixie/main amd64 libunistring5 amd64 1.3-2 [477 kB]
+Get:2 http://deb.debian.org/debian trixie/main amd64 libidn2-0 amd64 2.3.8-2 [109 kB]
+Get:3 http://deb.debian.org/debian trixie/main amd64 libp11-kit0 amd64 0.25.5-3 [425 kB]
+Get:4 http://deb.debian.org/debian trixie/main amd64 libtasn1-6 amd64 4.20.0-2 [49.9 kB]
+Get:5 http://deb.debian.org/debian trixie/main amd64 libgnutls30t64 amd64 3.8.9-3+deb13u2 [1468 kB]
+Get:6 http://deb.debian.org/debian trixie/main amd64 libpsl5t64 amd64 0.21.2-1.1+b1 [57.2 kB]
+Get:7 http://deb.debian.org/debian trixie/main amd64 wget amd64 1.25.0-2 [984 kB]
+Get:8 http://deb.debian.org/debian trixie/main amd64 libusb-1.0-0 amd64 2:1.0.28-1 [59.6 kB]
+Get:9 http://deb.debian.org/debian trixie/main amd64 publicsuffix all 20250328.1952-0.1 [296 kB]
+debconf: unable to initialize frontend: Dialog
+debconf: (TERM is not set, so the dialog frontend is not usable.)
+debconf: falling back to frontend: Readline
+debconf: unable to initialize frontend: Readline
+debconf: (Can't locate Term/ReadLine.pm in @INC (you may need to install the Term::ReadLine module) (@INC entries checked: /etc/perl /usr/local/lib/x86_64-linux-gnu/perl/5.40.1 /usr/local/share/perl/5.40.1 /usr/lib/x86_64-linux-gnu/perl5/5.40 /usr/share/perl5 /usr/lib/x86_64-linux-gnu/perl-base /usr/lib/x86_64-linux-gnu/perl/5.40 /usr/share/perl/5.40 /usr/local/lib/site_perl) at /usr/share/perl5/Debconf/FrontEnd/Readline.pm line 8, <> line 9.)
+debconf: falling back to frontend: Teletype
+debconf: unable to initialize frontend: Teletype
+debconf: (This frontend requires a controlling tty.)
+debconf: falling back to frontend: Noninteractive
+Fetched 3926 kB in 0s (13.3 MB/s)
+Selecting previously unselected package libunistring5:amd64.
+(Reading database ...
(Reading database ... 5%
(Reading database ... 10%
(Reading database ... 15%
(Reading database ... 20%
(Reading database ... 25%
(Reading database ... 30%
(Reading database ... 35%
(Reading database ... 40%
(Reading database ... 45%
(Reading database ... 50%
(Reading database ... 55%
(Reading database ... 60%
(Reading database ... 65%
(Reading database ... 70%
(Reading database ... 75%
(Reading database ... 80%
(Reading database ... 85%
(Reading database ... 90%
(Reading database ... 95%
(Reading database ... 100%
(Reading database ... 5644 files and directories currently installed.)
+Preparing to unpack .../0-libunistring5_1.3-2_amd64.deb ...
+Unpacking libunistring5:amd64 (1.3-2) ...
+Selecting previously unselected package libidn2-0:amd64.
+Preparing to unpack .../1-libidn2-0_2.3.8-2_amd64.deb ...
+Unpacking libidn2-0:amd64 (2.3.8-2) ...
+Selecting previously unselected package libp11-kit0:amd64.
+Preparing to unpack .../2-libp11-kit0_0.25.5-3_amd64.deb ...
+Unpacking libp11-kit0:amd64 (0.25.5-3) ...
+Selecting previously unselected package libtasn1-6:amd64.
+Preparing to unpack .../3-libtasn1-6_4.20.0-2_amd64.deb ...
+Unpacking libtasn1-6:amd64 (4.20.0-2) ...
+Selecting previously unselected package libgnutls30t64:amd64.
+Preparing to unpack .../4-libgnutls30t64_3.8.9-3+deb13u2_amd64.deb ...
+Unpacking libgnutls30t64:amd64 (3.8.9-3+deb13u2) ...
+Selecting previously unselected package libpsl5t64:amd64.
+Preparing to unpack .../5-libpsl5t64_0.21.2-1.1+b1_amd64.deb ...
+Unpacking libpsl5t64:amd64 (0.21.2-1.1+b1) ...
+Selecting previously unselected package wget.
+Preparing to unpack .../6-wget_1.25.0-2_amd64.deb ...
+Unpacking wget (1.25.0-2) ...
+Selecting previously unselected package libusb-1.0-0:amd64.
+Preparing to unpack .../7-libusb-1.0-0_2%3a1.0.28-1_amd64.deb ...
+Unpacking libusb-1.0-0:amd64 (2:1.0.28-1) ...
+Selecting previously unselected package publicsuffix.
+Preparing to unpack .../8-publicsuffix_20250328.1952-0.1_all.deb ...
+Unpacking publicsuffix (20250328.1952-0.1) ...
+Setting up libp11-kit0:amd64 (0.25.5-3) ...
+Setting up libunistring5:amd64 (1.3-2) ...
+Setting up libtasn1-6:amd64 (4.20.0-2) ...
+Setting up libusb-1.0-0:amd64 (2:1.0.28-1) ...
+Setting up publicsuffix (20250328.1952-0.1) ...
+Setting up libidn2-0:amd64 (2.3.8-2) ...
+Setting up libgnutls30t64:amd64 (3.8.9-3+deb13u2) ...
+Setting up libpsl5t64:amd64 (0.21.2-1.1+b1) ...
+Setting up wget (1.25.0-2) ...
+Processing triggers for libc-bin (2.41-12) ...
+--2026-03-31 19:47:38-- https://packages.cloud.google.com/apt/pool/coral-edgetpu-stable/libedgetpu1-max_16.0_amd64_0ac21f1924dd4b125d5cfc5f6d0e4a5e.deb
+Resolving packages.cloud.google.com (packages.cloud.google.com)... 142.251.218.142, 2607:f8b0:4005:801::200e
+Connecting to packages.cloud.google.com (packages.cloud.google.com)|142.251.218.142|:443... connected.
+HTTP request sent, awaiting response... 200 OK
+Length: 387960 (379K) [application/vnd.debian.binary-package]
+Saving to: ‘libedgetpu.deb’
+
+ 0K .......... .......... .......... .......... .......... 13% 1.50M 0s
+ 50K .......... .......... .......... .......... .......... 26% 2.72M 0s
+ 100K .......... .......... .......... .......... .......... 39% 3.45M 0s
+ 150K .......... .......... .......... .......... .......... 52% 5.38M 0s
+ 200K .......... .......... .......... .......... .......... 65% 6.17M 0s
+ 250K .......... .......... .......... .......... .......... 79% 5.45M 0s
+ 300K .......... .......... .......... .......... .......... 92% 7.87M 0s
+ 350K .......... .......... ........ 100% 7.11M=0.1s
+
+2026-03-31 19:47:39 (3.67 MB/s) - ‘libedgetpu.deb’ saved [387960/387960]
+
+Traceback (most recent call last):
+ File "/app/wsl_test.py", line 9, in
+ delegate = tflite.load_delegate("libedgetpu.so.1")
+ File "/usr/local/lib/python3.9/site-packages/tflite_runtime/interpreter.py", line 166, in load_delegate
+ delegate = Delegate(library, options)
+ File "/usr/local/lib/python3.9/site-packages/tflite_runtime/interpreter.py", line 73, in __init__
+ self._library = ctypes.pydll.LoadLibrary(library)
+ File "/usr/local/lib/python3.9/ctypes/__init__.py", line 452, in LoadLibrary
+ return self._dlltype(name)
+ File "/usr/local/lib/python3.9/ctypes/__init__.py", line 374, in __init__
+ self._handle = _dlopen(self._name, mode)
+OSError: libedgetpu.so.1: cannot open shared object file: No such file or directory
+Exception ignored in: <function Delegate.__del__ at 0x...>
+Traceback (most recent call last):
+ File "/usr/local/lib/python3.9/site-packages/tflite_runtime/interpreter.py", line 109, in __del__
+ if self._library is not None:
+AttributeError: 'Delegate' object has no attribute '_library'
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/input.json b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/input.json
new file mode 100644
index 00000000..0db108a2
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/input.json
@@ -0,0 +1 @@
+{"event":"frame","frame_path":"test2.jpg","frame_id":"test2","camera_id":"test2","timestamp":"123"}
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/install_usbipd.bat b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/install_usbipd.bat
new file mode 100644
index 00000000..0f42e96f
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/install_usbipd.bat
@@ -0,0 +1,6 @@
+@echo off
+echo Installing usbipd-win via Windows Package Manager...
+winget install usbipd -e --accept-package-agreements --accept-source-agreements
+echo.
+echo Please close this window to continue.
+pause
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/install_wsl.bat b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/install_wsl.bat
new file mode 100644
index 00000000..d618cc65
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/install_wsl.bat
@@ -0,0 +1,6 @@
+@echo off
+echo Installing Windows Subsystem for Linux...
+wsl --install
+echo.
+echo Please reboot if Windows prompts you to, or close this window to continue.
+pause
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/libedgetpu.so.1 b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/libedgetpu.so.1
new file mode 100644
index 00000000..774993bb
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/libedgetpu.so.1 differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/README.md b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/README.md
new file mode 100644
index 00000000..77b7b9e5
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/README.md
@@ -0,0 +1,43 @@
+# Pre-compiled YOLO 2026 Nano models for Google Coral Edge TPU
+
+Place compiled `.tflite` files here. They are committed to the repository
+so `deploy.bat` and `deploy.sh` can use them without needing a Linux machine.
+
+## Files Expected
+
+| File | Size | Notes |
+|------|------|-------|
+| `yolo26n_int8_edgetpu.tflite` | ~3–4 MB | Edge TPU compiled (primary) |
+| `yolo26n_int8.tflite` | ~3–4 MB | CPU fallback |
+
+## How to Compile
+
+The `edgetpu_compiler` only runs on x86_64 Linux. Use the included Docker
+setup to compile from any OS (Windows, macOS, Linux):
+
+```bash
+# From the yolo-detection-2026-coral-tpu-win-wsl/ root:
+bash docker/compile.sh
+```
+
+Or with Docker Compose:
+```bash
+docker compose -f docker/docker-compose.yml run --rm coral-compiler
+```
+
+See `docker/README.md` for full instructions.
+
+## After Compiling
+
+```bash
+git add models/*.tflite
+git commit -m "feat(coral-tpu): add compiled yolo26n edgetpu model (320x320 INT8)"
+git push
+```
+
+## Alternative: CPU Fallback
+
+If no EdgeTPU model is present, `deploy.bat` / `deploy.sh` will download
+`ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite` as a functional
+fallback. This is SSD MobileNet (not YOLO 2026), but confirms the TPU
+pipeline works before the YOLO model is compiled.
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/ssd_mobilenet_v2_coco_quant_postprocess.tflite b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/ssd_mobilenet_v2_coco_quant_postprocess.tflite
new file mode 100644
index 00000000..fb62c75b
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/ssd_mobilenet_v2_coco_quant_postprocess.tflite differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite
new file mode 100644
index 00000000..765a7b8c
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/models/yolo26n_full_integer_quant_edgetpu.tflite b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/yolo26n_full_integer_quant_edgetpu.tflite
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/models/yolo26n_full_integer_quant_edgetpu.tflite
rename to skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/yolo26n_full_integer_quant_edgetpu.tflite
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/models/yolo26n_int8.tflite b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/yolo26n_int8.tflite
similarity index 100%
rename from skills/detection/yolo-detection-2026-coral-tpu/models/yolo26n_int8.tflite
rename to skills/detection/yolo-detection-2026-coral-tpu-win-wsl/models/yolo26n_int8.tflite
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/output.txt b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/output.txt
new file mode 100644
index 00000000..cfb40203
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/output.txt
@@ -0,0 +1,8 @@
+[coral-detect] Starting with params: {}
+[coral-detect] Edge TPU delegate not available: Failed to load delegate from C:\Users\work\.aegis-ai\skills\yolo-detection-2026-coral-tpu\lib\edgetpu.dll
+
+[coral-detect] Falling back to CPU inference
+[coral-detect] Falling back to universal SSD MobileNet CPU model
+[coral-detect] Loaded model on CPU: C:\Users\work\.aegis-ai\skills\yolo-detection-2026-coral-tpu\models\ssd_mobilenet_v2_coco_quant_postprocess.tflite
+{"event": "ready", "model": "yolo26n_edgetpu", "device": "cpu", "format": "edgetpu_tflite", "runtime": "ai-edge-litert", "tpu_count": 0, "classes": 80, "input_size": 320, "fps": 5}
+[coral-detect] Ready waiting for frame events on stdin
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/requirements.txt b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/requirements.txt
new file mode 100644
index 00000000..2197a4ba
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/requirements.txt
@@ -0,0 +1,18 @@
+# Coral TPU Detection Skill — Python Dependencies
+#
+# Runtime: ai-edge-litert (LiteRT) — Google's modern TFLite runtime
+# Supports Python 3.9–3.13. No pycoral required.
+# detect.py uses the low-level ai-edge-litert API directly with
+# libedgetpu as a delegate — this is faster and simpler than pycoral.
+#
+# Hardware driver: libedgetpu (system library, installed separately)
+# Linux: libedgetpu.so.1 (via apt: libedgetpu1-std)
+# macOS: libedgetpu.1.dylib (via deploy-macos.sh)
+# Windows: edgetpu.dll (via deploy.bat UAC install)
+
+# LiteRT runtime — loads Edge TPU delegate on all platforms
+ai-edge-litert>=2.1.0
+
+# Image processing
+numpy>=1.24.0,<2.0
+Pillow>=10.0.0
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/compile_model.py b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/compile_model.py
new file mode 100644
index 00000000..1b155af6
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/compile_model.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+"""
+Coral TPU Model Compiler — YOLO 2026 → Edge TPU .tflite
+
+Uses ultralytics' built-in format="edgetpu" export, which handles the full
+pipeline internally:
+ .pt → ONNX → TFLite INT8 (via onnx2tf) → edgetpu_compiler → _edgetpu.tflite
+
+Per the Ultralytics docs (https://docs.ultralytics.com/guides/coral-edge-tpu-on-raspberry-pi/):
+ model.export(format="edgetpu")
+
+Output file: <model>_saved_model/<model>_full_integer_quant_edgetpu.tflite
+Copied to:   <output>/<model>_full_integer_quant_edgetpu.tflite
+             <output>/<model>_full_integer_quant.tflite  (CPU fallback)
+
+Requirements (pre-installed in Docker):
+ - ultralytics >= 8.3.0
+ - edgetpu_compiler (x86_64 Linux — Google Coral apt package)
+
+Usage:
+ python compile_model.py --model yolo26n --size 320 --output /compile/output
+ python compile_model.py --model yolo26n --size 640 --output /compile/output
+"""
+
+import argparse
+import glob
+import os
+import shutil
+import subprocess
+import sys
+from pathlib import Path
+
+
+def log(msg):
+ print(f"[compile] {msg}", flush=True)
+
+
+def check_edgetpu_compiler():
+ try:
+ r = subprocess.run(["edgetpu_compiler", "--version"],
+ capture_output=True, text=True, timeout=10)
+ log(f"edgetpu_compiler: {r.stdout.strip()}")
+ return True
+ except (FileNotFoundError, subprocess.TimeoutExpired):
+ log("ERROR: edgetpu_compiler not found.")
+ return False
+
+
+def export_edgetpu(model_name, imgsz, output_dir):
+ """
+ Export YOLO model using ultralytics format="edgetpu".
+
+ ultralytics handles:
+ 1. ONNX export
+ 2. onnx2tf → SavedModel
+ 3. TFLiteConverter INT8 quantization
+ 4. edgetpu_compiler
+
+ The only requirement is that edgetpu_compiler is on PATH.
+ """
+ try:
+ from ultralytics import YOLO
+ except ImportError:
+ log("ERROR: ultralytics not installed.")
+ sys.exit(1)
+
+ log(f"Exporting {model_name}.pt → Edge TPU (imgsz={imgsz})...")
+ log("This will: download model → ONNX → TFLite INT8 → edgetpu_compiler")
+ log("Estimated time: 5-15 minutes on first run (model download + compilation)")
+
+ model = YOLO(f"{model_name}.pt") # auto-downloads from ultralytics hub
+ result = model.export(
+ format="edgetpu",
+ imgsz=imgsz,
+ )
+ log(f"Export result: {result}")
+ return str(result) if result else None
+
+
+def collect_outputs(model_name, output_dir):
+ """
+ Copy compiled .tflite files to output_dir.
+    ultralytics saves to: ./<model_name>_saved_model/
+ """
+ os.makedirs(output_dir, exist_ok=True)
+ saved_model_dir = f"{model_name}_saved_model"
+
+ patterns = [
+ f"{saved_model_dir}/*_edgetpu.tflite", # Edge TPU model
+ f"{saved_model_dir}/*_full_integer_quant_edgetpu.tflite",
+ f"{saved_model_dir}/*_int8.tflite", # CPU fallback
+ f"{saved_model_dir}/*_full_integer_quant.tflite",
+ ]
+
+ copied = []
+ seen = set()
+ for pattern in patterns:
+ for src in glob.glob(pattern):
+ dest = os.path.join(output_dir, os.path.basename(src))
+ if src not in seen:
+ shutil.copy2(src, dest)
+ size_mb = os.path.getsize(dest) / (1024 * 1024)
+ log(f" {os.path.basename(src)} → {dest} ({size_mb:.1f} MB)")
+ copied.append(dest)
+ seen.add(src)
+
+ return copied
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Compile YOLO 2026 for Coral Edge TPU via ultralytics format='edgetpu'"
+ )
+ parser.add_argument("--model", default="yolo26n",
+ help="YOLO model name (yolo26n, yolo26s, yolo26m, ...)")
+ parser.add_argument("--size", type=int, default=320,
+ help="Input image size (default: 320)")
+ parser.add_argument("--output", default="/compile/output",
+ help="Output directory for compiled model files")
+ args = parser.parse_args()
+
+ output_dir = args.output # Already absolute from Docker -v mount
+ log(f"Model : {args.model} Size: {args.size}×{args.size} Output: {output_dir}")
+
+ # Verify edgetpu_compiler is available before starting the long export
+ if not check_edgetpu_compiler():
+ log("edgetpu_compiler must be on PATH. Inside Docker it is pre-installed.")
+ sys.exit(1)
+
+ # Run ultralytics edgetpu export
+ export_edgetpu(args.model, args.size, output_dir)
+
+ # Collect and copy output files
+ log("Collecting compiled model files...")
+ outputs = collect_outputs(args.model, output_dir)
+
+ if not outputs:
+ log("ERROR: No .tflite files found after export.")
+ log(f"Check {args.model}_saved_model/ for output files.")
+ sys.exit(1)
+
+ edgetpu_files = [f for f in outputs if "_edgetpu" in f]
+ cpu_files = [f for f in outputs if "_edgetpu" not in f]
+
+ log("")
+ log("✓ Compilation complete!")
+ if edgetpu_files:
+ log(f" Edge TPU model : {edgetpu_files[0]}")
+ if cpu_files:
+ log(f" CPU fallback : {cpu_files[0]}")
+ log("")
+ log("Next steps:")
+ log(" git -C /path/to/DeepCamera add skills/detection/yolo-detection-2026-coral-tpu/models/*.tflite")
+ log(" git commit -m 'feat(coral-tpu): add compiled yolo26n edgetpu model'")
+ log(" git push")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/compile_model_colab.py b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/compile_model_colab.py
new file mode 100644
index 00000000..39c3098c
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/compile_model_colab.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+"""
+Google Colab / Kaggle — Compile YOLO26n for Coral Edge TPU
+
+YOLO26n (released Jan 2026) auto-downloads from Ultralytics.
+Uses `format="edgetpu"` which handles:
+ TFLite INT8 quantization + edgetpu_compiler in one step.
+
+Usage (Colab):
+ 1. Open https://colab.research.google.com
+ 2. New notebook → paste this into a cell → Run all
+ 3. Download the compiled _edgetpu.tflite model
+
+Usage (Kaggle):
+ 1. New notebook → Internet ON, GPU not needed
+ 2. Paste into cell → Run
+"""
+
+# ─── Step 1: Install dependencies ────────────────────────────────────────────
+import subprocess, sys, os
+
+print("=" * 60)
+print("Step 1/3: Installing Ultralytics + Edge TPU compiler...")
+print("=" * 60)
+
+subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
+ "ultralytics>=8.3.0"])
+
+# Install edgetpu_compiler (Colab/Kaggle are x86_64 Linux)
+subprocess.run(["bash", "-c", """
+ curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 2>/dev/null
+ echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" \
+ > /etc/apt/sources.list.d/coral-edgetpu.list
+ apt-get update -qq
+ apt-get install -y -qq edgetpu-compiler
+"""], check=True)
+print("✓ Dependencies ready\n")
+
+# ─── Step 2: Export YOLO26n to Edge TPU ──────────────────────────────────────
+print("=" * 60)
+print("Step 2/3: Downloading YOLO26n + exporting to Edge TPU...")
+print(" (auto-download from Ultralytics → INT8 quantize → edgetpu compile)")
+print("=" * 60)
+
+from ultralytics import YOLO
+
+# YOLO26n auto-downloads from Ultralytics hub (released Jan 2026)
+model = YOLO("yolo26n.pt")
+
+# format="edgetpu" = PT → TFLite INT8 → edgetpu_compiler → _edgetpu.tflite
+edgetpu_model = model.export(format="edgetpu", imgsz=320)
+
+print(f"\n✓ Edge TPU model: {edgetpu_model}")
+size_mb = os.path.getsize(str(edgetpu_model)) / (1024 * 1024)
+print(f" Size: {size_mb:.1f} MB")
+
+# ─── Step 3: Download ───────────────────────────────────────────────────────
+print("\n" + "=" * 60)
+print("Step 3/3: Download compiled model")
+print("=" * 60)
+
+import glob
+edgetpu_files = glob.glob("**/*_edgetpu.tflite", recursive=True)
+print(f"Found {len(edgetpu_files)} compiled model(s):")
+for f in edgetpu_files:
+ sz = os.path.getsize(f) / (1024 * 1024)
+ print(f" {f} ({sz:.1f} MB)")
+
+try:
+ from google.colab import files
+ for f in edgetpu_files:
+ files.download(f)
+ print("\n✓ Download started — check your browser downloads")
+except ImportError:
+ print("\nKaggle: use the Output tab, or:")
+ for f in edgetpu_files:
+ print(f" from IPython.display import FileLink; display(FileLink('{f}'))")
+
+print("\n" + "=" * 60)
+print("Copy the _edgetpu.tflite file to:")
+print(" skills/detection/yolo-detection-2026-coral-tpu/models/")
+print("=" * 60)
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/detect.py b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/detect.py
new file mode 100644
index 00000000..464d20ea
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/detect.py
@@ -0,0 +1,605 @@
+#!/usr/bin/env python3
+"""
+Coral TPU Object Detection — JSONL stdin/stdout protocol
+Uses ai-edge-litert (LiteRT) with Edge TPU delegate for hardware acceleration.
+Same protocol as yolo-detection-2026/scripts/detect.py.
+
+Communication:
+ stdin: {"event": "frame", "frame_id": N, "frame_path": "...", ...}
+ stdout: {"event": "detections", "frame_id": N, "objects": [...]}
+ stderr: Debug logs (ignored by Aegis parser)
+"""
+
+import json
+import os
+import sys
+import time
+import signal
+import threading
+from pathlib import Path
+from typing import Optional, List, Dict, Any, Tuple
+
# ─── Windows DLL search path fix (MUST happen before any native import) ───────
# Registers the skill's lib/ folder with the interpreter's DLL search path
# (os.add_dll_directory) AND prepends it to PATH, so native code loaded later
# can resolve DLLs bundled next to the skill. NOTE(review): assumes lib/ sits
# one directory above this script — confirm against the skill layout.
_LIB_DIR = Path(__file__).parent.parent / "lib"
if sys.platform == "win32" and _LIB_DIR.exists():
    os.add_dll_directory(str(_LIB_DIR))
    os.environ["PATH"] = str(_LIB_DIR) + os.pathsep + os.environ.get("PATH", "")
+
+
+def _win_to_wsl_path(path: str) -> str:
+ """Translate a Windows-style path (C:\\...) to a WSL /mnt/ path when running in WSL."""
+ import re
+ if not path:
+ return path
+ # Already a Unix path — leave it alone
+ if path.startswith('/'):
+ return path
+ # Windows drive letter: C:\Users\... → /mnt/c/Users/...
+ match = re.match(r'^([A-Za-z]):[/\\](.*)', path)
+ if match:
+ drive = match.group(1).lower()
+ rest = match.group(2).replace('\\', '/')
+ return f'/mnt/{drive}/{rest}'
+ return path
+
+import numpy as np
+from PIL import Image
+
# ─── LiteRT imports ────────────────────────────────────────────────────────────
# Import whichever TFLite-compatible interpreter package is available under the
# single alias `litert`, so the rest of the module is package-agnostic.
# HAS_LITERT records whether any interpreter could be imported at all.
HAS_LITERT = False

try:
    import ai_edge_litert.interpreter as litert  # Modern LiteRT (rebranded)
    HAS_LITERT = True
except ImportError:
    try:
        # Older installs: fall back to the legacy tflite_runtime package.
        import tflite_runtime.interpreter as litert  # Legacy PyCoral/TF
        HAS_LITERT = True
    except ImportError:
        # Neither runtime present — _load_model() reports this as a fatal error.
        sys.stderr.write("[coral-detect] WARNING: ai-edge-litert or tflite_runtime not installed\n")
+
+
def log(message: str) -> None:
    """Write one debug line to stderr (stdout is reserved for JSONL events)."""
    print(f"[coral-detect] {message}", file=sys.stderr, flush=True)
+
+
def emit_json(payload: Dict[str, Any]) -> None:
    """Serialize one protocol event as a single JSON line on stdout and flush."""
    print(json.dumps(payload, ensure_ascii=False), file=sys.stdout, flush=True)
+
+
+def _edgetpu_lib_name():
+ """Return the platform-specific libedgetpu shared library name."""
+ import platform
+ system = platform.system()
+
+ if system == "Linux":
+ # Priority 1: system-installed library (copied here by deploy.bat — works on NTFS-free path)
+ system_lib = Path("/usr/local/lib/libedgetpu.so.1")
+ if system_lib.exists():
+ return str(system_lib)
+ # Priority 2: bundled alongside the skill (may fail if on /mnt/c NTFS mount)
+ candidates = [
+ Path(__file__).parent.parent / "libedgetpu.so.1",
+ Path(__file__).parent.parent / "lib" / "libedgetpu.so.1",
+ ]
+ for cand in candidates:
+ if cand.exists():
+ return str(cand.resolve())
+ return "libedgetpu.so.1"
+ elif system == "Darwin":
+ return "libedgetpu.1.dylib"
+ elif system == "Windows":
+ local_dll = _LIB_DIR / "edgetpu.dll"
+ if local_dll.exists():
+ return str(local_dll.resolve())
+ return "edgetpu.dll"
+ return "libedgetpu.so.1"
+
+
# ─── COCO class names (80 classes) ────────────────────────────────────────────
# Index-aligned lookup table: an integer class id from the model indexes
# directly into this list (see _parse_ssd_outputs).
COCO_CLASSES = [
    "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
    "truck", "boat", "traffic light", "fire hydrant", "stop sign",
    "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep",
    "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
    "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
    "sports ball", "kite", "baseball bat", "baseball glove", "skateboard",
    "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork",
    "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
    "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
    "couch", "potted plant", "bed", "dining table", "toilet", "tv",
    "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave",
    "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
    "scissors", "teddy bear", "hair drier", "toothbrush"
]
+
+
class PerfTracker:
    """Collects per-frame timing dicts and periodically reduces them to
    avg/p50/p95/p99 summaries packaged as a ``perf_stats`` event payload."""

    def __init__(self, emit_interval=50):
        self.emit_interval = emit_interval
        self.timings: List[Dict[str, float]] = []
        self.total_frames = 0  # lifetime count; never reset

    def record(self, timing_dict: Dict[str, float]) -> None:
        """Store one frame's timing breakdown."""
        self.total_frames += 1
        self.timings.append(timing_dict)

    def should_emit(self) -> bool:
        """True once enough samples have accumulated for an emission."""
        return len(self.timings) >= self.emit_interval

    def emit_and_reset(self) -> Optional[Dict[str, Any]]:
        """Build the aggregate stats payload, clear the buffer, return it.

        Returns None when nothing was recorded since the last call.
        Percentile indices are clamped to n-1 so tiny sample counts are safe.
        """
        samples = self.timings
        if not samples:
            return None

        n = len(samples)
        per_key: Dict[str, Any] = {}
        for key in samples[0]:
            ordered = sorted(s[key] for s in samples)
            i95 = min(n - 1, int(n * 0.95))
            i99 = min(n - 1, int(n * 0.99))
            per_key[key] = {
                "avg": round(sum(ordered) / n, 2),
                "p50": round(ordered[n // 2], 2),
                "p95": round(ordered[i95], 2),
                "p99": round(ordered[i99], 2),
            }

        self.timings = []
        return {"event": "perf_stats", "total_frames": n, "timings_ms": per_key}
+
+
class TPUHealthWatchdog:
    """Guards against two distinct Edge TPU failure modes.

    1. Inference hang — ``interpreter.invoke()`` never returns (e.g. USB
       drop). ``run_invoke`` executes it on a daemon thread and raises
       ``RuntimeError`` if it outlives ``invoke_timeout_s``.
    2. Silent stall — results keep arriving but are all empty for
       ``stall_frames`` consecutive frames, after at least
       ``min_active_frames`` frames that did contain detections.
    """

    def __init__(self, invoke_timeout_s=10, stall_frames=30, min_active_frames=5):
        self.invoke_timeout_s = invoke_timeout_s
        self.stall_frames = stall_frames
        self.min_active_frames = min_active_frames

        self._consecutive_zero = 0
        self._total_frames_with_detections = 0
        self._invoke_exception: Optional[Exception] = None

    def run_invoke(self, interpreter) -> None:
        """Execute ``interpreter.invoke()`` under a hard wall-clock timeout.

        Re-raises any exception the invoke thread hit; raises RuntimeError
        when the thread is still alive after the timeout.
        """
        self._invoke_exception = None
        completed = [False]

        def _worker():
            try:
                interpreter.invoke()
                completed[0] = True
            except Exception as exc:
                self._invoke_exception = exc

        worker = threading.Thread(target=_worker, daemon=True)
        worker.start()
        worker.join(timeout=self.invoke_timeout_s)

        if worker.is_alive():
            raise RuntimeError(
                f"TPU invoke() timed out after {self.invoke_timeout_s}s — "
                "USB connection may be lost or TPU is locked up"
            )

        if self._invoke_exception is not None:
            raise self._invoke_exception

    def record(self, n_detections: int) -> Optional[str]:
        """Track one frame's detection count; return "stall" when tripped, else None."""
        if n_detections > 0:
            self._consecutive_zero = 0
            self._total_frames_with_detections += 1
            return None

        self._consecutive_zero += 1
        stall_armed = self._total_frames_with_detections >= self.min_active_frames
        if stall_armed and self._consecutive_zero >= self.stall_frames:
            return "stall"
        return None

    def reset_stall(self) -> None:
        """Clear the consecutive-empty counter after a stall has been handled."""
        self._consecutive_zero = 0
+
+
class CoralDetector:
    """Edge TPU object detector using ai-edge-litert with libedgetpu delegate.

    Construction loads the model immediately (Edge TPU first, CPU fallback
    second) and may terminate the process via sys.exit on fatal setup errors.
    """

    def __init__(self, params: Dict[str, Any]):
        self.params = params
        self.confidence = float(params.get("confidence", 0.5))
        self.input_size = int(params.get("input_size", 320))
        self.interpreter = None
        self.tpu_count = 0
        self.device_name = "unknown"
        self.watchdog = TPUHealthWatchdog(
            invoke_timeout_s=10,
            stall_frames=30,
            min_active_frames=5,
        )

        # Lowercased allow-list of class names; empty fragments from stray
        # commas are filtered out.
        classes_str = params.get("classes", "person,car,dog,cat")
        self.target_classes = set(c.strip().lower() for c in classes_str.split(",") if c.strip())

        self._load_model()

    def _find_model_path(self) -> Optional[str]:
        """Find the compiled Edge TPU model.

        Searches /app/models (container layout) then the skill-local models/
        directory, preferring Edge-TPU-compiled files over plain .tflite.
        Matches are sorted so the pick is deterministic. Returns None when
        nothing matches.
        """
        candidates = [
            Path("/app/models"),
            Path(__file__).parent.parent / "models",
        ]

        for d in candidates:
            if not d.exists():
                continue
            for pattern in [
                "*_full_integer_quant_edgetpu.tflite",
                "*_edgetpu.tflite",
                "*.tflite",
            ]:
                matches = sorted(d.glob(pattern))
                if matches:
                    return str(matches[0])

        return None

    def _load_model(self) -> None:
        """Load model onto Edge TPU (or CPU fallback).

        Exits the process (after emitting a non-retriable error event) when
        no runtime or no model file is available.
        """
        if not HAS_LITERT:
            log("FATAL: ai-edge-litert not available. pip install ai-edge-litert")
            emit_json({"event": "error", "message": "ai-edge-litert not installed", "retriable": False})
            sys.exit(1)

        model_path = self._find_model_path()
        if not model_path:
            log("ERROR: No .tflite model found in models/")
            emit_json({"event": "error", "message": "No Edge TPU model found", "retriable": False})
            sys.exit(1)

        edgetpu_lib = _edgetpu_lib_name()
        try:
            # Monkey-patch Delegate.__del__ to swallow AttributeError raised
            # during teardown of a partially-constructed delegate (applied
            # once, guarded by the _patched_del marker attribute).
            if hasattr(litert, "Delegate"):
                original_del = getattr(litert.Delegate, "__del__", None)
                if original_del and not hasattr(litert.Delegate, "_patched_del"):
                    def safe_del(self):
                        try:
                            original_del(self)
                        except AttributeError:
                            pass
                    litert.Delegate.__del__ = safe_del
                    litert.Delegate._patched_del = True

            delegate = litert.load_delegate(edgetpu_lib)
            self.interpreter = litert.Interpreter(
                model_path=model_path,
                experimental_delegates=[delegate],
            )
            self.interpreter.allocate_tensors()
            self.device_name = "coral"
            self.tpu_count = 1
            log(f"Loaded model on Edge TPU: {model_path}")
        except (ValueError, OSError) as e:
            # load_delegate raises ValueError/OSError when libedgetpu or the
            # device is missing — degrade to CPU rather than abort.
            log(f"Edge TPU delegate not available: {e}")
            log("Falling back to CPU inference")
            self._load_cpu_fallback(model_path)

    def _load_cpu_fallback(self, model_path: str) -> None:
        """Fallback to CPU-only LiteRT interpreter.

        Tries the non-_edgetpu twin of the model, then a bundled SSD
        MobileNet CPU model; exits if only an Edge-TPU-compiled file exists
        (its custom ops cannot run on plain CPU).
        """
        cpu_path = model_path.replace("_edgetpu.tflite", ".tflite")
        if not os.path.exists(cpu_path):
            universal_fallback = os.path.join(
                os.path.dirname(model_path),
                "ssd_mobilenet_v2_coco_quant_postprocess.tflite",
            )
            if os.path.exists(universal_fallback):
                log("Falling back to universal SSD MobileNet CPU model")
                cpu_path = universal_fallback
            elif "edgetpu" in model_path.lower():
                log("FATAL: Cannot load Edge TPU compiled model on pure CPU, and no fallback model exists.")
                emit_json({
                    "event": "error",
                    "message": "No Edge TPU plugged in and no pure-CPU fallback model found.",
                    "retriable": False,
                })
                sys.exit(1)
            else:
                cpu_path = model_path

        try:
            self.interpreter = litert.Interpreter(model_path=cpu_path)
            self.interpreter.allocate_tensors()
            self.device_name = "cpu"
            log(f"Loaded model on CPU: {cpu_path}")
        except Exception as e:
            log(f"FATAL: Cannot load model: {e}")
            emit_json({"event": "error", "message": f"Cannot load model: {e}", "retriable": False})
            sys.exit(1)

    def _prepare_input_tensor(self, img_resized: Image.Image, input_details: Dict[str, Any]) -> np.ndarray:
        """
        Prepare input tensor matching model dtype and quantization parameters.

        Fixes crash where INT8 models were being fed UINT8 tensors.

        img_resized must already match the model's spatial input size; the
        returned array is batched and reshaped to input_details["shape"].
        """
        req_dtype = input_details["dtype"]
        input_shape = input_details["shape"]
        quant = input_details.get("quantization", (0.0, 0))
        scale, zero_point = quant if quant is not None else (0.0, 0)

        img_np = np.asarray(img_resized)

        if req_dtype == np.uint8:
            # UINT8 models take raw 0-255 pixels directly.
            tensor = img_np.astype(np.uint8)

        elif req_dtype == np.int8:
            # Quantize from float pixel domain using model input quantization.
            # If scale metadata is missing/zero, fall back to common full-int8 image mapping.
            if scale and scale > 0:
                tensor_f = img_np.astype(np.float32) / scale + zero_point
                tensor = np.clip(np.round(tensor_f), -128, 127).astype(np.int8)
            else:
                tensor = (img_np.astype(np.int16) - 128).clip(-128, 127).astype(np.int8)

        elif req_dtype == np.float32:
            tensor = img_np.astype(np.float32)
            if scale and scale > 0:
                tensor = (tensor - zero_point) * scale
            else:
                tensor /= 255.0

        else:
            # Unknown dtype: best-effort cast and let the interpreter validate.
            tensor = img_np.astype(req_dtype)

        return np.expand_dims(tensor, axis=0).reshape(input_shape)

    def _dequantize_output(self, arr: np.ndarray, detail: Dict[str, Any]) -> np.ndarray:
        """Convert quantized output tensor to float if needed."""
        if np.issubdtype(arr.dtype, np.floating):
            return arr.astype(np.float32)

        scale, zero_point = detail.get("quantization", (0.0, 0))
        if scale and scale > 0:
            return (arr.astype(np.float32) - zero_point) * scale
        return arr.astype(np.float32)

    def _parse_ssd_outputs(
        self,
        output_details: List[Dict[str, Any]],
        orig_w: int,
        orig_h: int,
    ) -> List[Dict[str, Any]]:
        """Parse SSD MobileNet-style outputs: boxes, classes, scores, count.

        Boxes are treated as normalized [y1, x1, y2, x2] and scaled to pixel
        coordinates of the original image. Detections below the confidence
        threshold or outside target_classes are dropped; at most 100 are
        considered. NOTE(review): assumes the model emits 0-based class ids
        aligned with COCO_CLASSES — confirm for the exported model.
        """
        boxes = self._dequantize_output(
            self.interpreter.get_tensor(output_details[0]["index"]),
            output_details[0],
        )[0]
        classes = self._dequantize_output(
            self.interpreter.get_tensor(output_details[1]["index"]),
            output_details[1],
        )[0]
        scores = self._dequantize_output(
            self.interpreter.get_tensor(output_details[2]["index"]),
            output_details[2],
        )[0]
        count_tensor = self.interpreter.get_tensor(output_details[3]["index"])
        count = int(np.array(count_tensor).flatten()[0])

        objects: List[Dict[str, Any]] = []
        for i in range(min(count, len(scores), 100)):
            score = float(scores[i])
            if score < self.confidence:
                continue

            class_id = int(classes[i])
            class_name = COCO_CLASSES[class_id] if 0 <= class_id < len(COCO_CLASSES) else f"class_{class_id}"

            if self.target_classes and class_name.lower() not in self.target_classes:
                continue

            # Clamp normalized coords to [0, 1] before scaling to pixels.
            y1, x1, y2, x2 = [float(v) for v in boxes[i]]
            x1 = max(0.0, min(1.0, x1))
            y1 = max(0.0, min(1.0, y1))
            x2 = max(0.0, min(1.0, x2))
            y2 = max(0.0, min(1.0, y2))

            objects.append({
                "label": class_name,
                "confidence": round(score, 4),
                "bbox": {
                    "x": round(x1 * orig_w, 1),
                    "y": round(y1 * orig_h, 1),
                    "width": round((x2 - x1) * orig_w, 1),
                    "height": round((y2 - y1) * orig_h, 1),
                },
            })

        return objects

    def detect_frame(self, frame_path: str) -> Tuple[List[Dict[str, Any]], Dict[str, float], Optional[str]]:
        """Run detection on a single frame.

        Returns (objects, timings_ms, health) where health is None on
        success, "stall" from the watchdog, or one of "hang",
        "input_error", "invoke_error", "parse_error" on failure (with
        empty objects/timings as appropriate).
        """
        t0 = time.perf_counter()

        try:
            # Translate Windows paths to WSL /mnt/ paths when running inside WSL
            frame_path = _win_to_wsl_path(frame_path)
            img = Image.open(frame_path).convert("RGB")
        except Exception as e:
            log(f"ERROR reading frame: {e}")
            return [], {}, None

        t_read = time.perf_counter()

        # Resize to the model's expected spatial input size (NHWC shape).
        input_details = self.interpreter.get_input_details()[0]
        input_shape = input_details["shape"]
        h, w = int(input_shape[1]), int(input_shape[2])
        orig_w, orig_h = img.size
        img_resized = img.resize((w, h), Image.LANCZOS)

        try:
            input_data = self._prepare_input_tensor(img_resized, input_details)
            self.interpreter.set_tensor(input_details["index"], input_data)
        except Exception as e:
            log(f"ERROR preparing input tensor: dtype={input_details.get('dtype')} quant={input_details.get('quantization')} err={e}")
            return [], {}, "input_error"

        t_pre = time.perf_counter()

        try:
            # Watchdog enforces a hard timeout around interpreter.invoke().
            self.watchdog.run_invoke(self.interpreter)
        except RuntimeError as e:
            log(f"TPU invoke() failed: {e}")
            return [], {}, "hang"
        except Exception as e:
            log(f"Inference failed: {e}")
            return [], {}, "invoke_error"

        t_infer = time.perf_counter()

        objects: List[Dict[str, Any]] = []
        output_details = self.interpreter.get_output_details()

        try:
            if len(output_details) >= 4:
                objects = self._parse_ssd_outputs(output_details, orig_w, orig_h)
            else:
                log(f"Unsupported model output layout: {len(output_details)} tensors")
        except Exception as e:
            log(f"ERROR parsing outputs: {e}")
            return [], {}, "parse_error"

        t_post = time.perf_counter()

        timings = {
            "read": round((t_read - t0) * 1000.0, 2),
            "preprocess": round((t_pre - t_read) * 1000.0, 2),
            "infer": round((t_infer - t_pre) * 1000.0, 2),
            "postprocess": round((t_post - t_infer) * 1000.0, 2),
            "total": round((t_post - t0) * 1000.0, 2),
        }

        health = self.watchdog.record(len(objects))
        return objects, timings, health
+
+
# Flag flipped by the signal handler; the main stdin loop polls it each iteration.
_shutdown = False


def _handle_signal(signum, frame):
    """SIGINT/SIGTERM handler: request a graceful exit of the main loop."""
    global _shutdown
    _shutdown = True
    log(f"Received signal {signum}, shutting down...")
+
+
def main() -> None:
    """Entry point: read frame events from stdin, emit detections on stdout.

    Params come from the AEGIS_SKILL_PARAMS environment variable (JSON;
    malformed input degrades to {}). Emits a "ready" event, then loops over
    JSONL messages until EOF, a "shutdown" event, or a signal.
    """
    signal.signal(signal.SIGINT, _handle_signal)
    signal.signal(signal.SIGTERM, _handle_signal)

    raw_params = os.environ.get("AEGIS_SKILL_PARAMS", "{}")
    try:
        params = json.loads(raw_params)
    except Exception:
        params = {}

    log(f"Starting with params: {json.dumps(params)}")

    # May sys.exit here if no runtime/model is available (see CoralDetector).
    detector = CoralDetector(params)
    perf = PerfTracker(emit_interval=50)

    emit_json({
        "event": "ready",
        "model": "yolo26n_edgetpu",
        "device": detector.device_name,
        "format": "edgetpu_tflite",
        "runtime": "ai-edge-litert",
        "tpu_count": detector.tpu_count,
        "classes": len(COCO_CLASSES),
        "input_size": detector.input_size,
        "fps": int(params.get("fps", 5)),
    })
    log("Ready — waiting for frame events on stdin")

    while not _shutdown:
        line = sys.stdin.readline()
        if not line:
            # EOF — parent closed our stdin.
            break

        line = line.strip()
        if not line:
            continue

        try:
            msg = json.loads(line)
        except json.JSONDecodeError:
            log(f"Ignoring invalid JSON line: {line[:200]}")
            continue

        event = msg.get("event")
        if event == "shutdown":
            break

        if event != "frame":
            # Unknown events are silently ignored.
            continue

        frame_path = msg.get("frame_path")
        frame_id = msg.get("frame_id")
        camera_id = msg.get("camera_id")
        timestamp = msg.get("timestamp")

        if not frame_path:
            emit_json({
                "event": "error",
                "frame_id": frame_id,
                "message": "Missing frame_path",
                "retriable": True,
            })
            continue

        objects, timings, health = detector.detect_frame(frame_path)

        # Always emit a detections event (possibly empty) so the parent can
        # correlate every frame_id it sent.
        emit_json({
            "event": "detections",
            "frame_id": frame_id,
            "camera_id": camera_id,
            "timestamp": timestamp,
            "objects": objects,
        })

        if timings:
            perf.record(timings)
            if perf.should_emit():
                stats = perf.emit_and_reset()
                if stats:
                    emit_json(stats)

        # Surface watchdog findings after the detections event.
        if health == "hang":
            emit_json({
                "event": "error",
                "frame_id": frame_id,
                "message": "TPU inference hang detected",
                "retriable": True,
            })
        elif health == "stall":
            emit_json({
                "event": "warning",
                "frame_id": frame_id,
                "message": "TPU may be stalled: too many consecutive empty detections",
            })

    # Flush any remaining perf samples before exiting.
    stats = perf.emit_and_reset()
    if stats:
        emit_json(stats)


if __name__ == "__main__":
    main()
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/detect.py.bak b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/detect.py.bak
new file mode 100644
index 00000000..d2aecd0a
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/detect.py.bak
@@ -0,0 +1,639 @@
+#!/usr/bin/env python3
+"""
+Coral TPU Object Detection — JSONL stdin/stdout protocol
+Uses ai-edge-litert (LiteRT) with Edge TPU delegate for hardware acceleration.
+Same protocol as yolo-detection-2026/scripts/detect.py.
+
+Communication:
+ stdin: {"event": "frame", "frame_id": N, "frame_path": "...", ...}
+ stdout: {"event": "detections", "frame_id": N, "objects": [...]}
+ stderr: Debug logs (ignored by Aegis parser)
+"""
+
+import json
+import os
+import sys
+import time
+import signal
+import threading
+from pathlib import Path
+from typing import Optional, Tuple, List, Dict, Any
+
# ─── Windows DLL search path fix (MUST happen before any native import) ───────
# Python 3.8+ no longer searches PATH for DLLs loaded by native C extensions.
# We must register our local lib/ directory so that when ai_edge_litert loads
# edgetpu.dll, Windows can also find libusb-1.0.dll in the same folder.
# Native delegates loaded via C++ LoadLibrary also bypass Python's DLL directory,
# so we must append it to the system PATH environment variable as well.
# (PATH is prepended here, so the bundled DLLs take precedence over any
# system-wide copies.)
_LIB_DIR = Path(__file__).parent.parent / "lib"
if sys.platform == "win32" and _LIB_DIR.exists():
    os.add_dll_directory(str(_LIB_DIR))
    os.environ["PATH"] = str(_LIB_DIR) + os.pathsep + os.environ.get("PATH", "")
+
+import numpy as np
+from PIL import Image
+
+# ─── LiteRT imports (replaces archived pycoral + tflite-runtime) ─────────────
+# ai-edge-litert is the modern successor to tflite-runtime, supporting
+# Python 3.9–3.13 on all platforms. The Edge TPU is accessed via the
+# libedgetpu delegate (installed separately as a system library).
+
+HAS_LITERT = False
+HAS_EDGETPU_DELEGATE = False
+
+try:
+ from ai_edge_litert import interpreter as litert
+ HAS_LITERT = True
+except ImportError:
+ sys.stderr.write("[coral-detect] WARNING: ai-edge-litert not installed\n")
+
+
+def _edgetpu_lib_name():
+ """Return the platform-specific libedgetpu shared library name."""
+ import platform
+ system = platform.system()
+ if system == "Linux":
+ return "libedgetpu.so.1"
+ elif system == "Darwin":
+ return "libedgetpu.1.dylib"
+ elif system == "Windows":
+ # os.add_dll_directory() already registered our lib/ folder above,
+ # so Windows can resolve all transitive dependencies (libusb-1.0.dll etc.).
+ # Use just the bare name so load_delegate() finds it through the
+ # registered DLL directories rather than trying to parse a full path.
+ local_dll = _LIB_DIR / "edgetpu.dll"
+ if local_dll.exists():
+ return str(local_dll.resolve())
+ return "edgetpu.dll"
+ return "libedgetpu.so.1"
+
+
# ─── COCO class names (80 classes) ───────────────────────────────────────────
# Index-aligned lookup table: an integer class id from the model indexes
# directly into this list.
COCO_CLASSES = [
    "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
    "truck", "boat", "traffic light", "fire hydrant", "stop sign",
    "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep",
    "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
    "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
    "sports ball", "kite", "baseball bat", "baseball glove", "skateboard",
    "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork",
    "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
    "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
    "couch", "potted plant", "bed", "dining table", "toilet", "tv",
    "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave",
    "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
    "scissors", "teddy bear", "hair drier", "toothbrush"
]
+
+
class PerfTracker:
    """Tracks per-frame timing and emits aggregate stats.

    ``record()`` each frame's timing dict; once ``emit_interval`` samples
    have accumulated, ``emit_and_reset()`` returns a ``perf_stats`` payload
    with avg/p50/p95/p99 per timing key and clears the buffer.
    """

    def __init__(self, emit_interval=50):
        self.emit_interval = emit_interval
        self.timings = []
        self.total_frames = 0  # lifetime frame count; never reset

    def record(self, timing_dict):
        """Store one frame's timing breakdown."""
        self.timings.append(timing_dict)
        self.total_frames += 1

    def should_emit(self):
        """True once enough samples have accumulated for an emission."""
        return len(self.timings) >= self.emit_interval

    def emit_and_reset(self):
        """Return the aggregate stats payload (or None if empty) and reset.

        Percentile indices are clamped to ``n - 1`` (matching the current
        detect.py) so small or float-rounded sample counts can never index
        past the end of the sorted value list.
        """
        if not self.timings:
            return None

        stats = {"event": "perf_stats", "total_frames": len(self.timings), "timings_ms": {}}
        for key in self.timings[0]:
            values = sorted([t[key] for t in self.timings])
            n = len(values)
            p95_idx = min(n - 1, int(n * 0.95))
            p99_idx = min(n - 1, int(n * 0.99))
            stats["timings_ms"][key] = {
                "avg": round(sum(values) / n, 2),
                "p50": round(values[n // 2], 2),
                "p95": round(values[p95_idx], 2),
                "p99": round(values[p99_idx], 2),
            }
        self.timings = []
        return stats
+
+
class TPUHealthWatchdog:
    """Guards against two distinct Edge TPU failure modes.

    1. Inference hang — ``interpreter.invoke()`` blocks past
       ``invoke_timeout_s`` (lost USB connection or locked kernel driver).
       ``run_invoke`` runs it on a daemon thread and joins with a timeout.
    2. Silent stall — results keep arriving (no hang) but contain zero
       detections for ``stall_frames`` consecutive frames AFTER at least
       ``min_active_frames`` frames that did produce detections; this
       catches internal TPU resets such as thermal throttling.
    """

    def __init__(self, invoke_timeout_s=10, stall_frames=30, min_active_frames=5):
        self.invoke_timeout_s = invoke_timeout_s
        self.stall_frames = stall_frames
        self.min_active_frames = min_active_frames

        self._consecutive_zero = 0
        self._total_frames_with_detections = 0
        self._invoke_exception: Optional[Exception] = None

    def run_invoke(self, interpreter):
        """Execute ``interpreter.invoke()`` under a hard wall-clock timeout.

        Re-raises any exception hit on the invoke thread; raises
        RuntimeError when the thread is still alive after the timeout.
        """
        self._invoke_exception = None
        completed = [False]

        def _worker():
            try:
                interpreter.invoke()
                completed[0] = True
            except Exception as exc:
                self._invoke_exception = exc

        worker = threading.Thread(target=_worker, daemon=True)
        worker.start()
        worker.join(timeout=self.invoke_timeout_s)

        if worker.is_alive():
            # Worker is still stuck inside invoke() — treat as a USB/TPU hang.
            raise RuntimeError(
                f"TPU invoke() timed out after {self.invoke_timeout_s}s — "
                "USB connection may be lost or TPU is locked up"
            )

        if self._invoke_exception is not None:
            raise self._invoke_exception

    def record(self, n_detections):
        """Track one frame's detection count; return "stall" when tripped, else None."""
        if n_detections > 0:
            self._consecutive_zero = 0
            self._total_frames_with_detections += 1
            return None

        self._consecutive_zero += 1
        stall_armed = self._total_frames_with_detections >= self.min_active_frames
        if stall_armed and self._consecutive_zero >= self.stall_frames:
            return "stall"
        return None

    def reset_stall(self):
        """Clear the consecutive-empty counter after a stall has been handled."""
        self._consecutive_zero = 0
+
+
+class CoralDetector:
+ """Edge TPU object detector using ai-edge-litert with libedgetpu delegate."""
+
    def __init__(self, params):
        """Read detection params and load the model.

        May terminate the process via sys.exit inside _load_model on fatal
        setup errors (no runtime, no model file).
        """
        self.params = params
        self.confidence = float(params.get("confidence", 0.5))
        self.input_size = int(params.get("input_size", 320))
        self.interpreter = None
        self.tpu_count = 0
        self.watchdog = TPUHealthWatchdog(
            invoke_timeout_s=10,
            stall_frames=30,
            min_active_frames=5,
        )

        # Parse target classes
        # NOTE(review): empty fragments from stray commas are kept in the set
        # here; the newer detect.py filters them with `if c.strip()`.
        classes_str = params.get("classes", "person,car,dog,cat")
        self.target_classes = set(c.strip().lower() for c in classes_str.split(","))

        self._load_model()
+
    def _find_model_path(self):
        """Find the compiled Edge TPU model.

        ultralytics format="edgetpu" produces:
            _saved_model/_full_integer_quant_edgetpu.tflite
        Our compile_model.py copies it to models/ so we look there first.

        Returns the first matching path as a string, or None when no
        .tflite file exists in either search directory.
        """
        model_dir = Path("/app/models")
        script_dir = Path(__file__).parent.parent / "models"

        for d in [model_dir, script_dir]:
            if not d.exists():
                continue
            # ultralytics edgetpu naming: *_full_integer_quant_edgetpu.tflite
            # NOTE(review): glob() order is filesystem-dependent, so with
            # multiple matches the pick is nondeterministic — the newer
            # detect.py sorts the matches before choosing.
            for pattern in [
                "*_full_integer_quant_edgetpu.tflite",
                "*_edgetpu.tflite",
                "*.tflite",
            ]:
                matches = list(d.glob(pattern))
                if matches:
                    return str(matches[0])

        return None
+
    def _load_model(self):
        """Load model onto Edge TPU (or CPU fallback).

        Exits the process (after emitting a non-retriable error event) when
        no LiteRT runtime or no model file is available.
        NOTE(review): log() and emit_json() are not defined in the portion
        of this module visible above — confirm they are defined later in
        the file, otherwise these calls raise NameError at runtime.
        """
        if not HAS_LITERT:
            log("FATAL: ai-edge-litert not available. pip install ai-edge-litert")
            emit_json({"event": "error", "message": "ai-edge-litert not installed", "retriable": False})
            sys.exit(1)

        model_path = self._find_model_path()
        if not model_path:
            log("ERROR: No .tflite model found in models/")
            emit_json({"event": "error", "message": "No Edge TPU model found", "retriable": False})
            sys.exit(1)

        # Try loading with Edge TPU delegate
        edgetpu_lib = _edgetpu_lib_name()
        try:
            # Squelch __del__ AttributeError bugs in ai-edge-litert when delegate fails to load
            # (patch is applied once, guarded by the _patched_del marker attribute).
            if hasattr(litert, 'Delegate'):
                original_del = getattr(litert.Delegate, '__del__', None)
                if original_del and not hasattr(litert.Delegate, '_patched_del'):
                    def safe_del(self):
                        try:
                            original_del(self)
                        except AttributeError:
                            pass
                    litert.Delegate.__del__ = safe_del
                    litert.Delegate._patched_del = True

            delegate = litert.load_delegate(edgetpu_lib)
            self.interpreter = litert.Interpreter(
                model_path=model_path,
                experimental_delegates=[delegate],
            )
            self.interpreter.allocate_tensors()
            self.device_name = "coral"
            self.tpu_count = 1
            log(f"Loaded model on Edge TPU: {model_path}")
        except (ValueError, OSError) as e:
            # load_delegate raises ValueError/OSError when libedgetpu or the
            # device is unavailable — degrade to CPU rather than abort.
            log(f"Edge TPU delegate not available: {e}")
            log("Falling back to CPU inference")
            self._load_cpu_fallback(model_path)
+
    def _load_cpu_fallback(self, model_path):
        """Fallback to CPU-only LiteRT interpreter.

        Tries the non-_edgetpu twin of the given model, then a bundled SSD
        MobileNet CPU model; exits when only an Edge-TPU-compiled file
        exists (its custom ops cannot run on plain CPU).
        """
        # Use a non-edgetpu model if available
        cpu_path = model_path.replace("_edgetpu.tflite", ".tflite")
        if not os.path.exists(cpu_path):
            universal_fallback = os.path.join(os.path.dirname(model_path), "ssd_mobilenet_v2_coco_quant_postprocess.tflite")
            if os.path.exists(universal_fallback):
                log("Falling back to universal SSD MobileNet CPU model")
                cpu_path = universal_fallback
            elif "edgetpu" in model_path.lower():
                log("FATAL: Cannot load Edge TPU compiled model on pure CPU, and no fallback model exists.")
                emit_json({"event": "error", "message": "No Edge TPU plugged in and no pure-CPU fallback model found.", "retriable": False})
                sys.exit(1)
            else:
                cpu_path = model_path

        try:
            self.interpreter = litert.Interpreter(model_path=cpu_path)
            self.interpreter.allocate_tensors()
            self.device_name = "cpu"
            log(f"Loaded model on CPU: {cpu_path}")
        except Exception as e:
            log(f"FATAL: Cannot load model: {e}")
            emit_json({"event": "error", "message": f"Cannot load model: {e}", "retriable": False})
            sys.exit(1)
+
+ def detect_frame(self, frame_path):
+ """Run detection on a single frame. Returns list of detection dicts."""
+ t0 = time.perf_counter()
+
+ # Read and resize image
+ try:
+ img = Image.open(frame_path).convert("RGB")
+ except Exception as e:
+ log(f"ERROR reading frame: {e}")
+ return [], {}
+
+ t_read = time.perf_counter()
+
+ # Resize to model input size
+ input_details = self.interpreter.get_input_details()[0]
+ input_shape = input_details["shape"]
+ h, w = input_shape[1], input_shape[2]
+ orig_w, orig_h = img.size
+ img_resized = img.resize((w, h), Image.LANCZOS)
+
+ # Prepare input data according to required dtype
+ req_dtype = input_details["dtype"]
+ raw_img = np.array(img_resized, dtype=np.float32)
+ if req_dtype == np.int8:
+ # Shift 0-255 to -128-127 for INT8 models
+ input_data = np.expand_dims((raw_img - 128).astype(np.int8), axis=0)
+ else:
+ input_data = np.expand_dims(raw_img.astype(np.uint8), axis=0)
+
+ self.interpreter.set_tensor(input_details["index"], input_data)
+
+ # Run inference with hard timeout via watchdog
+ t_pre = time.perf_counter()
+ try:
+ self.watchdog.run_invoke(self.interpreter)
+ except RuntimeError as e:
+ log(f"TPU invoke() failed: {e}")
+ return [], {}, "hang"
+ t_infer = time.perf_counter()
+
+ # Parse output tensors (works for both Edge TPU and CPU)
+ objects = []
+ output_details = self.interpreter.get_output_details()
+
+ if len(output_details) >= 4:
+ # SSD MobileNet-style output: boxes, classes, scores, count
+ boxes = self.interpreter.get_tensor(output_details[0]["index"])[0]
+ classes = self.interpreter.get_tensor(output_details[1]["index"])[0]
+ scores = self.interpreter.get_tensor(output_details[2]["index"])[0]
+ count = int(self.interpreter.get_tensor(output_details[3]["index"])[0])
+
+ for i in range(min(count, 25)):
+ score = float(scores[i])
+ if score < self.confidence:
+ continue
+ class_id = int(classes[i])
+ if class_id < len(COCO_CLASSES):
+ class_name = COCO_CLASSES[class_id]
+ else:
+ class_name = f"class_{class_id}"
+
+ if self.target_classes and class_name not in self.target_classes:
+ continue
+
+ y1, x1, y2, x2 = boxes[i]
+ objects.append({
+ "class": class_name,
+ "confidence": round(score, 3),
+ "bbox": [
+ int(x1 * orig_w), int(y1 * orig_h),
+ int(x2 * orig_w), int(y2 * orig_h)
+ ]
+ })
+ elif len(output_details) >= 1:
+ # YOLO-style output: single tensor with [1, num_classes + 4, num_anchors]
+ output = self.interpreter.get_tensor(output_details[0]["index"])
+ out_dtype = output_details[0]["dtype"]
+ q_scale, q_zero = output_details[0]["quantization"]
+
+ # Dequantize if returning INT8
+ if out_dtype == np.int8 and q_scale > 0:
+ output = (output.astype(np.float32) - q_zero) * q_scale
+
+ pred = output[0] # Shape: e.g. (84, 2100) or (2100, 84)
+
+ # Ultralytics tends to output (num_classes + 4, num_anchors), so transpose if so
+ if pred.shape[0] == len(COCO_CLASSES) + 4:
+ pred = pred.transpose() # Now (num_anchors, 84)
+
+ # Parse boxes and scores
+ boxes_cx = pred[:, 0]
+ boxes_cy = pred[:, 1]
+ boxes_w = pred[:, 2]
+ boxes_h = pred[:, 3]
+
+ # Scores for all classes
+ class_scores = pred[:, 4:]
+ max_scores = np.max(class_scores, axis=1)
+ class_ids = np.argmax(class_scores, axis=1)
+
+ # Filter by confidence
+ mask = max_scores >= self.confidence
+
+ boxes_cx = boxes_cx[mask]
+ boxes_cy = boxes_cy[mask]
+ boxes_w = boxes_w[mask]
+ boxes_h = boxes_h[mask]
+ class_ids = class_ids[mask]
+ max_scores = max_scores[mask]
+
+ # Convert cx, cy, w, h -> y1, x1, y2, x2
+ x1 = boxes_cx - boxes_w / 2
+ y1 = boxes_cy - boxes_h / 2
+ x2 = boxes_cx + boxes_w / 2
+ y2 = boxes_cy + boxes_h / 2
+
+ # Very basic NMS
+ # First, filter to target classes
+ filtered_indices = []
+ for i in range(len(max_scores)):
+ cid = class_ids[i]
+ cname = COCO_CLASSES[cid] if cid < len(COCO_CLASSES) else f"class_{cid}"
+ if self.target_classes and cname not in self.target_classes:
+ continue
+ filtered_indices.append(i)
+
+ if len(filtered_indices) > 0:
+ x1_f = x1[filtered_indices]
+ y1_f = y1[filtered_indices]
+ x2_f = x2[filtered_indices]
+ y2_f = y2[filtered_indices]
+ scores_f = max_scores[filtered_indices]
+ class_ids_f = class_ids[filtered_indices]
+
+ # Numpy NMS implementation
+ areas = (x2_f - x1_f) * (y2_f - y1_f)
+ order = scores_f.argsort()[::-1]
+
+ keep = []
+ # Keep top 25 at most
+ while order.size > 0 and len(keep) < 25:
+ i = order[0]
+ keep.append(i)
+ if order.size == 1:
+ break
+
+ xx1 = np.maximum(x1_f[i], x1_f[order[1:]])
+ yy1 = np.maximum(y1_f[i], y1_f[order[1:]])
+ xx2 = np.minimum(x2_f[i], x2_f[order[1:]])
+ yy2 = np.minimum(y2_f[i], y2_f[order[1:]])
+
+ w = np.maximum(0.0, xx2 - xx1)
+ h = np.maximum(0.0, yy2 - yy1)
+ inter = w * h
+ iou = inter / (areas[i] + areas[order[1:]] - inter)
+
+ inds = np.where(iou <= 0.45)[0] # NMS threshold
+ order = order[inds + 1]
+
+ for k in keep:
+ cid = class_ids_f[k]
+ cname = COCO_CLASSES[cid] if cid < len(COCO_CLASSES) else f"class_{cid}"
+
+                # Typical YOLO exports emit coordinates scaled to the model input
+                # resolution (e.g. 0 to 320), so normalize them to 0.0 - 1.0 here
+                # before mapping onto the original frame dimensions.
+ nx1 = float(x1_f[k]) / self.input_size
+ ny1 = float(y1_f[k]) / self.input_size
+ nx2 = float(x2_f[k]) / self.input_size
+ ny2 = float(y2_f[k]) / self.input_size
+
+ objects.append({
+ "class": cname,
+ "confidence": float(round(scores_f[k], 3)),
+ "bbox": [
+ int(nx1 * orig_w), int(ny1 * orig_h),
+ int(nx2 * orig_w), int(ny2 * orig_h)
+ ]
+ })
+
+ t_post = time.perf_counter()
+
+ timings = {
+ "file_read": round((t_read - t0) * 1000, 2),
+ "preprocess": round((t_pre - t_read) * 1000, 2),
+ "inference": round((t_infer - t_pre) * 1000, 2),
+ "postprocess": round((t_post - t_infer) * 1000, 2),
+ "total": round((t_post - t0) * 1000, 2),
+ }
+
+ # Record with watchdog — returns "stall" if TPU has gone silent
+ health = self.watchdog.record(len(objects))
+
+ return objects, timings, health
+
+
+# ─── Helpers ──────────────────────────────────────────────────────────────────
+
+def log(msg):
+ """Write to stderr (ignored by Aegis parser)."""
+ sys.stderr.write(f"[coral-detect] {msg}\n")
+ sys.stderr.flush()
+
+
+def emit_json(obj):
+ """Emit JSONL to stdout."""
+ sys.stdout.write(json.dumps(obj) + "\n")
+ sys.stdout.flush()
+
+
+# ─── Main loop ───────────────────────────────────────────────────────────────
+
+def main():
+ # Parse params from environment
+ params_str = os.environ.get("AEGIS_SKILL_PARAMS", "{}")
+ try:
+ params = json.loads(params_str)
+ except json.JSONDecodeError:
+ params = {}
+
+ log(f"Starting with params: {json.dumps(params)}")
+
+ # Initialize detector
+ detector = CoralDetector(params)
+ perf = PerfTracker(emit_interval=50)
+
+ # Emit ready event
+ emit_json({
+ "event": "ready",
+ "model": "yolo26n_edgetpu",
+ "device": detector.device_name,
+ "format": "edgetpu_tflite" if detector.device_name == "coral" else "tflite_cpu",
+ "runtime": "ai-edge-litert",
+ "tpu_count": detector.tpu_count,
+ "classes": len(COCO_CLASSES),
+ "input_size": detector.input_size,
+ "fps": params.get("fps", 5),
+ })
+
+ # Handle graceful shutdown
+ running = True
+ def on_signal(sig, frame):
+ nonlocal running
+ running = False
+ signal.signal(signal.SIGTERM, on_signal)
+ signal.signal(signal.SIGINT, on_signal)
+
+ # JSONL request-response loop
+ log("Ready — waiting for frame events on stdin")
+ for line in sys.stdin:
+ if not running:
+ break
+
+ line = line.strip()
+ if not line:
+ continue
+
+ try:
+ msg = json.loads(line)
+ except json.JSONDecodeError:
+ log(f"Invalid JSON: {line[:100]}")
+ continue
+
+ # Handle stop command
+ if msg.get("command") == "stop" or msg.get("event") == "stop":
+ log("Received stop command")
+ break
+
+ # Handle frame event
+ if msg.get("event") == "frame":
+ frame_id = msg.get("frame_id", 0)
+ frame_path = msg.get("frame_path", "")
+ camera_id = msg.get("camera_id", "")
+ timestamp = msg.get("timestamp", "")
+
+ if not frame_path or not os.path.exists(frame_path):
+ log(f"Frame not found: {frame_path}")
+ emit_json({
+ "event": "detections",
+ "frame_id": frame_id,
+ "camera_id": camera_id,
+ "timestamp": timestamp,
+ "objects": [],
+ })
+ continue
+
+ objects, timings, health = detector.detect_frame(frame_path)
+
+ # Check for TPU hang (invoke timeout)
+ if health == "hang":
+ emit_json({
+ "event": "tpu_error",
+ "frame_id": frame_id,
+ "camera_id": camera_id,
+ "error": "invoke_timeout",
+ "message": "TPU invoke() timed out — USB connection may be lost",
+ "retriable": True,
+ })
+ # Exit with code 1 so Aegis restarts us
+ sys.exit(1)
+
+ # Emit detections
+ emit_json({
+ "event": "detections",
+ "frame_id": frame_id,
+ "camera_id": camera_id,
+ "timestamp": timestamp,
+ "objects": objects,
+ })
+
+ # Check for silent stall (zero results for too long)
+ if health == "stall":
+ emit_json({
+ "event": "tpu_error",
+ "frame_id": frame_id,
+ "camera_id": camera_id,
+ "error": "stall",
+ "message": "TPU has returned 0 detections for 30 consecutive frames — possible thermal throttle or silent reset",
+ "retriable": True,
+ })
+ detector.watchdog.reset_stall() # Prevent repeated spam; let Aegis decide to restart
+
+ # Track performance
+ if timings:
+ perf.record(timings)
+ if perf.should_emit():
+ stats = perf.emit_and_reset()
+ if stats:
+ emit_json(stats)
+
+ log("Shutting down")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/install_pycoral.py b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/install_pycoral.py
new file mode 100644
index 00000000..61026ded
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/install_pycoral.py
@@ -0,0 +1,51 @@
+import sys
+import subprocess
+import json
+
+def emit(event_dict):
+ print(json.dumps(event_dict), flush=True)
+
+def main():
+ py_major = sys.version_info.major
+ py_minor = sys.version_info.minor
+
+ emit({"event": "progress", "stage": "build", "message": f"Detected Python {py_major}.{py_minor} — selecting appropriate PyCoral wheel..."})
+
+ if py_major == 3 and py_minor <= 9:
+ emit({"event": "progress", "stage": "build", "message": "Installing official Google pycoral 2.0 (Python <=3.9)..."})
+ url = "https://google-coral.github.io/py-repo/"
+ try:
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "--extra-index-url", url, "pycoral~=2.0"])
+ except subprocess.CalledProcessError:
+ print("WARNING: Official pycoral install failed.")
+ sys.exit(1)
+
+ elif py_major == 3 and py_minor == 10:
+ emit({"event": "progress", "stage": "build", "message": "Installing community pycoral for Python 3.10 (feranick/pycoral)..."})
+ url = "https://github.com/feranick/pycoral/releases/download/2.0.0TF2.11.1-1/pycoral-2.0.0-cp310-cp310-win_amd64.whl"
+ try:
+ subprocess.check_call([sys.executable, "-m", "pip", "install", url])
+ except subprocess.CalledProcessError:
+ print("ERROR: Failed to install community wheel for Python 3.10")
+ sys.exit(1)
+
+ elif py_major == 3 and py_minor == 11:
+ emit({"event": "progress", "stage": "build", "message": "Installing community pycoral for Python 3.11 (feranick/pycoral)..."})
+ url = "https://github.com/feranick/pycoral/releases/download/2.0.0TF2.11.1-1/pycoral-2.0.0-cp311-cp311-win_amd64.whl"
+ try:
+ subprocess.check_call([sys.executable, "-m", "pip", "install", url])
+ except subprocess.CalledProcessError:
+ print("ERROR: Failed to install community wheel for Python 3.11")
+ sys.exit(1)
+
+ else:
+ emit({
+ "event": "error",
+ "stage": "build",
+ "message": f"No pre-compiled PyCoral Windows wheels for Python {py_major}.{py_minor}. Please downgrade to Python 3.9, 3.10, or 3.11."
+ })
+ print(f"ERROR: Unsupported Python version {py_major}.{py_minor} for Edge TPU on Windows.")
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/tpu_probe.py b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/tpu_probe.py
new file mode 100644
index 00000000..ded1a397
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/tpu_probe.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python3
+"""
+Coral TPU Device Probe — tests Edge TPU delegate availability.
+
+Uses ai-edge-litert (LiteRT) to check if libedgetpu is installed and
+an Edge TPU device is accessible. Outputs JSON to stdout for Aegis
+skill deployment verification.
+
+Usage:
+ python scripts/tpu_probe.py
+"""
+
+import json
+import sys
+from pathlib import Path
+
+# ─── Windows DLL search path fix (MUST happen before any native import) ───────
+# Python 3.8+ no longer searches PATH for DLLs loaded by native C extensions.
+_LIB_DIR = Path(__file__).parent.parent / "lib"
+if sys.platform == "win32" and _LIB_DIR.exists():
+ import os
+ os.add_dll_directory(str(_LIB_DIR))
+
+def _edgetpu_lib_name():
+ """Return the platform-specific libedgetpu shared library name."""
+ import platform
+ from pathlib import Path
+ system = platform.system()
+ if system == "Linux":
+ return "libedgetpu.so.1"
+ elif system == "Darwin":
+ return "libedgetpu.1.dylib"
+ elif system == "Windows":
+ local_dll = _LIB_DIR / "edgetpu.dll"
+ if local_dll.exists():
+ return str(local_dll.resolve())
+ return "edgetpu.dll"
+ return "libedgetpu.so.1"
+
+
+def probe_tpus():
+ """Test Edge TPU delegate loading and return probe info dict."""
+ result = {
+ "event": "tpu_probe",
+ "available": False,
+ "count": 0,
+ "devices": [],
+ "runtime": None,
+ "error": None,
+ }
+
+ # Check ai-edge-litert availability
+ try:
+ from ai_edge_litert import interpreter as litert
+ result["runtime"] = "ai-edge-litert"
+ except ImportError:
+ result["runtime"] = None
+ result["error"] = "ai-edge-litert not installed. Run: pip install ai-edge-litert"
+ return result
+
+ # Try loading Edge TPU delegate
+ edgetpu_lib = _edgetpu_lib_name()
+ try:
+ delegate = litert.load_delegate(edgetpu_lib)
+ result["available"] = True
+ result["count"] = 1
+ result["devices"].append({
+ "index": 0,
+ "type": "usb",
+ "delegate": edgetpu_lib,
+ })
+ except (ValueError, OSError) as e:
+ error_str = str(e)
+ if "libedgetpu" in error_str.lower() or "not found" in error_str.lower():
+ result["error"] = f"libedgetpu not installed: {error_str}"
+ else:
+ result["error"] = f"Edge TPU not accessible: {error_str}"
+
+ # Check USB devices for additional context (Linux only)
+ try:
+ import subprocess
+ lsusb = subprocess.run(
+ ["lsusb"], capture_output=True, text=True, timeout=5
+ )
+ coral_lines = [
+ line.strip() for line in lsusb.stdout.splitlines()
+ if "1a6e" in line.lower() or "18d1" in line.lower() # Global Unichip / Google
+ or "coral" in line.lower() or "edge tpu" in line.lower()
+ ]
+ if coral_lines:
+ result["usb_devices"] = coral_lines
+ except (FileNotFoundError, subprocess.TimeoutExpired):
+ pass # lsusb not available (macOS, Windows)
+
+ return result
+
+
+def main():
+ result = probe_tpus()
+ print(json.dumps(result, indent=2))
+
+ # Human-readable summary to stderr
+ if result["available"]:
+ sys.stderr.write(f"✓ Found {result['count']} Edge TPU device(s)\n")
+ for dev in result["devices"]:
+ sys.stderr.write(f" [{dev['index']}] {dev['type']} via {dev.get('delegate', '?')}\n")
+ else:
+ sys.stderr.write("✗ No Edge TPU detected\n")
+ if result["error"]:
+ sys.stderr.write(f" Error: {result['error']}\n")
+
+ # Exit code: 0 if TPU found, 1 if not
+ sys.exit(0 if result["available"] else 1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/wsl_wrapper.cjs b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/wsl_wrapper.cjs
new file mode 100644
index 00000000..a318d6dd
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/scripts/wsl_wrapper.cjs
@@ -0,0 +1,59 @@
+const { spawn } = require('child_process');
+const path = require('path');
+
+// Ensure usbipd is on PATH (Aegis might have booted before the MSI updated System PATH)
+const env = { ...process.env };
+if (process.platform === 'win32' && (!env.PATH || !env.PATH.toLowerCase().includes('usbipd-win'))) {
+ env.PATH = `${env.PATH || ''};C:\\Program Files\\usbipd-win\\`;
+}
+
+// 1. Spawn the usbipd auto-attach process in the background.
+// This guarantees that the Google Coral USB Accelerator is passed through
+// to the WSL Linux kernel as soon as this inference script starts.
+const attachProcess = spawn('usbipd', ['attach', '--wsl', '--auto-attach', '--hardware-id', '18d1:9302'], {
+ env,
+ stdio: 'ignore', // We do not want usbipd logs corrupting the JSONL stdout stream!
+ detached: true
+});
+
+// The absolute path to the skill directory, derived from this script's location
+const skillDir = path.resolve(__dirname, '..');
+const wslSkillDir = skillDir.replace(/\\/g, '/').replace(/^([a-zA-Z]):/, (match, p1) => `/mnt/${p1.toLowerCase()}`);
+
+// Command to run the actual detect.py script inside WSL
+// Stdbuf guarantees line-buffering across the WSL boundary so JSONL events stream instantly
+const wslCommand = `cd "${wslSkillDir}" && source wsl_venv/bin/activate && stdbuf -oL python3.9 scripts/detect.py`;
+
+// We don't want wsl to launch a login shell, just bash -c
+const child = spawn('wsl.exe', ['-u', 'root', '-e', 'bash', '-c', wslCommand], {
+ stdio: ['pipe', 'pipe', 'pipe']
+});
+
+// Proxy STDIN (Aegis-AI -> WSL)
+process.stdin.pipe(child.stdin);
+
+// Proxy STDOUT (WSL -> Aegis-AI)
+child.stdout.pipe(process.stdout);
+
+// Proxy STDERR directly to process.stderr so Aegis logs it
+child.stderr.pipe(process.stderr);
+
+// When WSL exits (e.g., from Aegis stopping the inference agent)
+child.on('exit', (code) => {
+ // Kill the background auto-attach loop
+ try {
+ process.kill(-attachProcess.pid); // Kill process group
+ } catch(e) {
+ attachProcess.kill();
+ }
+ process.exit(code || 0);
+});
+
+// Handle graceful terminate
+process.on('SIGINT', () => {
+ child.kill('SIGINT');
+});
+
+process.on('SIGTERM', () => {
+ child.kill('SIGTERM');
+});
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/skills.json b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/skills.json
new file mode 100644
index 00000000..a2ad60ae
--- /dev/null
+++ b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/skills.json
@@ -0,0 +1,315 @@
+{
+ "version": "1.0.0",
+ "repository": "SharpAI/DeepCamera",
+ "updated": "2026-03-02",
+ "description": "AI skill catalog for SharpAI Aegis. Each skill is a self-contained folder with a SKILL.md manifest.",
+ "categories": {
+ "detection": "Object detection, person recognition, visual grounding",
+ "analysis": "VLM scene understanding, interactive segmentation",
+ "transformation": "Depth estimation, style transfer, video effects",
+ "privacy": "Privacy transforms — depth maps, blur, anonymization for blind mode",
+ "annotation": "Dataset labeling, COCO export, training data",
+ "segmentation": "Pixel-level object segmentation — SAM2, interactive masks",
+ "training": "Model fine-tuning, hardware-optimized export, deployment",
+ "camera-providers": "Camera brand integrations — clip feed, live stream",
+ "streaming": "RTSP/WebRTC live view via go2rtc",
+ "channels": "Messaging platform channels for Clawdbot agent",
+ "automation": "MQTT, webhooks, Home Assistant triggers",
+ "integrations": "Smart home and IoT platform bridges"
+ },
+ "skills": [
+ {
+ "id": "home-security-benchmark",
+ "name": "Home Security AI Benchmark",
+ "description": "LLM & VLM evaluation suite for home security AI — tests dedup, classification, tool use, and scene analysis.",
+ "version": "1.0.0",
+ "category": "analysis",
+ "path": "skills/analysis/home-security-benchmark",
+ "tags": [
+ "benchmark",
+ "llm",
+ "vlm",
+ "testing",
+ "evaluation",
+ "security"
+ ],
+ "platforms": [
+ "linux-x64",
+ "linux-arm64",
+ "darwin-arm64",
+ "darwin-x64",
+ "win-x64"
+ ],
+ "requirements": {
+ "node": ">=18",
+ "ram_gb": 1
+ },
+ "capabilities": [
+ "benchmark",
+ "report_generation"
+ ],
+ "ui_unlocks": [
+ "benchmark_report"
+ ]
+ },
+ {
+ "id": "yolo-detection-2026",
+ "name": "YOLO 2026",
+ "description": "State-of-the-art real-time object detection — 80+ COCO classes, bounding box overlays, multi-size model selection.",
+ "version": "1.0.0",
+ "category": "detection",
+ "path": "skills/detection/yolo-detection-2026",
+ "tags": [
+ "detection",
+ "yolo",
+ "object-detection",
+ "real-time",
+ "coco"
+ ],
+ "platforms": [
+ "linux-x64",
+ "linux-arm64",
+ "darwin-arm64",
+ "darwin-x64",
+ "win-x64"
+ ],
+ "requirements": {
+ "python": ">=3.9",
+ "ram_gb": 2
+ },
+ "capabilities": [
+ "live_detection",
+ "bbox_overlay"
+ ],
+ "ui_unlocks": [
+ "detection_overlay",
+ "detection_results"
+ ],
+ "fps_presets": [
+ 0.2,
+ 0.5,
+ 1,
+ 3,
+ 5,
+ 15
+ ],
+ "model_sizes": [
+ "nano",
+ "small",
+ "medium",
+ "large"
+ ]
+ },
+ {
+ "id": "yolo-detection-2026-coral-tpu",
+ "name": "YOLO 2026 Coral TPU",
+ "description": "Google Coral Edge TPU — real-time object detection with LiteRT (INT8, ~4ms inference at 320×320).",
+ "version": "2.0.0",
+ "category": "detection",
+ "path": "skills/detection/yolo-detection-2026-coral-tpu",
+ "tags": [
+ "detection",
+ "yolo",
+ "coral",
+ "edge-tpu",
+ "litert",
+ "real-time",
+ "coco"
+ ],
+ "platforms": [
+ "linux-x64",
+ "linux-arm64",
+ "darwin-arm64",
+ "darwin-x64",
+ "win-x64"
+ ],
+ "requirements": {
+ "python": ">=3.9",
+ "system": "libedgetpu",
+ "hardware": "Google Coral USB Accelerator"
+ },
+ "capabilities": [
+ "live_detection",
+ "bbox_overlay"
+ ],
+ "ui_unlocks": [
+ "detection_overlay",
+ "detection_results"
+ ],
+ "fps_presets": [
+ 0.2,
+ 0.5,
+ 1,
+ 3,
+ 5,
+ 15
+ ],
+ "model_sizes": [
+ "nano"
+ ]
+ },
+ {
+ "id": "camera-claw",
+ "name": "Camera Claw",
+ "description": "Security camera for your AI agent — sandbox, record, and monitor OpenClaw activity.",
+ "version": "2026.3.12",
+ "category": "integrations",
+ "url": "https://github.com/SharpAI/CameraClaw",
+ "repo_url": "https://github.com/SharpAI/CameraClaw",
+ "code_structure": [
+ { "path": "SKILL.md", "desc": "Aegis skill manifest (11 params)" },
+ { "path": "package.json", "desc": "Node.js dependencies" },
+ { "path": "config.yaml", "desc": "Default params" },
+ { "path": "deploy.sh", "desc": "Node.js + Docker bootstrapper" },
+ { "path": "deploy.bat", "desc": "Windows bootstrapper" },
+ { "path": "scripts/monitor.js", "desc": "Main entry — Docker orchestrator + JSONL protocol" },
+ { "path": "scripts/health-check.js", "desc": "Container health checker" },
+ { "path": "docs/aegis_openclaw_note.md", "desc": "Aegis integration requirements" }
+ ],
+ "tags": ["security", "sandbox", "monitoring", "openclaw", "ai-agent"],
+ "platforms": [
+ "linux-x64",
+ "linux-arm64",
+ "darwin-arm64",
+ "darwin-x64",
+ "win-x64"
+ ],
+ "requirements": {
+ "docker": true
+ },
+ "capabilities": [
+ "monitoring",
+ "recording"
+ ]
+ },
+ {
+ "id": "depth-estimation",
+ "name": "Depth Anything V2",
+ "description": "Privacy-first depth map transforms — anonymize camera feeds with Depth Anything v2 while preserving spatial awareness.",
+ "version": "1.1.0",
+ "category": "privacy",
+ "path": "skills/transformation/depth-estimation",
+ "tags": [
+ "privacy",
+ "depth",
+ "transform",
+ "anonymization",
+ "blind-mode"
+ ],
+ "platforms": [
+ "linux-x64",
+ "linux-arm64",
+ "darwin-arm64",
+ "darwin-x64",
+ "win-x64"
+ ],
+ "requirements": {
+ "python": ">=3.9",
+ "ram_gb": 2
+ },
+ "capabilities": [
+ "live_transform",
+ "privacy_overlay"
+ ],
+ "ui_unlocks": [
+ "privacy_overlay",
+ "blind_mode"
+ ]
+ },
+ {
+ "id": "model-training",
+ "name": "Model Training",
+ "disabled": true,
+ "description": "Agent-driven YOLO fine-tuning — annotate, train, auto-export to TensorRT/CoreML/OpenVINO, deploy as detection skill.",
+ "version": "1.0.0",
+ "category": "training",
+ "path": "skills/training/model-training",
+ "tags": [
+ "training",
+ "fine-tuning",
+ "yolo",
+ "custom-model",
+ "export"
+ ],
+ "platforms": [
+ "linux-x64",
+ "linux-arm64",
+ "darwin-arm64",
+ "darwin-x64",
+ "win-x64"
+ ],
+ "requirements": {
+ "python": ">=3.9",
+ "ram_gb": 4
+ },
+ "capabilities": [
+ "fine_tuning",
+ "model_export",
+ "deployment"
+ ]
+ },
+ {
+ "id": "segmentation-sam2",
+ "name": "SAM2 Segmentation",
+ "disabled": true,
+ "description": "Interactive click-to-segment using Segment Anything 2 — pixel-perfect masks, point/box prompts, video tracking.",
+ "version": "1.0.0",
+ "category": "segmentation",
+ "path": "skills/segmentation/sam2-segmentation",
+ "tags": [
+ "annotation",
+ "segmentation",
+ "sam2",
+ "labeling",
+ "masks"
+ ],
+ "platforms": [
+ "linux-x64",
+ "linux-arm64",
+ "darwin-arm64",
+ "darwin-x64",
+ "win-x64"
+ ],
+ "requirements": {
+ "python": ">=3.9",
+ "ram_gb": 4
+ },
+ "capabilities": [
+ "interactive_segmentation",
+ "video_tracking"
+ ]
+ },
+ {
+ "id": "annotation-data",
+ "name": "Annotation Data",
+ "disabled": true,
+ "description": "Dataset annotation management — COCO labels, sequences, export, and Kaggle upload for Annotation Studio.",
+ "version": "1.0.0",
+ "category": "annotation",
+ "path": "skills/annotation/dataset-management",
+ "tags": [
+ "annotation",
+ "dataset",
+ "coco",
+ "labeling"
+ ],
+ "platforms": [
+ "linux-x64",
+ "linux-arm64",
+ "darwin-arm64",
+ "darwin-x64",
+ "win-x64"
+ ],
+ "requirements": {
+ "python": ">=3.9"
+ },
+ "capabilities": [
+ "dataset_management",
+ "coco_export"
+ ],
+ "ui_unlocks": [
+ "annotation_studio"
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/test2.jpg b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/test2.jpg
new file mode 100644
index 00000000..6d0b17e9
Binary files /dev/null and b/skills/detection/yolo-detection-2026-coral-tpu-win-wsl/test2.jpg differ
diff --git a/skills/detection/yolo-detection-2026-coral-tpu/deploy.bat b/skills/detection/yolo-detection-2026-coral-tpu/deploy.bat
deleted file mode 100644
index b730f162..00000000
--- a/skills/detection/yolo-detection-2026-coral-tpu/deploy.bat
+++ /dev/null
@@ -1,216 +0,0 @@
-@echo off
-REM deploy.bat — Coral TPU Detection Skill installer for Windows
-REM
-REM What this does:
-REM 1. Downloads + installs the Edge TPU runtime (edgetpu.dll) via UAC
-REM 2. Creates a Python virtual environment (Python 3.9–3.11 recommended)
-REM 3. Installs ai-edge-litert and image processing deps
-REM 4. Verifies the compiled yolo26n_edgetpu.tflite model is present
-REM 5. Probes for an Edge TPU device
-REM
-REM Note: pycoral is NOT used. detect.py uses ai-edge-litert directly,
-REM which supports Python 3.9–3.13 and does not require pycoral.
-REM
-REM Exit codes:
-REM 0 = success (TPU detected and ready)
-REM 1 = fatal error
-REM 2 = partial success (no TPU detected, CPU fallback available)
-
-setlocal enabledelayedexpansion
-
-set "SKILL_DIR=%~dp0"
-set "LOG_PREFIX=[coral-tpu-deploy]"
-
-REM Ensure we run inside the skill folder
-cd /d "%SKILL_DIR%"
-
-echo %LOG_PREFIX% Platform: Windows 1>&2
-echo {"event": "progress", "stage": "platform", "message": "Windows installer starting..."}
-
-REM ─── Step 1: Edge TPU Runtime (UAC elevated install) ─────────────────────────
-REM Download the official Google Edge TPU runtime for Windows and install it.
-REM This places edgetpu.dll into C:\Windows\System32 (requires admin rights).
-
-echo %LOG_PREFIX% Downloading Edge TPU runtime... 1>&2
-echo {"event": "progress", "stage": "platform", "message": "Downloading Google Edge TPU runtime (edgetpu.dll)..."}
-
-set "TMP_DIR=%TEMP%\coral_tpu_install_%RANDOM%"
-mkdir "%TMP_DIR%"
-cd /d "%TMP_DIR%"
-
-powershell -NoProfile -Command ^
- "Invoke-WebRequest -Uri 'https://github.com/google-coral/libedgetpu/releases/download/release-grouper/edgetpu_runtime_20221024.zip' -OutFile 'edgetpu_runtime_20221024.zip' -UseBasicParsing"
-if %errorlevel% neq 0 (
- echo %LOG_PREFIX% ERROR: Failed to download Edge TPU runtime. Check internet connectivity. 1>&2
- echo {"event": "error", "stage": "platform", "message": "Download failed — check internet connectivity"}
- cd /d "%SKILL_DIR%"
- rmdir /S /Q "%TMP_DIR%" 2>nul
- exit /b 1
-)
-
-powershell -NoProfile -Command ^
- "Expand-Archive -Path 'edgetpu_runtime_20221024.zip' -DestinationPath '.' -Force"
-cd edgetpu_runtime
-
-echo %LOG_PREFIX% Prompting for Administrator rights to install drivers... 1>&2
-echo {"event": "progress", "stage": "platform", "message": "A UAC prompt will appear. Approve it to install edgetpu.dll system-wide."}
-
-REM Run install.bat elevated and wait for it to complete.
-REM The 'nul
- exit /b 1
-)
-
-cd /d "%SKILL_DIR%"
-rmdir /S /Q "%TMP_DIR%" 2>nul
-echo %LOG_PREFIX% Edge TPU runtime installed. 1>&2
-echo {"event": "progress", "stage": "platform", "message": "Edge TPU runtime installed successfully."}
-
-REM ─── Step 2: Find Python ─────────────────────────────────────────────────────
-REM ai-edge-litert supports Python 3.9–3.13. We prefer the system default.
-REM If only Python 3.12+ is available, it still works (no pycoral needed).
-
-set "PYTHON_CMD="
-
-REM Try common Python launchers in preference order
-for %%P in (python python3 py) do (
- if not defined PYTHON_CMD (
- %%P --version >nul 2>&1
- if !errorlevel! equ 0 (
- set "PYTHON_CMD=%%P"
- )
- )
-)
-
-if not defined PYTHON_CMD (
- echo %LOG_PREFIX% ERROR: Python not found on PATH. 1>&2
- echo {"event": "error", "stage": "python", "message": "Python not found — install Python 3.9-3.11 from python.org and re-run"}
- exit /b 1
-)
-
-REM Get Python version for info only (not blocking — ai-edge-litert works 3.9-3.13)
-for /f "tokens=2" %%V in ('!PYTHON_CMD! --version 2^>^&1') do set "PY_VERSION=%%V"
-echo %LOG_PREFIX% Python version: !PY_VERSION! 1>&2
-echo {"event": "progress", "stage": "python", "message": "Using Python !PY_VERSION!"}
-
-REM ─── Step 3: Create virtual environment ──────────────────────────────────────
-
-set "VENV_DIR=%SKILL_DIR%venv"
-echo %LOG_PREFIX% Creating virtual environment at %VENV_DIR%... 1>&2
-echo {"event": "progress", "stage": "build", "message": "Creating Python virtual environment..."}
-
-!PYTHON_CMD! -m venv "%VENV_DIR%"
-
-if not exist "%VENV_DIR%\Scripts\python.exe" (
- echo %LOG_PREFIX% ERROR: Failed to create virtual environment. 1>&2
- echo {"event": "error", "stage": "build", "message": "venv creation failed"}
- exit /b 1
-)
-
-REM ─── Step 4: Install Python dependencies ─────────────────────────────────────
-REM ai-edge-litert: LiteRT runtime with Edge TPU delegate support (Python 3.9-3.13)
-REM numpy + Pillow: image processing
-
-echo %LOG_PREFIX% Upgrading pip... 1>&2
-"%VENV_DIR%\Scripts\python.exe" -m pip install --upgrade pip --quiet
-
-echo %LOG_PREFIX% Installing dependencies (ai-edge-litert, numpy, Pillow)... 1>&2
-echo {"event": "progress", "stage": "build", "message": "Installing ai-edge-litert and image processing libraries..."}
-
-"%VENV_DIR%\Scripts\python.exe" -m pip install -r "%SKILL_DIR%requirements.txt" --quiet
-if %errorlevel% neq 0 (
- echo %LOG_PREFIX% ERROR: pip install failed. 1>&2
- echo {"event": "error", "stage": "build", "message": "pip install requirements.txt failed"}
- exit /b 1
-)
-
-echo %LOG_PREFIX% Dependencies installed. 1>&2
-echo {"event": "progress", "stage": "build", "message": "Python dependencies installed successfully."}
-
-REM ─── Step 5: Verify compiled EdgeTPU model ────────────────────────────────────
-REM The yolo26n_edgetpu.tflite is pre-compiled via docker/compile.sh and committed
-REM to the git repository. deploy.bat does NOT compile it — that requires Linux.
-
-echo %LOG_PREFIX% Checking for compiled EdgeTPU model... 1>&2
-
-set "MODEL_FOUND=false"
-set "MODEL_FILE="
-
-REM Accept either naming convention from edgetpu_compiler output
-for %%M in (
- "%SKILL_DIR%models\yolo26n_int8_edgetpu.tflite"
- "%SKILL_DIR%models\yolo26n_edgetpu.tflite"
- "%SKILL_DIR%models\yolo26n_320_edgetpu.tflite"
-) do (
- if exist %%M (
- set "MODEL_FOUND=true"
- set "MODEL_FILE=%%~M"
- )
-)
-
-if "!MODEL_FOUND!"=="false" (
- echo %LOG_PREFIX% WARNING: No pre-compiled EdgeTPU model found in models\. 1>&2
- echo {"event": "progress", "stage": "model", "message": "No EdgeTPU model found — will fall back to CPU inference (SSD MobileNet)"}
-
- REM Download SSD MobileNet as a CPU fallback so the skill is functional immediately
- echo %LOG_PREFIX% Downloading SSD MobileNet CPU fallback model... 1>&2
- if not exist "%SKILL_DIR%models" mkdir "%SKILL_DIR%models"
-
- powershell -NoProfile -Command ^
- "Invoke-WebRequest -Uri 'https://github.com/google-coral/edgetpu/raw/master/test_data/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite' -OutFile '%SKILL_DIR%models\ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite' -UseBasicParsing" 2>nul
- powershell -NoProfile -Command ^
- "Invoke-WebRequest -Uri 'https://github.com/google-coral/edgetpu/raw/master/test_data/ssd_mobilenet_v2_coco_quant_postprocess.tflite' -OutFile '%SKILL_DIR%models\ssd_mobilenet_v2_coco_quant_postprocess.tflite' -UseBasicParsing" 2>nul
-
- echo {"event": "progress", "stage": "model", "message": "SSD MobileNet fallback downloaded. For YOLO 2026, run docker/compile.sh on Linux or macOS."}
-) else (
- echo %LOG_PREFIX% Found model: !MODEL_FILE! 1>&2
- echo {"event": "progress", "stage": "model", "message": "Edge TPU model ready: yolo26n_edgetpu.tflite"}
-)
-
-REM ─── Step 6: Probe for Edge TPU devices ──────────────────────────────────────
-
-echo %LOG_PREFIX% Probing for Edge TPU devices... 1>&2
-echo {"event": "progress", "stage": "probe", "message": "Checking for Coral USB Accelerator..."}
-
-set "TPU_FOUND=false"
-set "PROBE_JSON="
-
-for /f "delims=" %%I in ('"%VENV_DIR%\Scripts\python.exe" "%SKILL_DIR%scripts\tpu_probe.py" 2^>nul') do (
- set "PROBE_JSON=%%I"
-)
-
-echo !PROBE_JSON! | findstr /C:"\"available\": true" >nul 2>&1
-if %errorlevel% equ 0 (
- set "TPU_FOUND=true"
- echo %LOG_PREFIX% Edge TPU detected. 1>&2
- echo {"event": "progress", "stage": "probe", "message": "Coral USB Accelerator detected and ready."}
-) else (
- echo %LOG_PREFIX% No Edge TPU detected (device may not be plugged in). 1>&2
- echo {"event": "progress", "stage": "probe", "message": "No Edge TPU detected. Plug in the Coral USB Accelerator and restart the skill."}
-)
-
-REM ─── Step 7: Done ────────────────────────────────────────────────────────────
-
-if "!TPU_FOUND!"=="true" (
- echo {"event": "complete", "status": "success", "tpu_found": true, "message": "Coral TPU skill installed and Edge TPU is ready."}
- exit /b 0
-) else (
- echo {"event": "complete", "status": "partial", "tpu_found": false, "message": "Coral TPU skill installed. Plug in your Coral USB Accelerator to enable hardware acceleration."}
- exit /b 0
-)