From 7b07cd955e73c03abbd85c43dd8aa29e2edfe7e4 Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Mon, 4 May 2026 14:03:38 -0400 Subject: [PATCH 01/22] Add Portable AI Kit integration spec --- docs/integration/portable-ai-kit.md | 184 ++++++++++++++++++++++++++++ 1 file changed, 184 insertions(+) create mode 100644 docs/integration/portable-ai-kit.md diff --git a/docs/integration/portable-ai-kit.md b/docs/integration/portable-ai-kit.md new file mode 100644 index 0000000..50c86dd --- /dev/null +++ b/docs/integration/portable-ai-kit.md @@ -0,0 +1,184 @@ +# SourceOS Portable AI Kit + +SourceOS Portable AI Kit is the pocketable local-AI appliance mode for SourceOS. + +The product promise is deliberately simple: prepare a USB drive or portable SSD once, carry a governed local AI workstation, and run it on a supported host without sending prompts or chat state off-device by default. + +This workstream absorbs the useful product pattern from simple portable Ollama/AnythingLLM USB projects, but raises the bar to SourceOS requirements: signed manifests, explicit model provenance, policy gates, host-write auditability, secret-free evidence, and integration with Agent Machine, Local Model Door, model-router, AgentPlane, Policy Fabric, TurtleTerm, AgentTerm, and BearBrowser. + +## Position + +Portable AI Kit is not a new agent brain, model registry, or governance authority. + +It is the installable workstation-side product surface for: + +- portable-root preflight checks; +- portable model pack planning; +- USB/SSD layout materialization; +- runtime launch planning; +- zero-trace and host-write audit posture; +- local model route evidence; +- TurtleTerm/AgentTerm/BearBrowser launch handoff; +- Agent Machine runtime receipts. 
+ +## User-facing command surface + +Initial commands: + +```text +sourceosctl portable-ai preflight [--benchmark] +sourceosctl portable-ai profiles +sourceosctl portable-ai prepare --profile laptop-safe --dry-run +sourceosctl portable-ai prepare --profile laptop-safe --execute --policy-ok +sourceosctl portable-ai start-plan --surface turtleterm +sourceosctl portable-ai inspect +sourceosctl portable-ai evidence inspect +``` + +All commands are read-only or dry-run by default. Materialization requires `--execute --policy-ok` and emits evidence. Runtime start remains a plan until Agent Machine activation gates are present. + +## Portable root layout + +```text +SOURCEOS_AI/ +├── manifests/ +│ ├── portable-ai-root.json +│ └── model-carry-pack.*.json +├── runtimes/ +│ ├── ollama/ +│ ├── llama-cpp/ +│ └── openai-compatible-local/ +├── models/ +│ ├── blobs/ +│ └── modelfiles/ +├── cache/ +│ ├── embeddings/ +│ ├── retrieval/ +│ └── prompt-prefix/ +├── state/ +│ ├── chat/ +│ ├── workrooms/ +│ └── routes/ +├── surfaces/ +│ ├── turtleterm/ +│ ├── agent-term/ +│ └── bearbrowser/ +├── evidence/ +│ ├── preflight/ +│ ├── materialization/ +│ ├── activation/ +│ └── wipe/ +└── tmp/ +``` + +The portable root must not contain inline credentials, private keys, cloud tokens, or user enrollment secrets. + +## Preflight requirements + +`preflight` must check: + +- target root exists or parent is writable; +- filesystem large-file support is suitable for GGUF/model blobs; +- free space meets selected model-pack class; +- mount is not read-only; +- optional read/write benchmark; +- CPU architecture; +- RAM class; +- local runtime availability; +- existing manifest validity; +- evidence directory writability; +- whether the target appears removable or explicitly user-approved. + +The command must return structured JSON so TurtleTerm, AgentTerm, BearBrowser, and the website can render the same decision. 
+ +## Profiles + +Initial portable profiles: + +| Profile | Purpose | Minimum target | Default posture | +| --- | --- | --- | --- | +| `tiny-router` | local routing, triage, rewrite | 8 GB free | local-only, no tools | +| `laptop-safe` | general offline fallback and Office assist | 16 GB free | local-only, no prompt egress | +| `office-local` | Office Plane summarization and document assistance | 32 GB free | workroom-scoped | +| `code-local` | local coding assistant and repo triage | 32 GB free | repo-scoped | +| `field-kit` | portable SSD field/operator kit | 64 GB free | evidence-first | +| `byom-gguf` | bring-your-own GGUF import | varies | manifest + hash required | + +## Security posture + +Default policy: + +- prompt egress denied; +- model downloads require explicit operator action; +- model blobs require signed or pinned-hash manifests; +- host `$HOME` writes denied by default; +- host cache writes denied by default; +- runtime ports bind to loopback only unless policy grants otherwise; +- tool use denied by default; +- wipe receipts required for zero-trace mode; +- evidence stores hashes and refs, not prompt bodies. + +## Launch posture + +A launch plan should describe what would start, not start it implicitly: + +- runtime provider: Ollama-compatible, llama.cpp, MLX/oMLX compatibility, or OpenAI-compatible local server; +- model profile and local model reference; +- portable state root; +- localhost port binding; +- selected surface: TurtleTerm, AgentTerm, BearBrowser, or local web fallback; +- required Agent Machine activation decision; +- expected evidence outputs. + +## Capability parity target + +The first parity target is to match simple portable USB projects at the product surface while exceeding them on governance. 
+ +| Capability | Required SourceOS answer | +| --- | --- | +| USB/SSD preflight | `portable-ai preflight` with JSON evidence | +| Model menu | signed `ModelCarryPack` profiles | +| Custom GGUF | BYOM import plan with hash/license/provenance checks | +| Local chat UI | TurtleTerm plus local web fallback; optional AnythingLLM adapter | +| Ollama support | runtime profile, not sole authority | +| Offline mode | prompt egress denied and model route local-only by default | +| Zero trace | portable state root plus host-write audit and wipe receipt | +| Multi-platform | Linux-first, macOS/Windows compatibility as adapters | +| Safe shutdown | Agent Machine activation/teardown receipts | +| User clarity | one prepare path, one inspect path, one start plan | + +## Acceptance criteria + +M1 is complete when: + +1. `sourceosctl portable-ai preflight` renders structured JSON without mutating the target. +2. `sourceosctl portable-ai profiles` lists built-in portable profiles. +3. `sourceosctl portable-ai prepare --dry-run` renders a portable-root materialization plan. +4. `sourceosctl portable-ai prepare --execute --policy-ok` creates only declared directories and writes evidence. +5. `sourceosctl portable-ai start-plan` renders a launch plan without starting daemons. +6. `sourceos-model-carry` owns the portable model-pack schema and examples. +7. `agent-machine` owns runtime activation and teardown evidence semantics. +8. README documents the one-command demo path. + +M2 is complete when: + +1. model-pack manifests include pinned hashes and license/provenance fields; +2. BYOM GGUF import validates file presence and hash before route eligibility; +3. TurtleTerm can consume `start-plan` output; +4. Agent Machine emits portable runtime receipts; +5. website/product docs present the portable kit as a first-class SourceOS capability. 
+ +## Integration homes + +| Repo | Responsibility | +| --- | --- | +| `SourceOS-Linux/sourceos-devtools` | CLI, preflight, prepare, inspect, launch-plan surface | +| `SourceOS-Linux/sourceos-model-carry` | portable model-pack schemas, examples, provenance expectations | +| `SourceOS-Linux/agent-machine` | provider activation, teardown, cache/model residency receipts | +| `SourceOS-Linux/sourceos-spec` | promoted stable contracts | +| `SourceOS-Linux/TurtleTerm` | first-class terminal UI surface | +| `SourceOS-Linux/agent-term` | ChatOps/operator event surface | +| `SocioProphet/model-router` | governed local/hosted routing | +| `SocioProphet/policy-fabric` | side-effect and prompt-egress policy | +| `SocioProphet/agentplane` | run/evidence submission and replay | +| `SocioProphet/prophet-workspace` | workroom and artifact semantics | From 4de98b12d533a532eab02a1e2bb4bd884e391cbb Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Mon, 4 May 2026 14:20:59 -0400 Subject: [PATCH 02/22] Add portable AI command implementation scaffold --- sourceosctl/commands/portable_ai.py | 373 ++++++++++++++++++++++++++++ 1 file changed, 373 insertions(+) create mode 100644 sourceosctl/commands/portable_ai.py diff --git a/sourceosctl/commands/portable_ai.py b/sourceosctl/commands/portable_ai.py new file mode 100644 index 0000000..31c72d2 --- /dev/null +++ b/sourceosctl/commands/portable_ai.py @@ -0,0 +1,373 @@ +"""Portable AI Kit helpers. + +This module renders portable-AI preflight, profile, prepare, and launch plans. +It does not download model weights, start daemons, run inference, or write outside +an explicitly-approved portable root. 
+""" + +from __future__ import annotations + +import datetime as _dt +import json +import os +import platform +import shutil +import stat +import sys +from pathlib import Path +from typing import Any, Dict + + +PORTABLE_LAYOUT_VERSION = "sourceos.portable-ai/v1alpha1" + +PORTABLE_PROFILES: dict[str, dict[str, Any]] = { + "tiny-router": { + "displayName": "Tiny Router Kit", + "minimumFreeGb": 8, + "recommendedFreeGb": 16, + "roles": ["router", "triage", "rewrite", "summarization"], + "surfaces": ["turtleterm", "agent-term", "local-web"], + "policy": {"promptEgressDefault": "deny", "toolUseDefault": "deny", "hostWritesDefault": "deny"}, + }, + "laptop-safe": { + "displayName": "Laptop-safe Portable AI Kit", + "minimumFreeGb": 16, + "recommendedFreeGb": 32, + "roles": ["offline-fallback", "office-assist", "privacy-first-chat", "rewrite"], + "surfaces": ["turtleterm", "bearbrowser", "agent-term", "local-web"], + "policy": {"promptEgressDefault": "deny", "toolUseDefault": "deny", "hostWritesDefault": "deny"}, + }, + "office-local": { + "displayName": "Office-local Portable AI Kit", + "minimumFreeGb": 32, + "recommendedFreeGb": 64, + "roles": ["office-assist", "summarization", "artifact-drafting", "workroom-local"], + "surfaces": ["bearbrowser", "turtleterm", "local-web"], + "policy": {"promptEgressDefault": "deny", "toolUseDefault": "deny", "hostWritesDefault": "workroom-scoped"}, + }, + "code-local": { + "displayName": "Code-local Portable AI Kit", + "minimumFreeGb": 32, + "recommendedFreeGb": 64, + "roles": ["coding-assist", "repo-triage", "rewrite", "summarization"], + "surfaces": ["turtleterm", "agent-term", "local-web"], + "policy": {"promptEgressDefault": "deny", "toolUseDefault": "deny", "hostWritesDefault": "repo-scoped"}, + }, + "field-kit": { + "displayName": "Field Operator Portable AI Kit", + "minimumFreeGb": 64, + "recommendedFreeGb": 128, + "roles": ["offline-fallback", "operator-assist", "evidence-inspection", "field-workroom"], + "surfaces": 
["turtleterm", "agent-term", "bearbrowser", "local-web"], + "policy": {"promptEgressDefault": "deny", "toolUseDefault": "deny", "hostWritesDefault": "evidence-scoped"}, + }, + "byom-gguf": { + "displayName": "Bring-your-own GGUF Portable Kit", + "minimumFreeGb": 8, + "recommendedFreeGb": 64, + "roles": ["operator-selected"], + "surfaces": ["turtleterm", "agent-term", "local-web"], + "policy": {"promptEgressDefault": "deny", "toolUseDefault": "deny", "hostWritesDefault": "deny", "requiresHashBeforeEligibility": True}, + }, +} + +PORTABLE_DIRS = [ + "manifests", + "runtimes/ollama", + "runtimes/llama-cpp", + "runtimes/openai-compatible-local", + "models/blobs", + "models/modelfiles", + "cache/embeddings", + "cache/retrieval", + "cache/prompt-prefix", + "state/chat", + "state/workrooms", + "state/routes", + "surfaces/turtleterm", + "surfaces/agent-term", + "surfaces/bearbrowser", + "evidence/preflight", + "evidence/materialization", + "evidence/activation", + "evidence/wipe", + "tmp", +] + + +def _now() -> str: + return _dt.datetime.now(_dt.timezone.utc).isoformat() + + +def _print_json(payload: Dict[str, Any]) -> int: + print(json.dumps(payload, indent=2, sort_keys=True)) + return 0 + + +def _target(path_value: str) -> Path: + return Path(path_value).expanduser().resolve() + + +def _disk_usage_gb(path: Path) -> dict[str, float | None]: + probe = path if path.exists() else path.parent + try: + total, used, free = shutil.disk_usage(probe) + except FileNotFoundError: + return {"totalGb": None, "usedGb": None, "freeGb": None} + gb = 1024 ** 3 + return { + "totalGb": round(total / gb, 2), + "usedGb": round(used / gb, 2), + "freeGb": round(free / gb, 2), + } + + +def _writable(path: Path) -> bool: + probe = path if path.exists() else path.parent + return probe.exists() and os.access(probe, os.W_OK) + + +def _large_file_warning(path: Path) -> str | None: + # Python's stdlib does not expose portable fs type for every platform. 
+ # Keep this conservative; Linux/macOS launchers can add richer fs probing. + name = str(path).lower() + if "fat32" in name or "vfat" in name: + return "target path appears to reference FAT32/VFAT; GGUF files larger than 4GB may fail" + return None + + +def _runtime_paths() -> dict[str, str | None]: + return { + "ollama": shutil.which("ollama"), + "llama-cpp": shutil.which("llama-server") or shutil.which("llama.cpp"), + "python3": shutil.which("python3"), + } + + +def _profile(name: str) -> dict[str, Any]: + try: + return PORTABLE_PROFILES[name] + except KeyError: + known = ", ".join(sorted(PORTABLE_PROFILES)) + raise SystemExit(f"unknown portable AI profile: {name}; known profiles: {known}") + + +def profiles(args) -> int: + return _print_json( + { + "type": "PortableAIProfiles", + "apiVersion": PORTABLE_LAYOUT_VERSION, + "profiles": PORTABLE_PROFILES, + "policy": { + "defaultMutability": "dry-run", + "modelDownloads": "explicit-only", + "promptEgressDefault": "deny", + "hostWritesDefault": "deny", + }, + } + ) + + +def preflight(args) -> int: + target = _target(args.target_root) + usage = _disk_usage_gb(target) + warning = _large_file_warning(target) + runtime_paths = _runtime_paths() + exists = target.exists() + writable = _writable(target) + free_gb = usage.get("freeGb") + failures: list[str] = [] + warnings: list[str] = [] + + if not exists and not target.parent.exists(): + failures.append("target parent does not exist") + if not writable: + failures.append("target or parent is not writable") + if warning: + warnings.append(warning) + if free_gb is not None and free_gb < 8: + failures.append("less than 8GB free; no built-in portable profile can be prepared safely") + elif free_gb is not None and free_gb < 16: + warnings.append("less than 16GB free; only tiny-router or small BYOM profiles are realistic") + + decision = "fail" if failures else "warn" if warnings else "pass" + + return _print_json( + { + "type": "PortablePreflightEvidence", + "apiVersion": 
PORTABLE_LAYOUT_VERSION, + "capturedAt": _now(), + "targetRoot": str(target), + "exists": exists, + "writable": writable, + "disk": usage, + "host": { + "system": platform.system(), + "machine": platform.machine(), + "platform": platform.platform(), + }, + "runtimePaths": runtime_paths, + "benchmarkRequested": bool(getattr(args, "benchmark", False)), + "benchmarkPerformed": False, + "largeFileSupportWarning": warning, + "failures": failures, + "warnings": warnings, + "decision": decision, + "mutatesTarget": False, + } + ) + + +def _portable_root_manifest(target: Path, profile_name: str) -> dict[str, Any]: + profile = _profile(profile_name) + return { + "type": "PortableAIRoot", + "apiVersion": PORTABLE_LAYOUT_VERSION, + "id": f"urn:srcos:portable-ai-root:{target.name or 'portable-root'}", + "createdAt": _now(), + "targetRoot": str(target), + "layoutVersion": PORTABLE_LAYOUT_VERSION, + "profile": profile_name, + "profileDisplayName": profile["displayName"], + "directories": PORTABLE_DIRS, + "surfaces": profile["surfaces"], + "roles": profile["roles"], + "policy": { + **profile["policy"], + "modelDownloads": "explicit-only", + "runtimeActivation": "agent-machine-gated", + "bindAddressDefault": "127.0.0.1", + "evidenceRequired": True, + }, + } + + +def prepare(args) -> int: + target = _target(args.target_root) + profile_name = args.profile + profile = _profile(profile_name) + manifest = _portable_root_manifest(target, profile_name) + directories = [str(target / rel) for rel in PORTABLE_DIRS] + + if not getattr(args, "execute", False): + return _print_json( + { + "type": "PortablePreparePlan", + "apiVersion": PORTABLE_LAYOUT_VERSION, + "capturedAt": _now(), + "targetRoot": str(target), + "profile": profile_name, + "profileDetails": profile, + "wouldCreateDirectories": directories, + "wouldWriteManifest": str(target / "manifests" / "portable-ai-root.json"), + "wouldWriteEvidence": bool(getattr(args, "evidence_out", None)), + "wouldDownloadModels": False, + 
"wouldStartRuntime": False, + "requiresExecuteAndPolicyOk": True, + } + ) + + if not getattr(args, "policy_ok", False): + print("error: --execute requires --policy-ok for portable AI materialization", file=sys.stderr) + return 2 + + target.mkdir(parents=True, exist_ok=True) + for rel in PORTABLE_DIRS: + (target / rel).mkdir(parents=True, exist_ok=True) + + manifest_path = target / "manifests" / "portable-ai-root.json" + manifest_path.write_text(json.dumps(manifest, indent=2, sort_keys=True) + "\n", encoding="utf-8") + + evidence = { + "type": "PortableMaterializationEvidence", + "apiVersion": PORTABLE_LAYOUT_VERSION, + "capturedAt": _now(), + "targetRoot": str(target), + "profile": profile_name, + "createdDirectories": directories, + "manifestPath": str(manifest_path), + "downloadedModels": False, + "startedRuntime": False, + "promptEgressDefault": "deny", + "hostWritesDefault": profile["policy"].get("hostWritesDefault", "deny"), + } + if getattr(args, "evidence_out", None): + Path(args.evidence_out).expanduser().write_text(json.dumps(evidence, indent=2, sort_keys=True) + "\n", encoding="utf-8") + + return _print_json(evidence) + + +def start_plan(args) -> int: + target = _target(args.target_root) + surface = args.surface + manifest_path = target / "manifests" / "portable-ai-root.json" + manifest = None + if manifest_path.exists(): + try: + manifest = json.loads(manifest_path.read_text(encoding="utf-8")) + except json.JSONDecodeError: + manifest = {"error": "portable-ai-root.json is not valid JSON"} + + return _print_json( + { + "type": "PortableAIStartPlan", + "apiVersion": PORTABLE_LAYOUT_VERSION, + "capturedAt": _now(), + "targetRoot": str(target), + "manifestPath": str(manifest_path), + "manifestPresent": manifest_path.exists(), + "manifest": manifest, + "surface": surface, + "runtimeProviderOrder": ["llama.cpp", "ollama-compatible", "openai-compatible-local"], + "bindAddress": "127.0.0.1", + "wouldStartRuntime": False, + "requiresAgentMachineActivation": True, 
+ "requiresPolicyAdmission": True, + "requiresAgentRegistryGrant": True, + "promptEgressDefault": "deny", + "hostWritesDefault": "deny", + "routeDescriptorSecretFree": True, + } + ) + + +def inspect(args) -> int: + target = _target(args.target_root) + paths = {rel: (target / rel).exists() for rel in PORTABLE_DIRS} + manifest_path = target / "manifests" / "portable-ai-root.json" + return _print_json( + { + "type": "PortableAIInspect", + "apiVersion": PORTABLE_LAYOUT_VERSION, + "capturedAt": _now(), + "targetRoot": str(target), + "exists": target.exists(), + "manifestPath": str(manifest_path), + "manifestPresent": manifest_path.exists(), + "directories": paths, + "disk": _disk_usage_gb(target), + } + ) + + +def evidence_inspect(args) -> int: + path = Path(args.path).expanduser() + if not path.exists(): + print(f"error: evidence file not found: {path}", file=sys.stderr) + return 1 + try: + payload = json.loads(path.read_text(encoding="utf-8")) + except json.JSONDecodeError as exc: + print(f"error: invalid JSON: {exc}", file=sys.stderr) + return 1 + return _print_json( + { + "path": str(path), + "type": payload.get("type"), + "apiVersion": payload.get("apiVersion"), + "targetRoot": payload.get("targetRoot"), + "decision": payload.get("decision"), + "promptEgressDefault": payload.get("promptEgressDefault"), + "hostWritesDefault": payload.get("hostWritesDefault"), + } + ) From 34488b033636d64271613e3d63983f25d1a2df0a Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Mon, 4 May 2026 14:24:03 -0400 Subject: [PATCH 03/22] Add portable AI standalone CLI entrypoint --- bin/sourceos-portable-ai | 67 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 bin/sourceos-portable-ai diff --git a/bin/sourceos-portable-ai b/bin/sourceos-portable-ai new file mode 100644 index 0000000..160c421 --- /dev/null +++ b/bin/sourceos-portable-ai @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +"""Standalone SourceOS 
Portable AI Kit CLI. + +This entrypoint exists so the portable-AI implementation can be exercised before it +is promoted into the main sourceosctl parser. +""" + +from __future__ import annotations + +import argparse +import sys + +from sourceosctl.commands import portable_ai + + +def build_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + prog="sourceos-portable-ai", + description="SourceOS Portable AI Kit helpers (dry-run / evidence-first surface)", + ) + sub = parser.add_subparsers(dest="command", metavar="") + sub.required = True + + profiles_p = sub.add_parser("profiles", help="List built-in portable AI profiles") + profiles_p.set_defaults(func=portable_ai.profiles) + + preflight_p = sub.add_parser("preflight", help="Inspect a portable root target without mutating it") + preflight_p.add_argument("target_root", help="Target USB/SSD portable root") + preflight_p.add_argument("--benchmark", action="store_true", default=False, help="Reserve flag for future read/write benchmark") + preflight_p.set_defaults(func=portable_ai.preflight) + + prepare_p = sub.add_parser("prepare", help="Render or execute portable root materialization") + prepare_p.add_argument("target_root", help="Target USB/SSD portable root") + prepare_p.add_argument("--profile", default="laptop-safe", choices=sorted(portable_ai.PORTABLE_PROFILES), help="Portable profile") + prepare_p.add_argument("--dry-run", action="store_true", default=True, dest="dry_run", help="Render plan without writing files") + prepare_p.add_argument("--execute", action="store_true", default=False, help="Create declared portable-root directories and manifest") + prepare_p.add_argument("--policy-ok", action="store_true", default=False, help="Confirm policy/operator approval for materialization") + prepare_p.add_argument("--evidence-out", default=None, help="Optional evidence JSON path") + prepare_p.set_defaults(func=portable_ai.prepare) + + start_p = sub.add_parser("start-plan", help="Render a local 
runtime/surface launch plan without starting daemons") + start_p.add_argument("target_root", help="Target USB/SSD portable root") + start_p.add_argument("--surface", default="turtleterm", choices=["turtleterm", "agent-term", "bearbrowser", "local-web", "anythingllm-adapter"], help="Launch surface") + start_p.set_defaults(func=portable_ai.start_plan) + + inspect_p = sub.add_parser("inspect", help="Inspect portable root layout state") + inspect_p.add_argument("target_root", help="Target USB/SSD portable root") + inspect_p.set_defaults(func=portable_ai.inspect) + + evidence_p = sub.add_parser("evidence", help="Portable AI evidence helpers") + evidence_sub = evidence_p.add_subparsers(dest="evidence_command", metavar="") + evidence_sub.required = True + evidence_inspect_p = evidence_sub.add_parser("inspect", help="Inspect portable AI evidence JSON") + evidence_inspect_p.add_argument("path", help="Evidence JSON path") + evidence_inspect_p.set_defaults(func=portable_ai.evidence_inspect) + + return parser + + +def main(argv=None) -> int: + parser = build_parser() + args = parser.parse_args(argv) + return args.func(args) or 0 + + +if __name__ == "__main__": + sys.exit(main()) From 8bea48155e20c92a723354160656efa20517e496 Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Mon, 4 May 2026 15:48:44 -0400 Subject: [PATCH 04/22] Add portable AI sourceosctl plugin parser --- sourceosctl/commands/portable_ai_cli.py | 106 ++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 sourceosctl/commands/portable_ai_cli.py diff --git a/sourceosctl/commands/portable_ai_cli.py b/sourceosctl/commands/portable_ai_cli.py new file mode 100644 index 0000000..794ca20 --- /dev/null +++ b/sourceosctl/commands/portable_ai_cli.py @@ -0,0 +1,106 @@ +"""Argument parser for the SourceOS Portable AI Kit command group.""" + +from __future__ import annotations + +import argparse + +from sourceosctl.commands import portable_ai + + +SURFACES = [ + 
"turtleterm", + "agent-term", + "bearbrowser", + "local-web", + "anythingllm-adapter", +] + + +def build_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + prog="sourceosctl portable-ai", + description="SourceOS Portable AI Kit helpers (dry-run / evidence-first surface)", + ) + sub = parser.add_subparsers(dest="command", metavar="") + sub.required = True + + profiles_p = sub.add_parser("profiles", help="List built-in portable AI profiles") + profiles_p.set_defaults(func=portable_ai.profiles) + + preflight_p = sub.add_parser( + "preflight", + help="Inspect a portable root target without mutating it", + ) + preflight_p.add_argument("target_root", help="Target USB/SSD portable root") + preflight_p.add_argument( + "--benchmark", + action="store_true", + default=False, + help="Reserve flag for future read/write benchmark", + ) + preflight_p.set_defaults(func=portable_ai.preflight) + + prepare_p = sub.add_parser( + "prepare", + help="Render or execute portable root materialization", + ) + prepare_p.add_argument("target_root", help="Target USB/SSD portable root") + prepare_p.add_argument( + "--profile", + default="laptop-safe", + choices=sorted(portable_ai.PORTABLE_PROFILES), + help="Portable profile", + ) + prepare_p.add_argument( + "--dry-run", + action="store_true", + default=True, + dest="dry_run", + help="Render plan without writing files", + ) + prepare_p.add_argument( + "--execute", + action="store_true", + default=False, + help="Create declared portable-root directories and manifest", + ) + prepare_p.add_argument( + "--policy-ok", + action="store_true", + default=False, + help="Confirm policy/operator approval for materialization", + ) + prepare_p.add_argument("--evidence-out", default=None, help="Optional evidence JSON path") + prepare_p.set_defaults(func=portable_ai.prepare) + + start_p = sub.add_parser( + "start-plan", + help="Render a local runtime/surface launch plan without starting daemons", + ) + start_p.add_argument("target_root", 
help="Target USB/SSD portable root") + start_p.add_argument( + "--surface", + default="turtleterm", + choices=SURFACES, + help="Launch surface", + ) + start_p.set_defaults(func=portable_ai.start_plan) + + inspect_p = sub.add_parser("inspect", help="Inspect portable root layout state") + inspect_p.add_argument("target_root", help="Target USB/SSD portable root") + inspect_p.set_defaults(func=portable_ai.inspect) + + evidence_p = sub.add_parser("evidence", help="Portable AI evidence helpers") + evidence_sub = evidence_p.add_subparsers(dest="evidence_command", metavar="") + evidence_sub.required = True + evidence_inspect_p = evidence_sub.add_parser("inspect", help="Inspect portable AI evidence JSON") + evidence_inspect_p.add_argument("path", help="Evidence JSON path") + evidence_inspect_p.set_defaults(func=portable_ai.evidence_inspect) + + return parser + + +def main(argv=None) -> int: + parser = build_parser() + args = parser.parse_args(argv) + return args.func(args) or 0 From 2d9f85917136e9725b269af64ecfd34b3d5bfb8b Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Mon, 4 May 2026 15:49:56 -0400 Subject: [PATCH 05/22] Route portable-ai through sourceosctl entrypoint --- bin/sourceosctl | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 bin/sourceosctl diff --git a/bin/sourceosctl b/bin/sourceosctl new file mode 100644 index 0000000..de43ea2 --- /dev/null +++ b/bin/sourceosctl @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +"""sourceosctl entry-point script.""" +import sys +import os + +# Allow running directly from the repo root without installing. +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +# Lightweight plugin routing for newer command groups while keeping the core +# argparse surface stable. These command groups are non-mutating plan/probe +# surfaces and own their own subparsers. 
+if len(sys.argv) > 1 and sys.argv[1] == "network": + from sourceosctl.commands.network import network_main + + sys.exit(network_main(sys.argv[2:])) + +if len(sys.argv) > 1 and sys.argv[1] == "native-assistant": + from sourceosctl.commands.network import native_assistant_main + + sys.exit(native_assistant_main(sys.argv[2:])) + +if len(sys.argv) > 1 and sys.argv[1] == "portable-ai": + from sourceosctl.commands.portable_ai_cli import main as portable_ai_main + + sys.exit(portable_ai_main(sys.argv[2:])) + +from sourceosctl.cli import main + +sys.exit(main()) From d4c7645340d71c35cc8ebe7c8883b10c4bfc5c3c Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Mon, 4 May 2026 15:52:23 -0400 Subject: [PATCH 06/22] Add portable AI CLI tests --- tests/test_portable_ai_cli.py | 100 ++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 tests/test_portable_ai_cli.py diff --git a/tests/test_portable_ai_cli.py b/tests/test_portable_ai_cli.py new file mode 100644 index 0000000..0c2043e --- /dev/null +++ b/tests/test_portable_ai_cli.py @@ -0,0 +1,100 @@ +"""Unit tests for SourceOS Portable AI Kit commands.""" + +import json +import os +import pathlib +import sys +import tempfile +import unittest +from unittest import mock + +_REPO_ROOT = pathlib.Path(__file__).parent.parent +sys.path.insert(0, str(_REPO_ROOT)) + +from sourceosctl.commands import portable_ai +from sourceosctl.commands.portable_ai_cli import main as portable_ai_main + + +class TestPortableAICommands(unittest.TestCase): + def test_profiles_direct(self): + self.assertEqual(portable_ai.profiles(mock.Mock()), 0) + + def test_profiles_cli(self): + self.assertEqual(portable_ai_main(["profiles"]), 0) + + def test_preflight_existing_tempdir(self): + with tempfile.TemporaryDirectory() as tmpdir: + self.assertEqual(portable_ai_main(["preflight", tmpdir]), 0) + + def test_prepare_dry_run_does_not_create_target(self): + with tempfile.TemporaryDirectory() as 
parent: + target = pathlib.Path(parent) / "SOURCEOS_AI" + self.assertEqual(portable_ai_main(["prepare", str(target), "--profile", "tiny-router"]), 0) + self.assertFalse(target.exists()) + + def test_prepare_execute_requires_policy_ok(self): + with tempfile.TemporaryDirectory() as parent: + target = pathlib.Path(parent) / "SOURCEOS_AI" + self.assertEqual( + portable_ai_main(["prepare", str(target), "--execute", "--profile", "tiny-router"]), + 2, + ) + self.assertFalse(target.exists()) + + def test_prepare_execute_creates_manifest_and_evidence(self): + with tempfile.TemporaryDirectory() as parent: + target = pathlib.Path(parent) / "SOURCEOS_AI" + evidence = pathlib.Path(parent) / "evidence.json" + self.assertEqual( + portable_ai_main([ + "prepare", + str(target), + "--profile", + "laptop-safe", + "--execute", + "--policy-ok", + "--evidence-out", + str(evidence), + ]), + 0, + ) + self.assertTrue((target / "manifests" / "portable-ai-root.json").exists()) + self.assertTrue((target / "evidence" / "materialization").exists()) + self.assertTrue(evidence.exists()) + payload = json.loads(evidence.read_text()) + self.assertEqual(payload["type"], "PortableMaterializationEvidence") + self.assertEqual(payload["profile"], "laptop-safe") + self.assertFalse(payload["downloadedModels"]) + self.assertFalse(payload["startedRuntime"]) + + def test_start_plan(self): + with tempfile.TemporaryDirectory() as tmpdir: + self.assertEqual(portable_ai_main(["start-plan", tmpdir, "--surface", "turtleterm"]), 0) + + def test_inspect(self): + with tempfile.TemporaryDirectory() as tmpdir: + self.assertEqual(portable_ai_main(["inspect", tmpdir]), 0) + + def test_evidence_inspect_valid(self): + payload = { + "type": "PortablePreflightEvidence", + "apiVersion": portable_ai.PORTABLE_LAYOUT_VERSION, + "targetRoot": "/tmp/SOURCEOS_AI", + "decision": "pass", + "promptEgressDefault": "deny", + "hostWritesDefault": "deny", + } + with tempfile.NamedTemporaryFile(suffix=".json", mode="w", delete=False) as 
handle: + json.dump(payload, handle) + tmp_path = handle.name + try: + self.assertEqual(portable_ai_main(["evidence", "inspect", tmp_path]), 0) + finally: + os.unlink(tmp_path) + + def test_evidence_inspect_missing(self): + self.assertEqual(portable_ai_main(["evidence", "inspect", "/nonexistent/portable-ai.json"]), 1) + + +if __name__ == "__main__": + unittest.main() From dda17eb51253aef4edff434d17c1ac13e8d43932 Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Mon, 4 May 2026 16:28:10 -0400 Subject: [PATCH 07/22] Document Portable AI Kit demo path --- README.md | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index febc4e8..bb9b513 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ `sourceos-devtools` is the installable SourceOS developer/operator toolkit surface. -It is the home for Linux-native developer tooling, AI operator tooling, lab profile selection, Nix/devshell orchestration, NLBoot/operator helpers, release tooling, local AI governance utilities, and workstation bootstrap flows. +It is the home for Linux-native developer tooling, AI operator tooling, lab profile selection, Nix/devshell orchestration, NLBoot/operator helpers, release tooling, local AI governance utilities, workstation bootstrap flows, and Portable AI Kit preparation flows. ## Scope @@ -16,6 +16,7 @@ It should contain: - lab/profile selection utilities; - local model-service client helpers; - model-router client utilities; +- Portable AI Kit preflight, prepare, launch-plan, inspect, and evidence helpers; - guardrail/eval/evidence helpers; - agent sandbox/run helpers; - fingerprint and proof bundle tools; @@ -34,6 +35,34 @@ It should not contain: - SourceOS image build state; - secrets, tokens, credentials, private keys, or device-specific enrollment secrets. 
+## Portable AI Kit + +Portable AI Kit is the pocketable local-AI appliance mode for SourceOS: prepare a USB drive or portable SSD once, carry a governed local AI workstation, and run it on a supported host without sending prompts or chat state off-device by default. + +Quick demo path from a checkout: + +```bash +python3 bin/sourceosctl portable-ai profiles +python3 bin/sourceosctl portable-ai preflight /Volumes/SOURCEOS_AI +python3 bin/sourceosctl portable-ai prepare /Volumes/SOURCEOS_AI --profile laptop-safe --dry-run +python3 bin/sourceosctl portable-ai prepare /Volumes/SOURCEOS_AI --profile laptop-safe --execute --policy-ok --evidence-out ./portable-ai-evidence.json +python3 bin/sourceosctl portable-ai start-plan /Volumes/SOURCEOS_AI --surface turtleterm +python3 bin/sourceosctl portable-ai inspect /Volumes/SOURCEOS_AI +``` + +Portable AI Kit does **not** download model weights implicitly, start local daemons implicitly, run inference during preflight, or authorize prompt egress by default. Runtime activation belongs to Agent Machine. Model pack definitions belong to `SourceOS-Linux/sourceos-model-carry`. Routing belongs to `SocioProphet/model-router` under Policy Fabric posture. 
+ +Default profiles: + +| Profile key | Purpose | Minimum free space | Default posture | +| --- | --- | --- | --- | +| `tiny-router` | Routing, triage, rewrite, summarization | 8 GB | local-only, no tools | +| `laptop-safe` | Offline fallback, Office assist, privacy-first chat | 16 GB | prompt egress denied | +| `office-local` | Office Plane summarization and artifact drafting | 32 GB | workroom-scoped host writes | +| `code-local` | Repo triage and local coding assistance | 32 GB | repo-scoped host writes | +| `field-kit` | Field/operator portable SSD kit | 64 GB | evidence-first | +| `byom-gguf` | Bring-your-own GGUF import profile | varies | hash required before eligibility | + ## First milestone M1 is repo maturity and install surface definition: @@ -50,6 +79,10 @@ M1 is repo maturity and install surface definition: - `SociOS-Linux/nlboot`: boot/recovery client and evidence records. - `SourceOS-Linux/sourceos-spec`: canonical SourceOS schemas and contracts. - `SourceOS-Linux/sourceos-boot`: SourceOS boot/recovery integration. +- `SourceOS-Linux/sourceos-model-carry`: local model profiles and carry-layer service refs. +- `SourceOS-Linux/agent-machine`: governed local runtime activation, teardown, and evidence receipts. +- `SourceOS-Linux/TurtleTerm`: first-class terminal surface for Portable AI Kit. +- `SourceOS-Linux/agent-term`: terminal-native SourceOS operator ChatOps console. - `SocioProphet/homebrew-prophet`: Homebrew install formulae. - `SocioProphet/model-router`: governed model/service routing. - `SocioProphet/guardrail-fabric`: guardrail policy client integration. @@ -62,4 +95,4 @@ M1 is repo maturity and install surface definition: make validate ``` -The initial validation target checks repository metadata and JSON/YAML syntax where present. Implementation-specific validation should be added with each tool surface. +The validation target checks repository metadata and runs the unit test suite. 
Implementation-specific validation should be added with each tool surface. From f241a838188d5549bcd9f6d31708c5ccde98f3fc Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Mon, 4 May 2026 16:31:20 -0400 Subject: [PATCH 08/22] Reuse portable AI parser in standalone entrypoint --- bin/sourceos-portable-ai | 61 ++++------------------------------------ 1 file changed, 5 insertions(+), 56 deletions(-) diff --git a/bin/sourceos-portable-ai b/bin/sourceos-portable-ai index 160c421..d5c3596 100644 --- a/bin/sourceos-portable-ai +++ b/bin/sourceos-portable-ai @@ -1,66 +1,15 @@ #!/usr/bin/env python3 -"""Standalone SourceOS Portable AI Kit CLI. - -This entrypoint exists so the portable-AI implementation can be exercised before it -is promoted into the main sourceosctl parser. -""" +"""Standalone SourceOS Portable AI Kit CLI.""" from __future__ import annotations -import argparse +import os import sys -from sourceosctl.commands import portable_ai - - -def build_parser() -> argparse.ArgumentParser: - parser = argparse.ArgumentParser( - prog="sourceos-portable-ai", - description="SourceOS Portable AI Kit helpers (dry-run / evidence-first surface)", - ) - sub = parser.add_subparsers(dest="command", metavar="") - sub.required = True - - profiles_p = sub.add_parser("profiles", help="List built-in portable AI profiles") - profiles_p.set_defaults(func=portable_ai.profiles) - - preflight_p = sub.add_parser("preflight", help="Inspect a portable root target without mutating it") - preflight_p.add_argument("target_root", help="Target USB/SSD portable root") - preflight_p.add_argument("--benchmark", action="store_true", default=False, help="Reserve flag for future read/write benchmark") - preflight_p.set_defaults(func=portable_ai.preflight) - - prepare_p = sub.add_parser("prepare", help="Render or execute portable root materialization") - prepare_p.add_argument("target_root", help="Target USB/SSD portable root") - 
prepare_p.add_argument("--profile", default="laptop-safe", choices=sorted(portable_ai.PORTABLE_PROFILES), help="Portable profile") - prepare_p.add_argument("--dry-run", action="store_true", default=True, dest="dry_run", help="Render plan without writing files") - prepare_p.add_argument("--execute", action="store_true", default=False, help="Create declared portable-root directories and manifest") - prepare_p.add_argument("--policy-ok", action="store_true", default=False, help="Confirm policy/operator approval for materialization") - prepare_p.add_argument("--evidence-out", default=None, help="Optional evidence JSON path") - prepare_p.set_defaults(func=portable_ai.prepare) - - start_p = sub.add_parser("start-plan", help="Render a local runtime/surface launch plan without starting daemons") - start_p.add_argument("target_root", help="Target USB/SSD portable root") - start_p.add_argument("--surface", default="turtleterm", choices=["turtleterm", "agent-term", "bearbrowser", "local-web", "anythingllm-adapter"], help="Launch surface") - start_p.set_defaults(func=portable_ai.start_plan) - - inspect_p = sub.add_parser("inspect", help="Inspect portable root layout state") - inspect_p.add_argument("target_root", help="Target USB/SSD portable root") - inspect_p.set_defaults(func=portable_ai.inspect) - - evidence_p = sub.add_parser("evidence", help="Portable AI evidence helpers") - evidence_sub = evidence_p.add_subparsers(dest="evidence_command", metavar="") - evidence_sub.required = True - evidence_inspect_p = evidence_sub.add_parser("inspect", help="Inspect portable AI evidence JSON") - evidence_inspect_p.add_argument("path", help="Evidence JSON path") - evidence_inspect_p.set_defaults(func=portable_ai.evidence_inspect) - - return parser - +# Allow running directly from the repo root without installing. 
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -def main(argv=None) -> int: - parser = build_parser() - args = parser.parse_args(argv) - return args.func(args) or 0 +from sourceosctl.commands.portable_ai_cli import main if __name__ == "__main__": From 7466b4cd112c3719d3afe2015089b4934b3a6bcd Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Tue, 5 May 2026 12:08:43 -0400 Subject: [PATCH 09/22] Harden portable AI preflight checks --- sourceosctl/commands/portable_ai.py | 301 ++++++++++++++++++++++++++-- 1 file changed, 279 insertions(+), 22 deletions(-) diff --git a/sourceosctl/commands/portable_ai.py b/sourceosctl/commands/portable_ai.py index 31c72d2..89f05cc 100644 --- a/sourceosctl/commands/portable_ai.py +++ b/sourceosctl/commands/portable_ai.py @@ -12,13 +12,16 @@ import os import platform import shutil -import stat +import subprocess import sys +import tempfile +import time from pathlib import Path from typing import Any, Dict PORTABLE_LAYOUT_VERSION = "sourceos.portable-ai/v1alpha1" +BENCHMARK_SIZE_MB = 8 PORTABLE_PROFILES: dict[str, dict[str, Any]] = { "tiny-router": { @@ -94,6 +97,30 @@ "tmp", ] +LARGE_FILE_SAFE_FSTYPES = { + "apfs", + "btrfs", + "exfat", + "ext2", + "ext3", + "ext4", + "f2fs", + "hfs", + "hfs+", + "ntfs", + "ufs", + "xfs", + "zfs", +} + +LARGE_FILE_BLOCKING_FSTYPES = { + "fat", + "fat16", + "fat32", + "msdos", + "vfat", +} + def _now() -> str: return _dt.datetime.now(_dt.timezone.utc).isoformat() @@ -108,8 +135,19 @@ def _target(path_value: str) -> Path: return Path(path_value).expanduser().resolve() +def _probe_path(path: Path) -> Path: + return path if path.exists() else path.parent + + +def _run(args: list[str], timeout: int = 3) -> subprocess.CompletedProcess[str] | None: + try: + return subprocess.run(args, check=False, capture_output=True, text=True, timeout=timeout) + except Exception: + return None + + def _disk_usage_gb(path: Path) -> dict[str, float | 
None]: - probe = path if path.exists() else path.parent + probe = _probe_path(path) try: total, used, free = shutil.disk_usage(probe) except FileNotFoundError: @@ -123,17 +161,153 @@ def _disk_usage_gb(path: Path) -> dict[str, float | None]: def _writable(path: Path) -> bool: - probe = path if path.exists() else path.parent + probe = _probe_path(path) return probe.exists() and os.access(probe, os.W_OK) -def _large_file_warning(path: Path) -> str | None: - # Python's stdlib does not expose portable fs type for every platform. - # Keep this conservative; Linux/macOS launchers can add richer fs probing. - name = str(path).lower() - if "fat32" in name or "vfat" in name: - return "target path appears to reference FAT32/VFAT; GGUF files larger than 4GB may fail" - return None +def _host_facts() -> dict[str, Any]: + ram_gb: float | None = None + try: + if hasattr(os, "sysconf"): + pages = os.sysconf("SC_PHYS_PAGES") + page_size = os.sysconf("SC_PAGE_SIZE") + ram_gb = round((pages * page_size) / (1024 ** 3), 2) + except Exception: + ram_gb = None + + return { + "system": platform.system(), + "machine": platform.machine(), + "processor": platform.processor() or None, + "platform": platform.platform(), + "pythonVersion": platform.python_version(), + "cpuCount": os.cpu_count(), + "ramGb": ram_gb, + } + + +def _linux_block_details(source: str | None) -> dict[str, Any]: + details: dict[str, Any] = { + "baseDevice": None, + "transport": None, + "removableFlag": None, + "model": None, + "vendor": None, + } + if not source or not source.startswith("/dev/"): + return details + + pk = _run(["lsblk", "-ndo", "PKNAME", source]) + base_name = pk.stdout.strip().splitlines()[0] if pk and pk.stdout.strip() else Path(source).name + base = f"/dev/{base_name}" if not base_name.startswith("/dev/") else base_name + details["baseDevice"] = base + + for key, column in [ + ("transport", "TRAN"), + ("removableFlag", "RM"), + ("model", "MODEL"), + ("vendor", "VENDOR"), + ]: + result = 
_run(["lsblk", "-ndo", column, base]) + value = result.stdout.strip().splitlines()[0] if result and result.stdout.strip() else None + details[key] = value + return details + + +def _darwin_block_details(source: str | None) -> dict[str, Any]: + details: dict[str, Any] = { + "baseDevice": source, + "transport": None, + "removableFlag": None, + "model": None, + "vendor": None, + } + if not source or not source.startswith("/dev/"): + return details + + result = _run(["diskutil", "info", source]) + if not result or result.returncode != 0: + return details + for raw_line in result.stdout.splitlines(): + line = raw_line.strip() + if line.startswith("Protocol:"): + details["transport"] = line.split(":", 1)[1].strip() + elif line.startswith("Removable Media:"): + details["removableFlag"] = line.split(":", 1)[1].strip() + elif line.startswith("Device / Media Name:"): + details["model"] = line.split(":", 1)[1].strip() + return details + + +def _mount_info(path: Path) -> dict[str, Any]: + probe = _probe_path(path) + info: dict[str, Any] = { + "probePath": str(probe), + "source": None, + "fsType": None, + "options": None, + "readOnly": None, + "largeFileSupport": "unknown", + "largeFileReason": "filesystem type unavailable", + "removableConfidence": "unknown", + "block": {}, + } + + system = platform.system() + if system == "Linux" and shutil.which("findmnt"): + result = _run(["findmnt", "-n", "-T", str(probe), "-o", "SOURCE,FSTYPE,OPTIONS"]) + if result and result.returncode == 0 and result.stdout.strip(): + parts = result.stdout.strip().split(maxsplit=2) + if len(parts) >= 1: + info["source"] = parts[0] + if len(parts) >= 2: + info["fsType"] = parts[1].lower() + if len(parts) >= 3: + info["options"] = parts[2] + elif system == "Darwin": + df = _run(["df", "-P", str(probe)]) + if df and df.returncode == 0: + lines = [line for line in df.stdout.splitlines() if line.strip()] + if len(lines) >= 2: + info["source"] = lines[1].split()[0] + mount = _run(["mount"]) + if mount and 
mount.returncode == 0 and info.get("source"): + for line in mount.stdout.splitlines(): + if line.startswith(str(info["source"]) + " on "): + if "(" in line and ")" in line: + opts = line.rsplit("(", 1)[1].rstrip(")") + bits = [bit.strip() for bit in opts.split(",")] + info["fsType"] = bits[0].lower() if bits else None + info["options"] = ",".join(bits[1:]) if len(bits) > 1 else None + break + + opts = str(info.get("options") or "") + if opts: + info["readOnly"] = "ro" in {part.strip().lower() for part in opts.split(",")} + + fs_type = str(info.get("fsType") or "").lower() + if fs_type in LARGE_FILE_BLOCKING_FSTYPES: + info["largeFileSupport"] = "blocked" + info["largeFileReason"] = f"{fs_type} has a practical 4GB per-file limit" + elif fs_type in LARGE_FILE_SAFE_FSTYPES: + info["largeFileSupport"] = "ok" + info["largeFileReason"] = f"{fs_type} supports large model files" + + if system == "Linux": + block = _linux_block_details(info.get("source")) + elif system == "Darwin": + block = _darwin_block_details(info.get("source")) + else: + block = {} + info["block"] = block + + transport = str(block.get("transport") or "").lower() + removable = str(block.get("removableFlag") or "").lower() + if transport == "usb" or removable in {"1", "yes", "removable"}: + info["removableConfidence"] = "high" + elif info.get("source"): + info["removableConfidence"] = "low" + return info def _runtime_paths() -> dict[str, str | None]: @@ -144,6 +318,53 @@ def _runtime_paths() -> dict[str, str | None]: } +def _benchmark(path: Path, size_mb: int = BENCHMARK_SIZE_MB) -> dict[str, Any]: + probe = _probe_path(path) + result: dict[str, Any] = { + "requested": True, + "performed": False, + "sizeMb": size_mb, + "writeMBps": None, + "readMBps": None, + "tempFileRemoved": False, + "error": None, + } + if not probe.exists() or not os.access(probe, os.W_OK): + result["error"] = "benchmark target is not writable" + return result + + tmp_path: str | None = None + try: + with 
tempfile.NamedTemporaryFile(prefix=".sourceos_portable_ai_bench_", suffix=".tmp", dir=str(probe), delete=False) as handle: + tmp_path = handle.name + chunk = b"0" * (1024 * 1024) + start = time.perf_counter() + for _ in range(size_mb): + handle.write(chunk) + handle.flush() + os.fsync(handle.fileno()) + elapsed = max(time.perf_counter() - start, 1e-9) + result["writeMBps"] = round(size_mb / elapsed, 2) + + start = time.perf_counter() + with open(tmp_path, "rb") as handle: + while handle.read(1024 * 1024): + pass + elapsed = max(time.perf_counter() - start, 1e-9) + result["readMBps"] = round(size_mb / elapsed, 2) + result["performed"] = True + except Exception as exc: # pragma: no cover - defensive around host IO + result["error"] = str(exc) + finally: + if tmp_path: + try: + os.unlink(tmp_path) + result["tempFileRemoved"] = True + except OSError: + result["tempFileRemoved"] = False + return result + + def _profile(name: str) -> dict[str, Any]: try: return PORTABLE_PROFILES[name] @@ -171,7 +392,8 @@ def profiles(args) -> int: def preflight(args) -> int: target = _target(args.target_root) usage = _disk_usage_gb(target) - warning = _large_file_warning(target) + mount = _mount_info(target) + host = _host_facts() runtime_paths = _runtime_paths() exists = target.exists() writable = _writable(target) @@ -183,13 +405,49 @@ def preflight(args) -> int: failures.append("target parent does not exist") if not writable: failures.append("target or parent is not writable") - if warning: - warnings.append(warning) + if mount.get("readOnly") is True: + failures.append("target mount is read-only") + if mount.get("largeFileSupport") == "blocked": + failures.append(str(mount.get("largeFileReason"))) + elif mount.get("largeFileSupport") == "unknown": + warnings.append("large-file support could not be confirmed") + if mount.get("removableConfidence") == "low": + warnings.append("target does not appear to be removable USB media; proceed only if this is an approved portable SSD/root") + if 
free_gb is not None and free_gb < 8: failures.append("less than 8GB free; no built-in portable profile can be prepared safely") elif free_gb is not None and free_gb < 16: warnings.append("less than 16GB free; only tiny-router or small BYOM profiles are realistic") + ram_gb = host.get("ramGb") + if isinstance(ram_gb, (int, float)): + if ram_gb < 8: + warnings.append("host RAM is below 8GB; only very small local models are realistic") + elif ram_gb < 16: + warnings.append("host RAM is below 16GB; prefer tiny-router or laptop-safe profiles") + else: + warnings.append("host RAM could not be detected") + + benchmark = { + "requested": bool(getattr(args, "benchmark", False)), + "performed": False, + } + if getattr(args, "benchmark", False): + benchmark = _benchmark(target) + if benchmark.get("error"): + warnings.append(f"benchmark did not complete: {benchmark['error']}") + elif benchmark.get("performed"): + write_speed = benchmark.get("writeMBps") or 0 + read_speed = benchmark.get("readMBps") or 0 + if write_speed < 10: + failures.append(f"write benchmark below minimum: {write_speed} MB/s") + elif write_speed < 25: + warnings.append(f"write benchmark is usable but slow: {write_speed} MB/s") + if read_speed < 20: + failures.append(f"read benchmark below minimum: {read_speed} MB/s") + elif read_speed < 50: + warnings.append(f"read benchmark is usable but slow: {read_speed} MB/s") + decision = "fail" if failures else "warn" if warnings else "pass" return _print_json( @@ -201,19 +459,18 @@ def preflight(args) -> int: "exists": exists, "writable": writable, "disk": usage, - "host": { - "system": platform.system(), - "machine": platform.machine(), - "platform": platform.platform(), - }, + "mount": mount, + "host": host, "runtimePaths": runtime_paths, - "benchmarkRequested": bool(getattr(args, "benchmark", False)), - "benchmarkPerformed": False, - "largeFileSupportWarning": warning, + "benchmark": benchmark, + "benchmarkRequested": benchmark.get("requested", False), + 
"benchmarkPerformed": benchmark.get("performed", False), + "largeFileSupportWarning": None if mount.get("largeFileSupport") == "ok" else mount.get("largeFileReason"), "failures": failures, "warnings": warnings, "decision": decision, - "mutatesTarget": False, + "mutatesTarget": bool(getattr(args, "benchmark", False)), + "mutationScope": "temporary benchmark file only" if getattr(args, "benchmark", False) else None, } ) From f7db0b435be5a62fc7885b90921fe94529b36a8f Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Tue, 5 May 2026 12:29:54 -0400 Subject: [PATCH 10/22] Cover portable AI preflight hardening --- tests/test_portable_ai_cli.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tests/test_portable_ai_cli.py b/tests/test_portable_ai_cli.py index 0c2043e..8a45382 100644 --- a/tests/test_portable_ai_cli.py +++ b/tests/test_portable_ai_cli.py @@ -26,6 +26,39 @@ def test_preflight_existing_tempdir(self): with tempfile.TemporaryDirectory() as tmpdir: self.assertEqual(portable_ai_main(["preflight", tmpdir]), 0) + def test_preflight_records_mount_and_host_facts(self): + with tempfile.TemporaryDirectory() as tmpdir: + captured = {} + + def capture(payload): + captured.update(payload) + return 0 + + args = mock.Mock(target_root=tmpdir, benchmark=False) + with mock.patch("sourceosctl.commands.portable_ai._print_json", side_effect=capture): + self.assertEqual(portable_ai.preflight(args), 0) + + self.assertEqual(captured["type"], "PortablePreflightEvidence") + self.assertIn("mount", captured) + self.assertIn("host", captured) + self.assertIn("disk", captured) + self.assertIn("runtimePaths", captured) + self.assertFalse(captured["benchmarkRequested"]) + self.assertFalse(captured["benchmarkPerformed"]) + self.assertFalse(captured["mutatesTarget"]) + + def test_preflight_benchmark_removes_tempfile(self): + with tempfile.TemporaryDirectory() as tmpdir: + before = set(os.listdir(tmpdir)) + result = 
portable_ai._benchmark(pathlib.Path(tmpdir), size_mb=1) + after = set(os.listdir(tmpdir)) + self.assertTrue(result["requested"]) + self.assertTrue(result["performed"]) + self.assertTrue(result["tempFileRemoved"]) + self.assertEqual(before, after) + self.assertGreater(result["writeMBps"], 0) + self.assertGreater(result["readMBps"], 0) + def test_prepare_dry_run_does_not_create_target(self): with tempfile.TemporaryDirectory() as parent: target = pathlib.Path(parent) / "SOURCEOS_AI" From 711fa5247fdc167a9ae4a3d461cc68b90258b4e1 Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Wed, 6 May 2026 00:02:07 -0400 Subject: [PATCH 11/22] Add portable AI BYOM verification flow --- sourceosctl/commands/portable_ai_byom.py | 257 +++++++++++++++++++++++ 1 file changed, 257 insertions(+) create mode 100644 sourceosctl/commands/portable_ai_byom.py diff --git a/sourceosctl/commands/portable_ai_byom.py b/sourceosctl/commands/portable_ai_byom.py new file mode 100644 index 0000000..ce71988 --- /dev/null +++ b/sourceosctl/commands/portable_ai_byom.py @@ -0,0 +1,257 @@ +"""Portable AI Kit BYOM model verification helpers. + +This module handles local bring-your-own model file verification. It never +contacts a network endpoint and never downloads model weights. 
+""" + +from __future__ import annotations + +import datetime as _dt +import hashlib +import json +import re +import shutil +import sys +from pathlib import Path +from typing import Any, Dict + +from sourceosctl.commands import portable_ai + + +DEFAULT_TASK_CLASSES = ["operator-selected"] +CHUNK_SIZE = 1024 * 1024 + + +def _now() -> str: + return _dt.datetime.now(_dt.timezone.utc).isoformat() + + +def _print_json(payload: Dict[str, Any]) -> int: + print(json.dumps(payload, indent=2, sort_keys=True)) + return 0 + + +def _safe_slug(value: str) -> str: + slug = re.sub(r"[^a-zA-Z0-9_.-]+", "-", value.strip().lower()).strip("-._") + return slug or "operator-supplied-model" + + +def _sha256_file(path: Path) -> str: + digest = hashlib.sha256() + with path.open("rb") as handle: + for chunk in iter(lambda: handle.read(CHUNK_SIZE), b""): + digest.update(chunk) + return digest.hexdigest() + + +def _target(path_value: str) -> Path: + return Path(path_value).expanduser().resolve() + + +def _model_path(path_value: str) -> Path: + return Path(path_value).expanduser().resolve() + + +def _manifest_path(target_root: Path, slug: str) -> Path: + return target_root / "manifests" / f"model-carry-pack.byom-gguf.{slug}.json" + + +def _copied_model_path(target_root: Path, source: Path) -> Path: + return target_root / "models" / "blobs" / source.name + + +def _build_pack( + *, + pack_id: str, + display_name: str, + model_file: Path, + sha256: str, + size_bytes: int, + license_ref: str, + source_note: str | None, + task_classes: list[str], + copied_to: Path | None, +) -> dict[str, Any]: + size_mb = max(1, round(size_bytes / (1024 * 1024))) + return { + "schemaVersion": "v0.1", + "kind": "ModelCarryPack", + "packId": pack_id, + "displayName": display_name, + "profileKey": "byom-gguf", + "model": { + "name": model_file.name, + "family": "operator-supplied", + "parameterClass": "other", + "quantization": "operator-supplied", + "format": "gguf", + "contextWindowHint": None, + "diskSizeHintMb": 
size_mb, + "memoryHintMb": None, + }, + "runtimeCompatibility": ["llama.cpp", "ollama-compatible", "openai-compatible-local"], + "footprint": { + "minimumFreeGb": 8, + "recommendedFreeGb": 64, + "minimumRamGb": 8, + "recommendedRamGb": 32, + }, + "provenance": { + "sourceKind": "local-file", + "sourceUrl": None, + "sourceNote": source_note, + "licenseRef": license_ref, + "modelCardRef": None, + "sha256": sha256, + "sha256RequiredBeforeEligibility": True, + }, + "taskClasses": task_classes, + "labels": ["local-only", "byom-verified"], + "policy": { + "localOnlyDefault": True, + "promptEgressDefault": "deny", + "allowToolUseDefault": False, + "allowNetworkDefault": False, + "requiresExplicitImport": True, + "requiresEvidence": True, + "eligibleForRoutingBeforeHash": False, + "maxPromptChars": None, + }, + "storage": { + "sourcePath": str(model_file), + "copiedToPortableRoot": str(copied_to) if copied_to else None, + }, + "evidence": { + "emitPackVerification": True, + "emitRuntimeHealth": True, + "emitRouteDecision": True, + "emitPromptHashOnly": True, + }, + "notes": "Operator-supplied local model file verified by SourceOS Portable AI Kit. 
No download was performed.", + } + + +def verify(args) -> int: + """Verify a local BYOM model file and optionally write a carry-pack manifest.""" + target_root = _target(args.target_root) + model_file = _model_path(args.model_file) + failures: list[str] = [] + warnings: list[str] = [] + + if not model_file.exists(): + failures.append("model file does not exist") + elif not model_file.is_file(): + failures.append("model path is not a regular file") + + if model_file.suffix.lower() != ".gguf": + warnings.append("model file does not end with .gguf; treat this as an operator-attested local model only") + + if getattr(args, "execute", False) and not getattr(args, "policy_ok", False): + failures.append("--execute requires --policy-ok") + + target_manifest_dir = target_root / "manifests" + target_blob_dir = target_root / "models" / "blobs" + if getattr(args, "execute", False): + if not target_root.exists(): + failures.append("target root does not exist; run portable-ai prepare first") + if target_root.exists() and not target_manifest_dir.exists(): + failures.append("target root is missing manifests directory; run portable-ai prepare first") + if getattr(args, "copy", False) and target_root.exists() and not target_blob_dir.exists(): + failures.append("target root is missing models/blobs directory; run portable-ai prepare first") + + if failures: + return _print_json( + { + "type": "BYOMImportEvidence", + "apiVersion": portable_ai.PORTABLE_LAYOUT_VERSION, + "capturedAt": _now(), + "targetRoot": str(target_root), + "modelFile": str(model_file), + "decision": "fail", + "failures": failures, + "warnings": warnings, + "wouldWriteManifest": False, + "wouldCopyModel": False, + "downloadedModel": False, + } + ) + + size_bytes = model_file.stat().st_size + sha256 = _sha256_file(model_file) + slug = _safe_slug(args.name or model_file.stem) + pack_id = args.pack_id or f"urn:srcos:model-carry-pack:byom-gguf-{slug}" + display_name = args.display_name or args.name or model_file.stem + 
task_classes = args.task_class or DEFAULT_TASK_CLASSES + copied_to = _copied_model_path(target_root, model_file) if getattr(args, "copy", False) else None + manifest = _build_pack( + pack_id=pack_id, + display_name=display_name, + model_file=model_file, + sha256=sha256, + size_bytes=size_bytes, + license_ref=args.license_ref, + source_note=args.source_note, + task_classes=task_classes, + copied_to=copied_to, + ) + manifest_path = _manifest_path(target_root, slug) + + if not getattr(args, "execute", False): + return _print_json( + { + "type": "BYOMImportPlan", + "apiVersion": portable_ai.PORTABLE_LAYOUT_VERSION, + "capturedAt": _now(), + "targetRoot": str(target_root), + "modelFile": str(model_file), + "sizeBytes": size_bytes, + "sha256": sha256, + "licenseRef": args.license_ref, + "packId": pack_id, + "manifestPath": str(manifest_path), + "wouldWriteManifest": True, + "wouldCopyModel": bool(getattr(args, "copy", False)), + "copyDestination": str(copied_to) if copied_to else None, + "downloadedModel": False, + "requiresExecuteAndPolicyOk": True, + "manifest": manifest, + "warnings": warnings, + } + ) + + target_manifest_dir.mkdir(parents=True, exist_ok=True) + copied = False + if getattr(args, "copy", False): + target_blob_dir.mkdir(parents=True, exist_ok=True) + assert copied_to is not None + shutil.copy2(model_file, copied_to) + copied = True + + manifest_path.write_text(json.dumps(manifest, indent=2, sort_keys=True) + "\n", encoding="utf-8") + + evidence = { + "type": "BYOMImportEvidence", + "apiVersion": portable_ai.PORTABLE_LAYOUT_VERSION, + "capturedAt": _now(), + "targetRoot": str(target_root), + "modelFile": str(model_file), + "sizeBytes": size_bytes, + "sha256": sha256, + "licenseRef": args.license_ref, + "sourceNote": args.source_note, + "packId": pack_id, + "manifestPath": str(manifest_path), + "manifestWritten": True, + "modelCopied": copied, + "copyDestination": str(copied_to) if copied_to else None, + "downloadedModel": False, + "promptEgressDefault": 
"deny", + "toolUseDefault": "deny", + "networkDefault": "deny", + "decision": "verified", + "warnings": warnings, + } + if getattr(args, "evidence_out", None): + Path(args.evidence_out).expanduser().write_text(json.dumps(evidence, indent=2, sort_keys=True) + "\n", encoding="utf-8") + + return _print_json(evidence) From 86415f0f777afbfbf274b18939bb562a2ac1d89a Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Wed, 6 May 2026 00:03:45 -0400 Subject: [PATCH 12/22] Wire portable AI BYOM verify command --- sourceosctl/commands/portable_ai_cli.py | 72 ++++++++++++++++++++++++- 1 file changed, 71 insertions(+), 1 deletion(-) diff --git a/sourceosctl/commands/portable_ai_cli.py b/sourceosctl/commands/portable_ai_cli.py index 794ca20..042b489 100644 --- a/sourceosctl/commands/portable_ai_cli.py +++ b/sourceosctl/commands/portable_ai_cli.py @@ -4,7 +4,7 @@ import argparse -from sourceosctl.commands import portable_ai +from sourceosctl.commands import portable_ai, portable_ai_byom SURFACES = [ @@ -15,6 +15,24 @@ "anythingllm-adapter", ] +BYOM_TASK_CLASSES = [ + "operator-selected", + "router", + "triage", + "summarization", + "rewrite", + "office-assist", + "artifact-drafting", + "coding-assist", + "repo-triage", + "privacy-first-chat", + "offline-fallback", + "operator-assist", + "evidence-inspection", + "workroom-local", + "field-workroom", +] + def build_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( @@ -73,6 +91,58 @@ def build_parser() -> argparse.ArgumentParser: prepare_p.add_argument("--evidence-out", default=None, help="Optional evidence JSON path") prepare_p.set_defaults(func=portable_ai.prepare) + byom_p = sub.add_parser("byom", help="Bring-your-own local model helpers") + byom_sub = byom_p.add_subparsers(dest="byom_command", metavar="") + byom_sub.required = True + byom_verify_p = byom_sub.add_parser( + "verify", + help="Hash and verify a local model file, optionally writing a ModelCarryPack 
manifest", + ) + byom_verify_p.add_argument("target_root", help="Prepared Portable AI Kit root") + byom_verify_p.add_argument("model_file", help="Local model file to verify; no download is performed") + byom_verify_p.add_argument("--name", default=None, help="Short model/pack slug") + byom_verify_p.add_argument("--display-name", default=None, help="Display name for the model pack") + byom_verify_p.add_argument("--pack-id", default=None, help="Optional full model-carry-pack URN") + byom_verify_p.add_argument( + "--license-ref", + default="operator-attestation-required", + help="License or attestation reference for the operator-supplied file", + ) + byom_verify_p.add_argument("--source-note", default=None, help="Optional local provenance note") + byom_verify_p.add_argument( + "--task-class", + action="append", + choices=BYOM_TASK_CLASSES, + help="Allowed task class for this BYOM model; may be repeated", + ) + byom_verify_p.add_argument( + "--copy", + action="store_true", + default=False, + help="Copy the local model file into target_root/models/blobs when executing", + ) + byom_verify_p.add_argument( + "--dry-run", + action="store_true", + default=True, + dest="dry_run", + help="Render verification plan without writing manifest or copying model", + ) + byom_verify_p.add_argument( + "--execute", + action="store_true", + default=False, + help="Write BYOM ModelCarryPack manifest and optionally copy the file", + ) + byom_verify_p.add_argument( + "--policy-ok", + action="store_true", + default=False, + help="Confirm policy/operator approval for BYOM manifest materialization", + ) + byom_verify_p.add_argument("--evidence-out", default=None, help="Optional evidence JSON path") + byom_verify_p.set_defaults(func=portable_ai_byom.verify) + start_p = sub.add_parser( "start-plan", help="Render a local runtime/surface launch plan without starting daemons", From d7eab5586827fa436c49823540bd8ffd79293880 Mon Sep 17 00:00:00 2001 From: mdheller 
<21163552+mdheller@users.noreply.github.com> Date: Wed, 6 May 2026 00:04:37 -0400 Subject: [PATCH 13/22] Add portable AI BYOM tests --- tests/test_portable_ai_cli.py | 91 +++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/tests/test_portable_ai_cli.py b/tests/test_portable_ai_cli.py index 8a45382..51f950a 100644 --- a/tests/test_portable_ai_cli.py +++ b/tests/test_portable_ai_cli.py @@ -1,5 +1,6 @@ """Unit tests for SourceOS Portable AI Kit commands.""" +import hashlib import json import os import pathlib @@ -100,6 +101,96 @@ def test_prepare_execute_creates_manifest_and_evidence(self): self.assertFalse(payload["downloadedModels"]) self.assertFalse(payload["startedRuntime"]) + def test_byom_verify_dry_run_hashes_local_file_only(self): + with tempfile.TemporaryDirectory() as parent: + target = pathlib.Path(parent) / "SOURCEOS_AI" + model = pathlib.Path(parent) / "demo.gguf" + model.write_bytes(b"sourceos-test-model") + expected_hash = hashlib.sha256(b"sourceos-test-model").hexdigest() + captured = {} + + def capture(payload): + captured.update(payload) + return 0 + + with mock.patch("sourceosctl.commands.portable_ai_byom._print_json", side_effect=capture): + self.assertEqual( + portable_ai_main([ + "byom", + "verify", + str(target), + str(model), + "--name", + "demo", + ]), + 0, + ) + + self.assertEqual(captured["type"], "BYOMImportPlan") + self.assertEqual(captured["sha256"], expected_hash) + self.assertFalse(captured["downloadedModel"]) + self.assertTrue(captured["wouldWriteManifest"]) + self.assertFalse((target / "manifests").exists()) + + def test_byom_execute_requires_policy_ok(self): + with tempfile.TemporaryDirectory() as parent: + target = pathlib.Path(parent) / "SOURCEOS_AI" + model = pathlib.Path(parent) / "demo.gguf" + model.write_bytes(b"sourceos-test-model") + self.assertEqual( + portable_ai_main([ + "byom", + "verify", + str(target), + str(model), + "--execute", + ]), + 0, + ) + self.assertFalse((target / 
"manifests").exists()) + + def test_byom_execute_writes_manifest_after_prepare(self): + with tempfile.TemporaryDirectory() as parent: + target = pathlib.Path(parent) / "SOURCEOS_AI" + model = pathlib.Path(parent) / "demo.gguf" + model.write_bytes(b"sourceos-test-model") + evidence = pathlib.Path(parent) / "byom-evidence.json" + + self.assertEqual( + portable_ai_main([ + "prepare", + str(target), + "--profile", + "byom-gguf", + "--execute", + "--policy-ok", + ]), + 0, + ) + self.assertEqual( + portable_ai_main([ + "byom", + "verify", + str(target), + str(model), + "--name", + "demo", + "--execute", + "--policy-ok", + "--evidence-out", + str(evidence), + ]), + 0, + ) + manifest = target / "manifests" / "model-carry-pack.byom-gguf.demo.json" + self.assertTrue(manifest.exists()) + self.assertTrue(evidence.exists()) + payload = json.loads(evidence.read_text()) + self.assertEqual(payload["type"], "BYOMImportEvidence") + self.assertEqual(payload["decision"], "verified") + self.assertTrue(payload["manifestWritten"]) + self.assertFalse(payload["downloadedModel"]) + def test_start_plan(self): with tempfile.TemporaryDirectory() as tmpdir: self.assertEqual(portable_ai_main(["start-plan", tmpdir, "--surface", "turtleterm"]), 0) From 3e679e9ace12566ab1a572536e2745cd795dac9d Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Wed, 6 May 2026 00:06:55 -0400 Subject: [PATCH 14/22] Document portable AI BYOM verification path --- README.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index bb9b513..125b5dc 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ It should contain: - lab/profile selection utilities; - local model-service client helpers; - model-router client utilities; -- Portable AI Kit preflight, prepare, launch-plan, inspect, and evidence helpers; +- Portable AI Kit preflight, prepare, BYOM verification, launch-plan, inspect, and evidence helpers; - 
guardrail/eval/evidence helpers; - agent sandbox/run helpers; - fingerprint and proof bundle tools; @@ -50,6 +50,16 @@ python3 bin/sourceosctl portable-ai start-plan /Volumes/SOURCEOS_AI --surface tu python3 bin/sourceosctl portable-ai inspect /Volumes/SOURCEOS_AI ``` +BYOM GGUF/local model verification path: + +```bash +python3 bin/sourceosctl portable-ai prepare /Volumes/SOURCEOS_AI --profile byom-gguf --execute --policy-ok +python3 bin/sourceosctl portable-ai byom verify /Volumes/SOURCEOS_AI ./models/example.gguf --name example --license-ref operator-attestation-required +python3 bin/sourceosctl portable-ai byom verify /Volumes/SOURCEOS_AI ./models/example.gguf --name example --license-ref operator-attestation-required --execute --policy-ok --evidence-out ./byom-evidence.json +``` + +BYOM verification hashes a local file, records size and SHA-256, and emits a `ModelCarryPack` manifest. It does **not** download a model, contact a provider, start a runtime, grant network, grant tool use, or store prompt bodies. + Portable AI Kit does **not** download model weights implicitly, start local daemons implicitly, run inference during preflight, or authorize prompt egress by default. Runtime activation belongs to Agent Machine. Model pack definitions belong to `SourceOS-Linux/sourceos-model-carry`. Routing belongs to `SocioProphet/model-router` under Policy Fabric posture. 
Default profiles: From 38fdfeda38df880704fa9965d0d510ba66ecd530 Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Thu, 7 May 2026 00:56:39 -0400 Subject: [PATCH 15/22] Add portable AI runtime start and stop planners --- sourceosctl/commands/portable_ai_runtime.py | 234 ++++++++++++++++++++ 1 file changed, 234 insertions(+) create mode 100644 sourceosctl/commands/portable_ai_runtime.py diff --git a/sourceosctl/commands/portable_ai_runtime.py b/sourceosctl/commands/portable_ai_runtime.py new file mode 100644 index 0000000..96c9285 --- /dev/null +++ b/sourceosctl/commands/portable_ai_runtime.py @@ -0,0 +1,234 @@ +"""Portable AI Kit runtime start/stop planning. + +This module renders concrete runtime and surface handoff plans without starting +or stopping any process. Runtime activation remains Agent Machine gated. +""" + +from __future__ import annotations + +import datetime as _dt +import json +import shutil +from pathlib import Path +from typing import Any, Dict + +from sourceosctl.commands import portable_ai + + +DEFAULT_PROVIDER = "ollama-compatible" +DEFAULT_HOST = "127.0.0.1" +DEFAULT_PORT = 11434 +SUPPORTED_PROVIDERS = ["ollama-compatible", "llama.cpp", "openai-compatible-local"] + + +def _now() -> str: + return _dt.datetime.now(_dt.timezone.utc).isoformat() + + +def _print_json(payload: Dict[str, Any]) -> int: + print(json.dumps(payload, indent=2, sort_keys=True)) + return 0 + + +def _target(path_value: str) -> Path: + return Path(path_value).expanduser().resolve() + + +def _read_json(path: Path) -> dict[str, Any] | None: + try: + return json.loads(path.read_text(encoding="utf-8")) + except Exception: + return None + + +def _load_portable_root(target_root: Path) -> dict[str, Any] | None: + candidates = [ + target_root / "manifests" / "portable-ai-root.json", + target_root / "manifests" / "portable-ai-root.laptop-safe.json", + ] + for candidate in candidates: + if candidate.exists(): + payload = _read_json(candidate) + 
if payload: + return payload + return None + + +def _load_model_packs(target_root: Path) -> list[dict[str, Any]]: + manifest_dir = target_root / "manifests" + if not manifest_dir.exists(): + return [] + packs: list[dict[str, Any]] = [] + for path in sorted(manifest_dir.glob("model-carry-pack*.json")): + payload = _read_json(path) + if payload and payload.get("kind") == "ModelCarryPack": + payload["_manifestPath"] = str(path) + packs.append(payload) + return packs + + +def _select_model_pack(model_packs: list[dict[str, Any]], requested: str | None) -> dict[str, Any] | None: + if not model_packs: + return None + if requested: + for pack in model_packs: + model = pack.get("model", {}) + if requested in {pack.get("packId"), pack.get("displayName"), model.get("name")}: + return pack + return model_packs[0] + + +def _endpoint(host: str, port: int) -> str: + return f"http://{host}:{port}" + + +def _runtime_env(target_root: Path, provider: str, host: str, port: int) -> dict[str, str]: + if provider == "ollama-compatible": + return { + "OLLAMA_HOST": f"{host}:{port}", + "OLLAMA_MODELS": str(target_root / "models" / "ollama"), + "SOURCEOS_PORTABLE_AI_ROOT": str(target_root), + } + if provider == "llama.cpp": + return { + "LLAMA_ARG_HOST": host, + "LLAMA_ARG_PORT": str(port), + "SOURCEOS_PORTABLE_AI_ROOT": str(target_root), + } + return { + "OPENAI_BASE_URL": _endpoint(host, port), + "SOURCEOS_PORTABLE_AI_ROOT": str(target_root), + } + + +def _runtime_command(provider: str, host: str, port: int, model_pack: dict[str, Any] | None) -> list[str]: + if provider == "ollama-compatible": + return ["ollama", "serve"] + if provider == "llama.cpp": + model_name = (model_pack or {}).get("model", {}).get("name", "") + return ["llama-server", "--host", host, "--port", str(port), "--model", model_name] + return ["<openai-compatible-local-server>", "--host", host, "--port", str(port)] + + +def _surface_handoff(surface: str, endpoint: str, provider: str) -> dict[str, Any]: + return { + "surface": surface, + "endpoint": 
endpoint, + "provider": provider, + "handoffKind": "local-endpoint-ref", + "secretFree": True, + "promptEgressDefault": "deny", + "toolUseDefault": "deny", + } + + +def start_plan(args) -> int: + """Render a concrete runtime/surface launch plan without starting daemons.""" + target_root = _target(args.target_root) + provider = args.provider + host = args.host + port = args.port + model_packs = _load_model_packs(target_root) + selected_pack = _select_model_pack(model_packs, args.model) + endpoint = _endpoint(host, port) + env = _runtime_env(target_root, provider, host, port) + command = _runtime_command(provider, host, port, selected_pack) + runtime_binary = shutil.which(command[0]) if command and not command[0].startswith("<") else None + + warnings: list[str] = [] + if provider == "ollama-compatible" and runtime_binary is None: + warnings.append("ollama binary was not found on PATH; install/stage runtime before activation") + if provider == "llama.cpp" and runtime_binary is None: + warnings.append("llama-server binary was not found on PATH; install/stage runtime before activation") + if not selected_pack: + warnings.append("no ModelCarryPack manifest found; runtime can be staged but model route is not selected") + + return _print_json( + { + "type": "PortableAIStartPlan", + "apiVersion": portable_ai.PORTABLE_LAYOUT_VERSION, + "capturedAt": _now(), + "targetRoot": str(target_root), + "portableRootManifestPresent": _load_portable_root(target_root) is not None, + "provider": provider, + "surface": args.surface, + "endpoint": endpoint, + "bindAddress": host, + "port": port, + "runtimeBinary": runtime_binary, + "runtimeEnv": env, + "runtimeCommand": command, + "modelPacksFound": len(model_packs), + "selectedModelPack": selected_pack, + "surfaceHandoff": _surface_handoff(args.surface, endpoint, provider), + "wouldStartRuntime": False, + "wouldContactProvider": False, + "wouldDownloadModel": False, + "requiresAgentMachineActivation": True, + "requiresPolicyAdmission": 
True, + "requiresAgentRegistryGrant": True, + "promptEgressDefault": "deny", + "hostWritesDefault": "deny", + "networkDefault": "loopback-only", + "stopPlanCommand": [ + "python3", + "bin/sourceosctl", + "portable-ai", + "stop-plan", + str(target_root), + "--provider", + provider, + "--host", + host, + "--port", + str(port), + ], + "warnings": warnings, + } + ) + + +def stop_plan(args) -> int: + """Render a concrete runtime teardown plan without killing processes.""" + target_root = _target(args.target_root) + provider = args.provider + host = args.host + port = args.port + endpoint = _endpoint(host, port) + + if provider == "ollama-compatible": + processMatch = "ollama serve" + elif provider == "llama.cpp": + processMatch = "llama-server" + else: + processMatch = "openai-compatible-local-server" + + return _print_json( + { + "type": "PortableAIStopPlan", + "apiVersion": portable_ai.PORTABLE_LAYOUT_VERSION, + "capturedAt": _now(), + "targetRoot": str(target_root), + "provider": provider, + "endpoint": endpoint, + "bindAddress": host, + "port": port, + "processMatchHint": processMatch, + "wouldStopRuntime": False, + "wouldKillProcesses": False, + "requiresOperatorConfirmation": True, + "requiresAgentMachineTeardown": True, + "teardownEvidenceExpected": True, + "safeEjectRequires": [ + "runtime process stopped", + "loopback port released", + "temporary runtime files flushed", + "PortableRuntimeTeardownReceipt written", + ], + "operatorGuidance": [ + "Use Agent Machine teardown once activation support lands.", + "Until then, stop only the runtime process that matches the endpoint/provider in this plan.", + "Do not remove model blobs or evidence unless running an explicit wipe flow.", + ], + } + ) From 4bc43bf106d3b86e7ad003b5506c2db698b67d87 Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Thu, 7 May 2026 01:02:16 -0400 Subject: [PATCH 16/22] Wire portable AI runtime start and stop plans --- 
sourceosctl/commands/portable_ai_cli.py | 26 ++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/sourceosctl/commands/portable_ai_cli.py b/sourceosctl/commands/portable_ai_cli.py index 042b489..73af568 100644 --- a/sourceosctl/commands/portable_ai_cli.py +++ b/sourceosctl/commands/portable_ai_cli.py @@ -4,7 +4,7 @@ import argparse -from sourceosctl.commands import portable_ai, portable_ai_byom +from sourceosctl.commands import portable_ai, portable_ai_byom, portable_ai_runtime SURFACES = [ @@ -34,6 +34,18 @@ ] +def add_runtime_common(p: argparse.ArgumentParser) -> None: + p.add_argument("target_root", help="Target USB/SSD portable root") + p.add_argument( + "--provider", + default=portable_ai_runtime.DEFAULT_PROVIDER, + choices=portable_ai_runtime.SUPPORTED_PROVIDERS, + help="Local runtime provider class", + ) + p.add_argument("--host", default=portable_ai_runtime.DEFAULT_HOST, help="Loopback host bind address") + p.add_argument("--port", type=int, default=portable_ai_runtime.DEFAULT_PORT, help="Local runtime port") + + def build_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( prog="sourceosctl portable-ai", @@ -147,14 +159,22 @@ def build_parser() -> argparse.ArgumentParser: "start-plan", help="Render a local runtime/surface launch plan without starting daemons", ) - start_p.add_argument("target_root", help="Target USB/SSD portable root") + add_runtime_common(start_p) start_p.add_argument( "--surface", default="turtleterm", choices=SURFACES, help="Launch surface", ) - start_p.set_defaults(func=portable_ai.start_plan) + start_p.add_argument("--model", default=None, help="Optional pack id, display name, or model name to select") + start_p.set_defaults(func=portable_ai_runtime.start_plan) + + stop_p = sub.add_parser( + "stop-plan", + help="Render a local runtime teardown plan without killing processes", + ) + add_runtime_common(stop_p) + stop_p.set_defaults(func=portable_ai_runtime.stop_plan) inspect_p = 
sub.add_parser("inspect", help="Inspect portable root layout state") inspect_p.add_argument("target_root", help="Target USB/SSD portable root") From 79381878dd7e9c4b9a17394823a45e5c3b8ff75e Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Thu, 7 May 2026 01:03:22 -0400 Subject: [PATCH 17/22] Add portable AI runtime plan tests --- tests/test_portable_ai_cli.py | 38 +++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/tests/test_portable_ai_cli.py b/tests/test_portable_ai_cli.py index 51f950a..696acd2 100644 --- a/tests/test_portable_ai_cli.py +++ b/tests/test_portable_ai_cli.py @@ -191,9 +191,43 @@ def test_byom_execute_writes_manifest_after_prepare(self): self.assertTrue(payload["manifestWritten"]) self.assertFalse(payload["downloadedModel"]) - def test_start_plan(self): + def test_start_plan_emits_runtime_env_and_command_without_starting(self): with tempfile.TemporaryDirectory() as tmpdir: - self.assertEqual(portable_ai_main(["start-plan", tmpdir, "--surface", "turtleterm"]), 0) + captured = {} + + def capture(payload): + captured.update(payload) + return 0 + + with mock.patch("sourceosctl.commands.portable_ai_runtime._print_json", side_effect=capture): + self.assertEqual(portable_ai_main(["start-plan", tmpdir, "--surface", "turtleterm"]), 0) + + self.assertEqual(captured["type"], "PortableAIStartPlan") + self.assertEqual(captured["provider"], "ollama-compatible") + self.assertEqual(captured["bindAddress"], "127.0.0.1") + self.assertEqual(captured["port"], 11434) + self.assertIn("OLLAMA_MODELS", captured["runtimeEnv"]) + self.assertEqual(captured["runtimeCommand"], ["ollama", "serve"]) + self.assertFalse(captured["wouldStartRuntime"]) + self.assertFalse(captured["wouldDownloadModel"]) + self.assertTrue(captured["requiresAgentMachineActivation"]) + + def test_stop_plan_does_not_kill_processes(self): + with tempfile.TemporaryDirectory() as tmpdir: + captured = {} + + def 
capture(payload): + captured.update(payload) + return 0 + + with mock.patch("sourceosctl.commands.portable_ai_runtime._print_json", side_effect=capture): + self.assertEqual(portable_ai_main(["stop-plan", tmpdir]), 0) + + self.assertEqual(captured["type"], "PortableAIStopPlan") + self.assertFalse(captured["wouldStopRuntime"]) + self.assertFalse(captured["wouldKillProcesses"]) + self.assertTrue(captured["requiresOperatorConfirmation"]) + self.assertTrue(captured["requiresAgentMachineTeardown"]) def test_inspect(self): with tempfile.TemporaryDirectory() as tmpdir: From 1738116673b11adb18a870128dfed0402713885e Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Thu, 7 May 2026 01:04:15 -0400 Subject: [PATCH 18/22] Document portable AI runtime start and stop plans --- README.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 125b5dc..23bbbbc 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ It should contain: - lab/profile selection utilities; - local model-service client helpers; - model-router client utilities; -- Portable AI Kit preflight, prepare, BYOM verification, launch-plan, inspect, and evidence helpers; +- Portable AI Kit preflight, prepare, BYOM verification, runtime start/stop plans, inspect, and evidence helpers; - guardrail/eval/evidence helpers; - agent sandbox/run helpers; - fingerprint and proof bundle tools; @@ -47,6 +47,7 @@ python3 bin/sourceosctl portable-ai preflight /Volumes/SOURCEOS_AI python3 bin/sourceosctl portable-ai prepare /Volumes/SOURCEOS_AI --profile laptop-safe --dry-run python3 bin/sourceosctl portable-ai prepare /Volumes/SOURCEOS_AI --profile laptop-safe --execute --policy-ok --evidence-out ./portable-ai-evidence.json python3 bin/sourceosctl portable-ai start-plan /Volumes/SOURCEOS_AI --surface turtleterm +python3 bin/sourceosctl portable-ai stop-plan /Volumes/SOURCEOS_AI python3 bin/sourceosctl portable-ai inspect 
/Volumes/SOURCEOS_AI ``` @@ -56,8 +57,18 @@ BYOM GGUF/local model verification path: python3 bin/sourceosctl portable-ai prepare /Volumes/SOURCEOS_AI --profile byom-gguf --execute --policy-ok python3 bin/sourceosctl portable-ai byom verify /Volumes/SOURCEOS_AI ./models/example.gguf --name example --license-ref operator-attestation-required python3 bin/sourceosctl portable-ai byom verify /Volumes/SOURCEOS_AI ./models/example.gguf --name example --license-ref operator-attestation-required --execute --policy-ok --evidence-out ./byom-evidence.json +python3 bin/sourceosctl portable-ai start-plan /Volumes/SOURCEOS_AI --provider ollama-compatible --surface turtleterm --model example ``` +Runtime planning path: + +```bash +python3 bin/sourceosctl portable-ai start-plan /Volumes/SOURCEOS_AI --provider ollama-compatible --host 127.0.0.1 --port 11434 --surface turtleterm +python3 bin/sourceosctl portable-ai stop-plan /Volumes/SOURCEOS_AI --provider ollama-compatible --host 127.0.0.1 --port 11434 +``` + +`start-plan` emits runtime environment variables, local endpoint refs, command hints such as `ollama serve`, surface handoff data, model-pack selection, and Agent Machine activation requirements. `stop-plan` emits teardown guidance and safe-eject prerequisites. Neither command starts daemons, kills processes, downloads models, grants network, grants tool use, or stores prompt bodies. + BYOM verification hashes a local file, records size and SHA-256, and emits a `ModelCarryPack` manifest. It does **not** download a model, contact a provider, start a runtime, grant network, grant tool use, or store prompt bodies. Portable AI Kit does **not** download model weights implicitly, start local daemons implicitly, run inference during preflight, or authorize prompt egress by default. Runtime activation belongs to Agent Machine. Model pack definitions belong to `SourceOS-Linux/sourceos-model-carry`. Routing belongs to `SocioProphet/model-router` under Policy Fabric posture. 
From 5e427982ee4ee0ee8741a54274f522660f221b85 Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Thu, 7 May 2026 01:16:00 -0400 Subject: [PATCH 19/22] Add Homebrew formula for sourceos-devtools --- .../homebrew/Formula/sourceos-devtools.rb | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 packaging/homebrew/Formula/sourceos-devtools.rb diff --git a/packaging/homebrew/Formula/sourceos-devtools.rb b/packaging/homebrew/Formula/sourceos-devtools.rb new file mode 100644 index 0000000..da64dea --- /dev/null +++ b/packaging/homebrew/Formula/sourceos-devtools.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +class SourceosDevtools < Formula + desc "SourceOS developer and Portable AI Kit operator tools" + homepage "https://github.com/SourceOS-Linux/sourceos-devtools" + head "https://github.com/SourceOS-Linux/sourceos-devtools.git", branch: "main" + + depends_on "python@3.12" + + def install + libexec.install Dir["*"] + + chmod 0755, libexec/"bin/sourceosctl" + chmod 0755, libexec/"bin/sourceos-portable-ai" + + (bin/"sourceosctl").write_env_script libexec/"bin/sourceosctl", { + PATH: "#{Formula["python@3.12"].opt_bin}:$PATH", + PYTHONPATH: libexec, + } + + (bin/"sourceos-portable-ai").write_env_script libexec/"bin/sourceos-portable-ai", { + PATH: "#{Formula["python@3.12"].opt_bin}:$PATH", + PYTHONPATH: libexec, + } + end + + test do + assert_match "PortableAIProfiles", shell_output("#{bin}/sourceosctl portable-ai profiles") + assert_match "PortableAIProfiles", shell_output("#{bin}/sourceos-portable-ai profiles") + end +end From 6298a4af2b7b78cfc0947babb6578bea262f1d66 Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Thu, 7 May 2026 01:18:59 -0400 Subject: [PATCH 20/22] Add sourceos-devtools install guide --- docs/install.md | 82 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 docs/install.md diff --git 
a/docs/install.md b/docs/install.md new file mode 100644 index 0000000..f03b345 --- /dev/null +++ b/docs/install.md @@ -0,0 +1,82 @@ +# SourceOS Devtools Install Guide + +This guide documents the install surface for `sourceos-devtools`, including SourceOS Portable AI Kit commands. + +The install surface intentionally installs CLI/planning tools only. It does not install model weights, pull models, start local model runtimes, grant network access, or mutate a Portable AI Kit root by default. + +## Direct Homebrew install + +Before tap promotion, install from the repository formula: + +```bash +brew install --HEAD https://raw.githubusercontent.com/SourceOS-Linux/sourceos-devtools/main/packaging/homebrew/Formula/sourceos-devtools.rb +``` + +After tap promotion: + +```bash +brew install SourceOS-Linux/tap/sourceos-devtools +``` + +Validate: + +```bash +sourceosctl portable-ai profiles +sourceos-portable-ai profiles +``` + +## Checkout development flow + +```bash +git clone https://github.com/SourceOS-Linux/sourceos-devtools.git +cd sourceos-devtools +python3 bin/sourceosctl portable-ai profiles +python3 -m unittest discover -s tests -v +``` + +## Portable AI Kit smoke test + +Use a temporary or removable target root. The commands below do not download models or start daemons. 
+ +```bash +TARGET=/tmp/SOURCEOS_AI +python3 bin/sourceosctl portable-ai preflight "$TARGET" +python3 bin/sourceosctl portable-ai prepare "$TARGET" --profile laptop-safe --dry-run +python3 bin/sourceosctl portable-ai prepare "$TARGET" --profile laptop-safe --execute --policy-ok --evidence-out ./portable-ai-evidence.json +python3 bin/sourceosctl portable-ai start-plan "$TARGET" --provider ollama-compatible --surface turtleterm +python3 bin/sourceosctl portable-ai stop-plan "$TARGET" --provider ollama-compatible +python3 bin/sourceosctl portable-ai inspect "$TARGET" +``` + +## BYOM verification smoke test + +```bash +TARGET=/tmp/SOURCEOS_AI +mkdir -p ./tmp-models +printf 'demo model bytes\n' > ./tmp-models/example.gguf +python3 bin/sourceosctl portable-ai prepare "$TARGET" --profile byom-gguf --execute --policy-ok +python3 bin/sourceosctl portable-ai byom verify "$TARGET" ./tmp-models/example.gguf --name example +python3 bin/sourceosctl portable-ai byom verify "$TARGET" ./tmp-models/example.gguf --name example --execute --policy-ok --evidence-out ./byom-evidence.json +``` + +BYOM verification hashes a local file, records file size and SHA-256, writes a `ModelCarryPack` manifest only with `--execute --policy-ok`, and records that no download occurred. + +## Policy posture + +Default posture: + +- prompt egress is denied; +- tool use is denied; +- model downloads are explicit only; +- runtime activation remains Agent Machine gated; +- preflight is non-mutating unless `--benchmark` is requested; +- `--benchmark` writes and removes a temporary local file and records cleanup in evidence; +- runtime start/stop commands are plans only at this layer; +- local model file materialization requires `--execute --policy-ok`. + +## Promotion path + +1. Land the repo-local formula. +2. Validate direct formula install. +3. Promote formula into `SourceOS-Linux/tap` after release checks. +4. Add signed release artifacts and checksums once versioned packaging stabilizes. 
From 9ed8b950c41fefca19cdfe393a98019bee16bed1 Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Thu, 7 May 2026 01:20:21 -0400 Subject: [PATCH 21/22] Add packaging validator --- scripts/validate_packaging.py | 82 +++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 scripts/validate_packaging.py diff --git a/scripts/validate_packaging.py b/scripts/validate_packaging.py new file mode 100644 index 0000000..3bdcb95 --- /dev/null +++ b/scripts/validate_packaging.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 +"""Validate sourceos-devtools packaging scaffolding. + +This validator is intentionally lightweight and stdlib-only. It checks that the +repo-local Homebrew formula continues to expose the SourceOS CLI surfaces without +embedding secrets, model downloads, or runtime side effects. +""" + +from __future__ import annotations + +import sys +from pathlib import Path + +ROOT = Path(__file__).resolve().parents[1] +FORMULA = ROOT / "packaging/homebrew/Formula/sourceos-devtools.rb" +INSTALL_DOC = ROOT / "docs/install.md" + +REQUIRED_FORMULA_SNIPPETS = [ + "class SourceosDevtools < Formula", + "desc \"SourceOS developer and Portable AI Kit operator tools\"", + "head \"https://github.com/SourceOS-Linux/sourceos-devtools.git\", branch: \"main\"", + "depends_on \"python@3.12\"", + "sourceosctl", + "sourceos-portable-ai", + "PortableAIProfiles", +] + +FORBIDDEN_FORMULA_SNIPPETS = [ + "curl ", + "wget ", + "ollama pull", + "ollama run", + "ollama serve", + "HUGGINGFACE", + "HF_TOKEN", + "OPENAI_API_KEY", +] + +REQUIRED_DOC_SNIPPETS = [ + "brew install --HEAD", + "sourceosctl portable-ai profiles", + "portable-ai preflight", + "portable-ai prepare", + "portable-ai start-plan", + "portable-ai stop-plan", + "portable-ai byom verify", + "prompt egress is denied", +] + + +def fail(message: str) -> int: + print(f"ERR: {message}", file=sys.stderr) + return 1 + + +def main() -> int: + if not FORMULA.exists(): + 
return fail(f"missing {FORMULA.relative_to(ROOT)}") + if not INSTALL_DOC.exists(): + return fail(f"missing {INSTALL_DOC.relative_to(ROOT)}") + + formula = FORMULA.read_text(encoding="utf-8") + install_doc = INSTALL_DOC.read_text(encoding="utf-8") + + for snippet in REQUIRED_FORMULA_SNIPPETS: + if snippet not in formula: + return fail(f"formula missing required snippet: {snippet}") + + for snippet in FORBIDDEN_FORMULA_SNIPPETS: + if snippet in formula: + return fail(f"formula contains forbidden side-effect/secrets snippet: {snippet}") + + for snippet in REQUIRED_DOC_SNIPPETS: + if snippet not in install_doc: + return fail(f"install doc missing required snippet: {snippet}") + + print("Packaging validation passed") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) From 740997c504152e6e5edd0e5e42558c3285cf5762 Mon Sep 17 00:00:00 2001 From: mdheller <21163552+mdheller@users.noreply.github.com> Date: Thu, 7 May 2026 01:21:57 -0400 Subject: [PATCH 22/22] Add validation target for devtools packaging --- Makefile | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 Makefile diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..08cb7f6 --- /dev/null +++ b/Makefile @@ -0,0 +1,13 @@ +.PHONY: validate test validate-packaging + +validate: test validate-packaging + @test -f README.md + @test -f docs/install.md + @test -f docs/integration/portable-ai-kit.md + @test -f packaging/homebrew/Formula/sourceos-devtools.rb + +test: + python3 -m unittest discover -s tests -v + +validate-packaging: + python3 scripts/validate_packaging.py