diff --git a/README.md b/README.md index b761cbd..6850132 100644 --- a/README.md +++ b/README.md @@ -367,6 +367,8 @@ Adapters are used by both `export` and `run`. Available adapters: | `openclaw` | OpenClaw format | | `nanobot` | Nanobot format | | `cursor` | Cursor `.cursor/rules/*.mdc` files | +| `langgraph` | LangGraph `StateGraph` Python module (skills → nodes, skillflows → edges) | +| `deepagents` | LangChain DeepAgents harness (`create_deep_agent(...)`) — skills, tools, sub-agents | ```bash # Export to system prompt diff --git a/examples/deepagents/README.md b/examples/deepagents/README.md new file mode 100644 index 0000000..4616e84 --- /dev/null +++ b/examples/deepagents/README.md @@ -0,0 +1,30 @@ +# DeepAgents example + +A research agent with a fact-checker sub-agent, demonstrating the DeepAgents adapter: + +- `agent.yaml` — manifest (model, skills, tools, sub-agent declaration) +- `SOUL.md` — agent identity, embedded in the generated `system_prompt` +- `skills/web-research/`, `skills/summarize/` — passed via `skills=["./skills"]`; DeepAgents loads each `SKILL.md` natively +- `tools/web-search.yaml` — emitted as a `@tool` function bound into `tools=[...]` +- `agents/fact-checker/` — emitted as a `SubAgent` dict in `subagents=[...]` +- `expected_output.py` — the Python module the adapter produces + +DeepAgents is a higher-level harness on top of LangGraph — there is no graph +wiring: the model decides when to plan, when to delegate to sub-agents, and +when to call tools. If you need explicit per-step edges, use the `langgraph` +adapter instead. + +## Regenerate + +```bash +gapman export --dir examples/deepagents --format deepagents --output examples/deepagents/expected_output.py +``` + +## Run the generated agent + +```bash +pip install deepagents langchain-anthropic +python examples/deepagents/expected_output.py +``` + +The generated file leaves tool implementations as `NotImplementedError` stubs — replace them with your own logic before invoking. 
diff --git a/examples/deepagents/SOUL.md b/examples/deepagents/SOUL.md new file mode 100644 index 0000000..a789e44 --- /dev/null +++ b/examples/deepagents/SOUL.md @@ -0,0 +1,5 @@ +# Research Assistant + +You are a research assistant. Find authoritative sources, extract the relevant +facts, and summarize them honestly. Delegate to the `fact-checker` sub-agent +whenever a claim warrants independent verification. Cite every claim. diff --git a/examples/deepagents/agent.yaml b/examples/deepagents/agent.yaml new file mode 100644 index 0000000..4bf007b --- /dev/null +++ b/examples/deepagents/agent.yaml @@ -0,0 +1,16 @@ +spec_version: "0.1.0" +name: research-assistant +version: 1.0.0 +description: Research assistant with a fact-checker sub-agent +author: gitagent-examples +license: MIT +model: + preferred: anthropic:claude-sonnet-4-5 +skills: + - web-research + - summarize +tools: + - web-search +agents: + fact-checker: + description: Verifies factual claims against authoritative sources diff --git a/examples/deepagents/agents/fact-checker/SOUL.md b/examples/deepagents/agents/fact-checker/SOUL.md new file mode 100644 index 0000000..c45c99f --- /dev/null +++ b/examples/deepagents/agents/fact-checker/SOUL.md @@ -0,0 +1,4 @@ +# Fact-Checker + +You are a pedantic fact-checker. Treat every claim as unverified until you +locate a primary source. If a claim cannot be substantiated, say so plainly. 
diff --git a/examples/deepagents/agents/fact-checker/agent.yaml b/examples/deepagents/agents/fact-checker/agent.yaml new file mode 100644 index 0000000..a6ba2c6 --- /dev/null +++ b/examples/deepagents/agents/fact-checker/agent.yaml @@ -0,0 +1,4 @@ +spec_version: "0.1.0" +name: fact-checker +version: 1.0.0 +description: Verifies factual claims against authoritative sources diff --git a/examples/deepagents/expected_output.py b/examples/deepagents/expected_output.py new file mode 100644 index 0000000..cde718b --- /dev/null +++ b/examples/deepagents/expected_output.py @@ -0,0 +1,83 @@ +""" +DeepAgents definition for research-assistant v1.0.0 +Generated by gitagent export --format deepagents +""" + +from __future__ import annotations + +from deepagents import create_deep_agent +from langchain_core.tools import tool + +# Agent metadata +AGENT_NAME = "research-assistant" +AGENT_VERSION = "1.0.0" +MODEL = "anthropic:claude-sonnet-4-5" + +# System prompt (SOUL.md + RULES.md + DUTIES.md + compliance) +SYSTEM_PROMPT = """# research-assistant +Research assistant with a fact-checker sub-agent + +# Research Assistant + +You are a research assistant. Find authoritative sources, extract the relevant +facts, and summarize them honestly. Delegate to the `fact-checker` sub-agent +whenever a claim warrants independent verification. Cite every claim. +""" + +# Hooks (pre_tool_use scripts run before every tool call) +def _run_pre_tool_use_hooks(tool_name: str) -> None: + """No pre_tool_use hooks configured in hooks/hooks.yaml.""" + return None + +# NOTE: other hook events (post_tool_use, on_session_start, etc.) have no +# DeepAgents equivalent and are intentionally skipped. +# NOTE: memory/ has no direct DeepAgents equivalent — wire a checkpointer if needed. 
+ +# Tools (from tools/*.yaml) +@tool +def web_search(query: str) -> str: + """Search the public web for a query""" + _run_pre_tool_use_hooks("web-search") + raise NotImplementedError("Implement tool: web-search") + +TOOLS = [web_search] + +# Skills (skills/<name>/SKILL.md — DeepAgents loads these natively) +# Pointing skills= at the directory lets DeepAgents discover every SKILL.md +# without us having to inline the skill content into SYSTEM_PROMPT. +SKILLS = ["./skills"] + +# For reference, the skills available in this agent: +# - summarize: Condense the gathered sources into a faithful, cited summary +# - web-research: Search the web for sources relevant to the user's question + +# Sub-agents (agents/<name>/ → SubAgent dicts) +fact_checker_subagent = { + "name": "fact-checker", + "description": "Verifies factual claims against authoritative sources", + "system_prompt": """# fact-checker +Verifies factual claims against authoritative sources + +# Fact-Checker + +You are a pedantic fact-checker. Treat every claim as unverified until you +locate a primary source. If a claim cannot be substantiated, say so plainly. +""", + "tools": TOOLS, +} + +SUBAGENTS = [fact_checker_subagent] + +# Agent +agent = create_deep_agent( + model=MODEL, + system_prompt=SYSTEM_PROMPT, + tools=TOOLS, + skills=SKILLS, + subagents=SUBAGENTS, +) + +if __name__ == "__main__": + result = agent.invoke({"messages": [{"role": "user", "content": "Hello"}]}) + for message in result["messages"]: + print(message) diff --git a/examples/deepagents/skills/summarize/SKILL.md b/examples/deepagents/skills/summarize/SKILL.md new file mode 100644 index 0000000..e4e9a75 --- /dev/null +++ b/examples/deepagents/skills/summarize/SKILL.md @@ -0,0 +1,8 @@ +--- +name: summarize +description: Condense the gathered sources into a faithful, cited summary +--- + +Produce a concise summary of the gathered material. Every factual claim must +be attributed to a specific source URL.
If sources disagree, surface the +disagreement explicitly. diff --git a/examples/deepagents/skills/web-research/SKILL.md b/examples/deepagents/skills/web-research/SKILL.md new file mode 100644 index 0000000..9ae2d4b --- /dev/null +++ b/examples/deepagents/skills/web-research/SKILL.md @@ -0,0 +1,8 @@ +--- +name: web-research +description: Search the web for sources relevant to the user's question +allowed-tools: web-search +--- + +Use the `web-search` tool to find authoritative sources. Capture the URL of +every source consulted; downstream skills will cite them. diff --git a/examples/deepagents/tools/web-search.yaml b/examples/deepagents/tools/web-search.yaml new file mode 100644 index 0000000..4f84157 --- /dev/null +++ b/examples/deepagents/tools/web-search.yaml @@ -0,0 +1,10 @@ +name: web-search +description: Search the public web for a query +input_schema: + type: object + properties: + query: + type: string + description: The search query + required: + - query diff --git a/examples/langgraph/README.md b/examples/langgraph/README.md new file mode 100644 index 0000000..40a11ee --- /dev/null +++ b/examples/langgraph/README.md @@ -0,0 +1,26 @@ +# LangGraph example + +A two-step research agent that demonstrates the LangGraph adapter: + +- `agent.yaml` — manifest (model, runtime, skill + tool references) +- `SOUL.md` — agent identity, embedded in the generated system prompt +- `skills/web-research/SKILL.md` — first node in the graph +- `skills/summarize/SKILL.md` — second node, depends on `web-research` +- `skillflows/research.yaml` — wiring (`steps`, `depends_on`) → `add_edge` calls +- `tools/web-search.yaml` — bound as a `ToolNode` +- `expected_output.py` — the Python module the adapter produces + +## Regenerate + +```bash +gapman export --dir examples/langgraph --format langgraph --output examples/langgraph/expected_output.py +``` + +## Run the generated graph + +```bash +pip install "langgraph>=0.2" "langchain>=0.3" "langchain-core>=0.3" langchain-anthropic 
+python examples/langgraph/expected_output.py +``` + +The generated file leaves tool implementations as `NotImplementedError` stubs — replace them with your own logic before invoking. diff --git a/examples/langgraph/SOUL.md b/examples/langgraph/SOUL.md new file mode 100644 index 0000000..e26ee4f --- /dev/null +++ b/examples/langgraph/SOUL.md @@ -0,0 +1,4 @@ +# Research Assistant + +You are a research assistant. Find authoritative sources, extract the relevant +facts, and summarize them honestly. Cite every claim. When uncertain, say so. diff --git a/examples/langgraph/agent.yaml b/examples/langgraph/agent.yaml new file mode 100644 index 0000000..57e97c8 --- /dev/null +++ b/examples/langgraph/agent.yaml @@ -0,0 +1,15 @@ +spec_version: "0.1.0" +name: research-assistant +version: 1.0.0 +description: Two-step research assistant — searches the web, then summarizes findings +author: gitagent-examples +license: MIT +model: + preferred: claude-sonnet-4-5 +runtime: + max_turns: 25 +skills: + - web-research + - summarize +tools: + - web-search diff --git a/examples/langgraph/expected_output.py b/examples/langgraph/expected_output.py new file mode 100644 index 0000000..eb89c8d --- /dev/null +++ b/examples/langgraph/expected_output.py @@ -0,0 +1,109 @@ +""" +LangGraph definition for research-assistant v1.0.0 +Generated by gitagent export --format langgraph +""" + +from __future__ import annotations + +from typing import Annotated, TypedDict + +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage +from langchain_core.tools import tool +from langchain.chat_models import init_chat_model +from langgraph.graph import END, START, StateGraph +from langgraph.graph.message import add_messages +from langgraph.prebuilt import ToolNode + +# Agent metadata +AGENT_NAME = "research-assistant" +AGENT_VERSION = "1.0.0" +AGENT_DESCRIPTION = "Two-step research assistant — searches the web, then summarizes findings" +MODEL = "claude-sonnet-4-5" +RECURSION_LIMIT 
= 25 + +# System prompt (SOUL.md + RULES.md + DUTIES.md + compliance) +SYSTEM_PROMPT = """# research-assistant +Two-step research assistant — searches the web, then summarizes findings + +# Research Assistant + +You are a research assistant. Find authoritative sources, extract the relevant +facts, and summarize them honestly. Cite every claim. When uncertain, say so. +""" + +# Skill instructions +SKILL_SUMMARIZE_INSTRUCTIONS = """Condense the gathered sources into a faithful, cited summary + +Produce a concise summary of the gathered material. Every factual claim must +be attributed to a specific source URL. If the sources disagree, say so +explicitly rather than picking a side.""" + +SKILL_WEB_RESEARCH_INSTRUCTIONS = """Search the web for sources relevant to the user's question +Allowed tools: web-search + +Use the `web-search` tool to find authoritative sources. Prefer primary sources +(official docs, academic papers) over secondary commentary. Capture the URL of +every source consulted.""" + +# Graph state +class AgentState(TypedDict): + messages: Annotated[list[BaseMessage], add_messages] + +# Tools (from tools/*.yaml) +@tool +def web_search(query: str) -> str: + """Search the public web for a query""" + raise NotImplementedError("Implement tool: web-search") + +TOOLS = [web_search] +tool_node = ToolNode(TOOLS) + +llm = init_chat_model(MODEL) +llm_with_tools = llm.bind_tools(TOOLS) if TOOLS else llm + +# Hooks (pre_tool_use → before_tool callback) +def before_tool(state: AgentState) -> AgentState: + """No pre_tool_use hooks configured in hooks/hooks.yaml.""" + return state + +# NOTE: post_tool_use, on_session_start, etc. have no LangGraph equivalent — skipped. +# NOTE: memory/ has no direct LangGraph equivalent — implement via a checkpointer. 
+ +# Skill nodes (one per skills/<name>/SKILL.md) +def skill_summarize(state: AgentState) -> dict: + """Skill node: summarize — Condense the gathered sources into a faithful, cited summary""" + prompt = SYSTEM_PROMPT + "\n\n" + SKILL_SUMMARIZE_INSTRUCTIONS + messages = [SystemMessage(content=prompt), *state["messages"]] + response = llm_with_tools.invoke(messages) + return {"messages": [response]} + +def skill_web_research(state: AgentState) -> dict: + """Skill node: web-research — Search the web for sources relevant to the user's question""" + prompt = SYSTEM_PROMPT + "\n\n" + SKILL_WEB_RESEARCH_INSTRUCTIONS + messages = [SystemMessage(content=prompt), *state["messages"]] + response = llm_with_tools.invoke(messages) + return {"messages": [response]} + +# Graph construction +graph = StateGraph(AgentState) + +graph.add_node("tools", tool_node) + +# Skillflow: research +graph.add_node("research", skill_web_research) +graph.add_node("summarize", skill_summarize) +graph.add_edge(START, "research") +graph.add_edge("research", "summarize") +graph.add_edge("summarize", END) + + +# Compile +app = graph.compile() + +if __name__ == "__main__": + result = app.invoke( + {"messages": [HumanMessage(content="Hello")]}, + config={"recursion_limit": RECURSION_LIMIT}, + ) + for message in result["messages"]: + print(message) diff --git a/examples/langgraph/skillflows/research.yaml b/examples/langgraph/skillflows/research.yaml new file mode 100644 index 0000000..f79aef2 --- /dev/null +++ b/examples/langgraph/skillflows/research.yaml @@ -0,0 +1,8 @@ +name: research +description: Run web-research, then summarize the result +steps: + - id: research + skill: web-research + - id: summarize + skill: summarize + depends_on: [research] diff --git a/examples/langgraph/skills/summarize/SKILL.md b/examples/langgraph/skills/summarize/SKILL.md new file mode 100644 index 0000000..6dc0881 --- --- /dev/null +++ b/examples/langgraph/skills/summarize/SKILL.md @@ -0,0 +1,8 @@ +--- +name: summarize +description:
Condense the gathered sources into a faithful, cited summary +--- + +Produce a concise summary of the gathered material. Every factual claim must +be attributed to a specific source URL. If the sources disagree, say so +explicitly rather than picking a side. diff --git a/examples/langgraph/skills/web-research/SKILL.md b/examples/langgraph/skills/web-research/SKILL.md new file mode 100644 index 0000000..72a25b9 --- /dev/null +++ b/examples/langgraph/skills/web-research/SKILL.md @@ -0,0 +1,9 @@ +--- +name: web-research +description: Search the web for sources relevant to the user's question +allowed-tools: web-search +--- + +Use the `web-search` tool to find authoritative sources. Prefer primary sources +(official docs, academic papers) over secondary commentary. Capture the URL of +every source consulted. diff --git a/examples/langgraph/tools/web-search.yaml b/examples/langgraph/tools/web-search.yaml new file mode 100644 index 0000000..4f84157 --- /dev/null +++ b/examples/langgraph/tools/web-search.yaml @@ -0,0 +1,10 @@ +name: web-search +description: Search the public web for a query +input_schema: + type: object + properties: + query: + type: string + description: The search query + required: + - query diff --git a/src/adapters/deepagents.test.ts b/src/adapters/deepagents.test.ts new file mode 100644 index 0000000..3ae48b7 --- /dev/null +++ b/src/adapters/deepagents.test.ts @@ -0,0 +1,231 @@ +/** + * Tests for the DeepAgents adapter (export). + * + * Uses Node.js built-in test runner (node --test). 
+ */ +import { test, describe } from 'node:test'; +import assert from 'node:assert/strict'; +import { mkdtempSync, writeFileSync, mkdirSync } from 'node:fs'; +import { join } from 'node:path'; +import { tmpdir } from 'node:os'; + +import { exportToDeepAgents, exportToDeepAgentsString } from './deepagents.js'; + +// Helpers + +interface MakeAgentDirOpts { + name?: string; + description?: string; + version?: string; + soul?: string; + rules?: string; + duties?: string; + model?: string; + skills?: Array<{ name: string; description: string; instructions: string }>; + tools?: Array<{ name: string; description?: string; inputSchema?: Record<string, unknown> }>; + hooks?: Record<string, Array<{ script: string }>>; + subAgents?: Array<{ name: string; description: string; soul?: string }>; +} + +function makeAgentDir(opts: MakeAgentDirOpts): string { + const dir = mkdtempSync(join(tmpdir(), 'gitagent-deepagents-test-')); + + const modelBlock = opts.model ? `model:\n preferred: ${opts.model}\n` : ''; + + writeFileSync( + join(dir, 'agent.yaml'), + `spec_version: '0.1.0'\nname: ${opts.name ?? 'test-agent'}\nversion: '${opts.version ?? '0.1.0'}'\ndescription: '${opts.description ??
'A test agent'}'\n${modelBlock}`, + 'utf-8', + ); + + if (opts.soul !== undefined) writeFileSync(join(dir, 'SOUL.md'), opts.soul, 'utf-8'); + if (opts.rules !== undefined) writeFileSync(join(dir, 'RULES.md'), opts.rules, 'utf-8'); + if (opts.duties !== undefined) writeFileSync(join(dir, 'DUTIES.md'), opts.duties, 'utf-8'); + + if (opts.skills) { + for (const skill of opts.skills) { + const skillDir = join(dir, 'skills', skill.name); + mkdirSync(skillDir, { recursive: true }); + writeFileSync( + join(skillDir, 'SKILL.md'), + `---\nname: ${skill.name}\ndescription: '${skill.description}'\n---\n\n${skill.instructions}\n`, + 'utf-8', + ); + } + } + + if (opts.tools) { + const toolsDir = join(dir, 'tools'); + mkdirSync(toolsDir, { recursive: true }); + for (const tool of opts.tools) { + const lines = [ + `name: ${tool.name}`, + `description: '${tool.description ?? ''}'`, + ]; + if (tool.inputSchema) { + lines.push(`input_schema: ${JSON.stringify(tool.inputSchema)}`); + } + writeFileSync(join(toolsDir, `${tool.name}.yaml`), lines.join('\n') + '\n', 'utf-8'); + } + } + + if (opts.hooks) { + const hooksDir = join(dir, 'hooks'); + mkdirSync(hooksDir, { recursive: true }); + const lines: string[] = ['hooks:']; + for (const [event, entries] of Object.entries(opts.hooks)) { + lines.push(` ${event}:`); + for (const entry of entries) { + lines.push(` - script: ${entry.script}`); + } + } + writeFileSync(join(hooksDir, 'hooks.yaml'), lines.join('\n') + '\n', 'utf-8'); + } + + if (opts.subAgents) { + for (const sub of opts.subAgents) { + const subDir = join(dir, 'agents', sub.name); + mkdirSync(subDir, { recursive: true }); + writeFileSync( + join(subDir, 'agent.yaml'), + `spec_version: '0.1.0'\nname: ${sub.name}\nversion: '0.1.0'\ndescription: '${sub.description}'\n`, + 'utf-8', + ); + if (sub.soul) writeFileSync(join(subDir, 'SOUL.md'), sub.soul, 'utf-8'); + } + } + + return dir; +} + +// exportToDeepAgents + +describe('exportToDeepAgents', () => { + test('returns { code: string 
}', () => { + const dir = makeAgentDir({ name: 'demo-agent' }); + const result = exportToDeepAgents(dir); + assert.equal(typeof result.code, 'string'); + assert.ok(result.code.length > 0); + }); + + test('imports create_deep_agent from deepagents', () => { + const dir = makeAgentDir({}); + const { code } = exportToDeepAgents(dir); + assert.match(code, /from deepagents import create_deep_agent/); + }); + + test('emits a create_deep_agent(...) call', () => { + const dir = makeAgentDir({}); + const { code } = exportToDeepAgents(dir); + assert.match(code, /agent = create_deep_agent\(/); + assert.match(code, /model=MODEL/); + assert.match(code, /system_prompt=SYSTEM_PROMPT/); + assert.match(code, /tools=TOOLS/); + }); + + test('embeds SOUL.md, RULES.md, DUTIES.md into the system prompt', () => { + const dir = makeAgentDir({ + soul: '# Soul\n\nBe precise.', + rules: '# Rules\n\nNever lie.', + duties: '# Duties\n\nThe analyst proposes, the reviewer approves.', + }); + const { code } = exportToDeepAgents(dir); + assert.match(code, /Be precise/); + assert.match(code, /Never lie/); + assert.match(code, /The analyst proposes/); + }); + + test('tools/*.yaml become @tool functions registered in TOOLS', () => { + const dir = makeAgentDir({ + tools: [ + { + name: 'web-search', + description: 'Search the web', + inputSchema: { + type: 'object', + properties: { query: { type: 'string' } }, + required: ['query'], + }, + }, + ], + }); + const { code } = exportToDeepAgents(dir); + assert.match(code, /@tool/); + assert.match(code, /def web_search\(query: str\) -> str:/); + assert.match(code, /TOOLS = \[web_search\]/); + }); + + test('skills/ becomes skills=SKILLS pointing at "./skills"', () => { + const dir = makeAgentDir({ + skills: [ + { name: 'research', description: 'Research a topic', instructions: 'Cite sources.' 
}, + ], + }); + const { code } = exportToDeepAgents(dir); + assert.match(code, /SKILLS = \["\.\/skills"\]/); + assert.match(code, /skills=SKILLS,/); + // The skill metadata is enumerated as a reference comment. + assert.match(code, /#\s+- research:/); + }); + + test('no skills/ → SKILLS is an empty list and not passed to create_deep_agent', () => { + const dir = makeAgentDir({}); + const { code } = exportToDeepAgents(dir); + assert.match(code, /SKILLS: list = \[\]/); + assert.doesNotMatch(code, /skills=SKILLS/); + }); + + test('sub-agents become SubAgent dicts with name, description, system_prompt', () => { + const dir = makeAgentDir({ + subAgents: [ + { name: 'fact-checker', description: 'Verifies claims', soul: '# Soul\n\nBe pedantic.' }, + ], + }); + const { code } = exportToDeepAgents(dir); + assert.match(code, /fact_checker_subagent = \{/); + assert.match(code, /"name": "fact-checker"/); + assert.match(code, /"description": "Verifies claims"/); + assert.match(code, /"system_prompt":/); + assert.match(code, /Be pedantic/); + assert.match(code, /SUBAGENTS = \[fact_checker_subagent\]/); + assert.match(code, /subagents=SUBAGENTS,/); + }); + + test('pre_tool_use hooks are invoked from inside each generated tool function', () => { + const dir = makeAgentDir({ + tools: [{ name: 'noop', description: 'A no-op' }], + hooks: { + pre_tool_use: [{ script: 'audit.sh' }], + }, + }); + const { code } = exportToDeepAgents(dir); + assert.match(code, /_run_pre_tool_use_hooks\("noop"\)/); + assert.match(code, /hooks\/audit\.sh/); + }); + + test('model.preferred is emitted as MODEL', () => { + const dir = makeAgentDir({ model: 'claude-opus-4-7' }); + const { code } = exportToDeepAgents(dir); + assert.match(code, /MODEL = "claude-opus-4-7"/); + }); + + test('default model is used when model.preferred is absent', () => { + const dir = makeAgentDir({}); + const { code } = exportToDeepAgents(dir); + assert.match(code, /MODEL = "anthropic:claude-sonnet-4-5"/); + }); +}); + +// 
exportToDeepAgentsString + +describe('exportToDeepAgentsString', () => { + test('matches exportToDeepAgents().code', () => { + const dir = makeAgentDir({ name: 'parity-agent' }); + assert.equal(exportToDeepAgentsString(dir), exportToDeepAgents(dir).code); + }); + + test('output contains create_deep_agent', () => { + const dir = makeAgentDir({}); + assert.match(exportToDeepAgentsString(dir), /create_deep_agent/); + }); +}); diff --git a/src/adapters/deepagents.ts b/src/adapters/deepagents.ts new file mode 100644 index 0000000..acd1732 --- /dev/null +++ b/src/adapters/deepagents.ts @@ -0,0 +1,341 @@ +import { existsSync, readFileSync, readdirSync } from 'node:fs'; +import { join, resolve } from 'node:path'; +import yaml from 'js-yaml'; +import { loadAgentManifest, loadFileIfExists, type AgentManifest } from '../utils/loader.js'; +import { loadAllSkills, type ParsedSkill } from '../utils/skill-loader.js'; +import { buildComplianceSection } from './shared.js'; + +/** + * Export a gitagent to a DeepAgents (LangChain) Python module. + * + * DeepAgents is a higher-level harness on top of LangGraph. The whole agent is + * one `create_deep_agent(...)` call. There is no graph wiring — execution flow + * (planning, sub-agent delegation, tool use) is decided by the model at runtime. + * + * Mapping: + * agent.yaml (model.preferred) → model="..." + * SOUL.md + RULES.md + DUTIES.md + compliance → system_prompt="..." + * skills/ → skills=["./skills"] (DeepAgents loads SKILL.md natively) + * tools/*.yaml → @tool defs in tools=[...] + * hooks/hooks.yaml (pre_tool_use) → wrapper that runs hook scripts before each tool call + * agents/<name>/ → subagents=[{ name, description, system_prompt, ... }] + * + * Reference: https://docs.langchain.com/oss/python/deepagents/overview + */ +export interface DeepAgentsExport { + /** Generated Python source for the DeepAgents application.
*/ + code: string; +} + +interface ToolDef { + name: string; + description: string; + params: Array<{ name: string; pyType: string }>; +} + +interface SubAgentDef { + name: string; + description: string; + systemPrompt: string; + hasSkills: boolean; +} + +export function exportToDeepAgents(dir: string): DeepAgentsExport { + const agentDir = resolve(dir); + const manifest = loadAgentManifest(agentDir); + + const systemPrompt = buildSystemPrompt(agentDir, manifest); + const skills = loadAllSkills(join(agentDir, 'skills')); + const tools = collectTools(agentDir); + const preToolUseScripts = collectPreToolUseHooks(agentDir); + const subAgents = collectSubAgents(agentDir); + + const code = renderPython({ + manifest, + systemPrompt, + skills, + tools, + preToolUseScripts, + subAgents, + }); + + return { code }; +} + +export function exportToDeepAgentsString(dir: string): string { + return exportToDeepAgents(dir).code; +} + +// System prompt assembly +function buildSystemPrompt(agentDir: string, manifest: AgentManifest): string { + const parts: string[] = []; + + parts.push(`# ${manifest.name}`); + parts.push(manifest.description); + parts.push(''); + + const soul = loadFileIfExists(join(agentDir, 'SOUL.md')); + if (soul) { parts.push(soul.trim()); parts.push(''); } + + const rules = loadFileIfExists(join(agentDir, 'RULES.md')); + if (rules) { parts.push(rules.trim()); parts.push(''); } + + const duties = loadFileIfExists(join(agentDir, 'DUTIES.md')); + if (duties) { parts.push(duties.trim()); parts.push(''); } + + if (manifest.compliance) { + const compliance = buildComplianceSection(manifest.compliance); + if (compliance) { parts.push(compliance); parts.push(''); } + } + + return parts.join('\n').trimEnd() + '\n'; +} + +// Tool / hook / sub-agent discovery + +function collectTools(agentDir: string): ToolDef[] { + const toolsDir = join(agentDir, 'tools'); + if (!existsSync(toolsDir)) return []; + + const out: ToolDef[] = []; + const files = readdirSync(toolsDir).filter(f 
=> f.endsWith('.yaml') || f.endsWith('.yml')); + for (const file of files) { + try { + const cfg = yaml.load(readFileSync(join(toolsDir, file), 'utf-8')) as { + name?: string; + description?: string; + input_schema?: { + properties?: Record<string, { type?: string }>; + }; + }; + if (!cfg?.name) continue; + const props = cfg.input_schema?.properties ?? {}; + const params = Object.entries(props).map(([name, schema]) => ({ + name, + pyType: jsonTypeToPython(schema?.type), + })); + out.push({ name: cfg.name, description: cfg.description ?? '', params }); + } catch { + // skip malformed tool + } + } + return out; +} + +function collectPreToolUseHooks(agentDir: string): string[] { + const hooksPath = join(agentDir, 'hooks', 'hooks.yaml'); + if (!existsSync(hooksPath)) return []; + try { + const cfg = yaml.load(readFileSync(hooksPath, 'utf-8')) as { + hooks?: Record<string, Array<{ script?: string }>>; + }; + return (cfg?.hooks?.pre_tool_use ?? []) + .map(h => h.script ?? '') + .filter(Boolean); + } catch { + return []; + } +} + +function collectSubAgents(agentDir: string): SubAgentDef[] { + const agentsDir = join(agentDir, 'agents'); + if (!existsSync(agentsDir)) return []; + + const out: SubAgentDef[] = []; + const entries = readdirSync(agentsDir, { withFileTypes: true }); + for (const entry of entries) { + if (!entry.isDirectory()) continue; + const subDir = join(agentsDir, entry.name); + if (!existsSync(join(subDir, 'agent.yaml'))) continue; + + try { + const subManifest = loadAgentManifest(subDir); + out.push({ + name: subManifest.name, + description: subManifest.description, + systemPrompt: buildSystemPrompt(subDir, subManifest), + hasSkills: existsSync(join(subDir, 'skills')), + }); + } catch { + // skip sub-agents that fail to load + } + } + return out; +} + +// Python code rendering + +interface RenderContext { + manifest: AgentManifest; + systemPrompt: string; + skills: ParsedSkill[]; + tools: ToolDef[]; + preToolUseScripts: string[]; + subAgents: SubAgentDef[]; +} + +function renderPython(ctx: RenderContext): string {
const { manifest, systemPrompt, skills, tools, preToolUseScripts, subAgents } = ctx; + const lines: string[] = []; + const model = manifest.model?.preferred ?? 'anthropic:claude-sonnet-4-5'; + + // Header + lines.push('"""'); + lines.push(`DeepAgents definition for ${manifest.name} v${manifest.version}`); + lines.push('Generated by gitagent export --format deepagents'); + lines.push('"""'); + lines.push(''); + + // Imports + lines.push('from __future__ import annotations'); + lines.push(''); + lines.push('from deepagents import create_deep_agent'); + lines.push('from langchain_core.tools import tool'); + lines.push(''); + + // Agent metadata + lines.push('# Agent metadata'); + lines.push(`AGENT_NAME = ${pyStr(manifest.name)}`); + lines.push(`AGENT_VERSION = ${pyStr(manifest.version)}`); + lines.push(`MODEL = ${pyStr(model)}`); + lines.push(''); + + // System prompt + lines.push('# System prompt (SOUL.md + RULES.md + DUTIES.md + compliance)'); + lines.push(`SYSTEM_PROMPT = ${pyTripleStr(systemPrompt)}`); + lines.push(''); + + // Pre-tool-use hook wrapper + lines.push('# Hooks (pre_tool_use scripts run before every tool call)'); + lines.push('def _run_pre_tool_use_hooks(tool_name: str) -> None:'); + if (preToolUseScripts.length > 0) { + lines.push(' """Run hooks/