Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions desktop/src-tauri/src/commands/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ mod export_util;
mod identity;
mod media;
mod messages;
mod persona_chat;
mod personas;
mod profile;
mod teams;
Expand All @@ -25,6 +26,7 @@ pub use dms::*;
pub use identity::*;
pub use media::*;
pub use messages::*;
pub use persona_chat::*;
pub use personas::*;
pub use profile::*;
pub use teams::*;
Expand Down
136 changes: 136 additions & 0 deletions desktop/src-tauri/src/commands/persona_chat.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
use serde::{Deserialize, Serialize};

use crate::managed_agents::resolve_command;

/// A single turn of the persona-creator conversation, as sent from the frontend.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ChatMessage {
    // Author of the turn; "assistant" is treated specially when formatting
    // history (see format_conversation_prompt) — any other value reads as user.
    pub role: String,
    // Plain-text body of the turn.
    pub content: String,
}

/// Assistant reply returned to the frontend by `persona_creator_chat`.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ChatResponse {
    // Text of the assistant's final message.
    pub content: String,
}

/// Send messages to an LLM for the persona creator chat.
///
/// Uses goose - the app's primary agent runtime - which resolves
/// provider, model, and credentials from its own config.
#[tauri::command]
pub async fn persona_creator_chat(
    system_prompt: String,
    messages: Vec<ChatMessage>,
) -> Result<ChatResponse, String> {
    // Guard clause: bail out early if no goose binary can be located.
    let Some(goose_path) = resolve_command("goose", None) else {
        return Err(
            "No LLM runtime found. Install goose to use the AI persona creator.".to_string(),
        );
    };

    goose_chat(goose_path, system_prompt, messages).await
}

/// Format the conversation history as a single text prompt for goose.
///
/// For single-turn (one user message), returns the message content directly.
/// For multi-turn, includes prior exchanges as context so the LLM can continue
/// the conversation coherently.
fn format_conversation_prompt(messages: &[ChatMessage]) -> String {
    // split_last cleanly separates the message to answer from the history,
    // avoiding both the manual `i < len - 1` skip and the panicking
    // `messages[messages.len() - 1]` index of the original formulation.
    let Some((last, prior)) = messages.split_last() else {
        // No messages at all: nothing to prompt with.
        return String::new();
    };

    // Single-turn: pass the sole message through verbatim.
    if prior.is_empty() {
        return last.content.clone();
    }

    // Multi-turn: label each earlier exchange so the model can follow it.
    let history = prior
        .iter()
        .map(|msg| {
            // Anything that is not an assistant turn is presented as the user.
            let label = if msg.role == "assistant" {
                "Assistant"
            } else {
                "User"
            };
            format!("{label}: {}", msg.content)
        })
        .collect::<Vec<_>>()
        .join("\n\n");

    let last_content = &last.content;
    format!(
        "Here is our conversation so far:\n\n{history}\n\n---\n\nNow respond to this message:\n\n{last_content}"
    )
}

/// Run a one-shot LLM completion through goose.
///
/// Spawns `goose run` as a subprocess on tokio's blocking pool (the
/// std `Command::output` call blocks, so it must not run on the async
/// executor), then extracts the assistant's text from goose's JSON output.
///
/// Returns `Err` when goose cannot be spawned, exits non-zero, emits
/// unparsable JSON, or produces no assistant text.
async fn goose_chat(
    goose_path: std::path::PathBuf,
    system_prompt: String,
    messages: Vec<ChatMessage>,
) -> Result<ChatResponse, String> {
    let prompt_text = format_conversation_prompt(&messages);

    let output = tokio::task::spawn_blocking(move || {
        std::process::Command::new(&goose_path)
            .args([
                "run",
                "-t",
                &prompt_text,
                "--system",
                &system_prompt,
                // One-shot usage: no persistent session/profile, single turn.
                "--no-session",
                "--no-profile",
                "--max-turns",
                "1",
                "-q",
                "--output-format",
                "json",
            ])
            .stdin(std::process::Stdio::null())
            .stdout(std::process::Stdio::piped())
            .stderr(std::process::Stdio::piped())
            .output()
            .map_err(|e| format!("failed to spawn goose: {e}"))
    })
    .await
    // Outer `?` handles the JoinError, inner `?` the spawn failure — the
    // original's identity `.map_err(|e: String| e)` was a no-op.
    .map_err(|e| format!("goose task failed: {e}"))??;

    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        return Err(format!(
            "goose exited with {}: {}",
            // -1 stands in when the process was killed by a signal (no code).
            output.status.code().unwrap_or(-1),
            // Cap stderr so a huge dump doesn't flood the error message.
            stderr.chars().take(500).collect::<String>()
        ));
    }

    let response: serde_json::Value = serde_json::from_slice(&output.stdout)
        .map_err(|e| format!("failed to parse goose JSON: {e}"))?;

    // Extract the first "text" block of the last assistant message.
    // NOTE(review): assumes goose's JSON shape is {"messages": [{"role", "content": [{"type", "text"}]}]}
    // — confirm against the goose CLI output format for the pinned version.
    let content = response["messages"]
        .as_array()
        .and_then(|msgs| {
            msgs.iter()
                .rev()
                .find(|m| m["role"].as_str() == Some("assistant"))
        })
        .and_then(|msg| msg["content"].as_array())
        .and_then(|blocks| {
            blocks
                .iter()
                .find(|b| b["type"].as_str() == Some("text"))
                .and_then(|b| b["text"].as_str())
        })
        .unwrap_or("")
        .to_string();

    if content.is_empty() {
        return Err("goose returned no assistant response".to_string());
    }

    Ok(ChatResponse { content })
}
1 change: 1 addition & 0 deletions desktop/src-tauri/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -476,6 +476,7 @@ pub fn run() {
parse_team_file,
parse_persona_files,
export_persona_to_json,
persona_creator_chat,
get_channel_workflows,
get_workflow,
create_workflow,
Expand Down
22 changes: 22 additions & 0 deletions desktop/src/features/agents/persona-creator/chat.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import { invokeTauri } from "@/shared/api/tauri";

import { PERSONA_CREATOR_SYSTEM_PROMPT } from "./prompt";

/** Shape of the payload returned by the `persona_creator_chat` command. */
type ChatResponse = {
  content: string;
};

/**
 * Send conversation messages to the LLM for the persona creator.
 * Calls the `persona_creator_chat` Tauri command which handles
 * API key resolution and provider selection.
 */
export async function personaCreatorChat(
  messages: ReadonlyArray<{ role: string; content: string }>,
): Promise<string> {
  // Copy only the fields the backend expects, dropping any extras.
  const payload = {
    systemPrompt: PERSONA_CREATOR_SYSTEM_PROMPT,
    messages: messages.map(({ role, content }) => ({ role, content })),
  };
  const { content } = await invokeTauri<ChatResponse>(
    "persona_creator_chat",
    payload,
  );
  return content;
}
10 changes: 10 additions & 0 deletions desktop/src/features/agents/persona-creator/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
// Public surface of the persona-creator feature: schema parsing/conversion
// helpers and types from ./schema, plus the LLM system prompt from ./prompt.
export {
  extractJsonBlock,
  parsePersonaCreatorOutput,
  personaCreatorJsonSchema,
  toCreateInputs,
  type PersonaCreatorOutput,
  type PersonaCreatorPersona,
  type PersonaCreatorTeam,
} from "./schema";
export { PERSONA_CREATOR_SYSTEM_PROMPT } from "./prompt";
33 changes: 33 additions & 0 deletions desktop/src/features/agents/persona-creator/prompt.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
import { personaCreatorJsonSchema } from "./schema";

/**
 * System prompt given to the LLM for the persona-creator chat.
 *
 * Embeds `personaCreatorJsonSchema` (pretty-printed) so the model's final
 * answer can be parsed by `parsePersonaCreatorOutput`. The prompt text is
 * runtime behavior — edits here change what the model produces.
 */
export const PERSONA_CREATOR_SYSTEM_PROMPT = `You are a Persona Architect - a friendly expert who helps users design AI agent personas and teams for the Sprout desktop app.

## Your Role
Help users create one or more personas (and optionally a team to group them). Be decisive - gather what you need, then produce results. Don't over-ask.

## Conversation Flow
1. Ask what kind of agent(s) the user wants to create and what they'll be used for. One question is enough - don't pepper them with followups.
2. Once you have enough context, draft everything: display names, system prompts, and if multiple personas are involved, a team grouping. Show a preview.
3. If the user gives feedback, revise. Otherwise, output the final structured JSON immediately.

Be proactive: if the user describes multiple related personas, group them into a team automatically - don't ask permission. Make sensible default choices for names, tone, and structure. Only ask followups when genuinely ambiguous.

## Output Format
When finalizing, emit a single fenced JSON code block matching this schema:

\`\`\`
${JSON.stringify(personaCreatorJsonSchema, null, 2)}
\`\`\`

Important notes about the output:
- \`personaIndices\` in the team object are zero-based indices into the \`personas\` array.
- Only include the JSON block when the user has approved and you're ready to finalize.
- Do NOT include the JSON block in intermediate/draft messages - just show previews in plain text.

## Guidelines
- Be conversational and helpful, not robotic.
- Keep system prompts concise but effective - focus on behavior, tone, and capabilities.
- If the user is unsure, suggest reasonable defaults.
- One persona is fine - teams are optional.
- Name pools are optional fun - suggest them if appropriate (e.g. themed names).
`;
Loading
Loading