/**
 * Agent definition for an ETL pipeline manager.
 *
 * Orchestrates three spawnable sub-agents sequentially — extract, then
 * transform, then load — threading each stage's unwrapped result into the
 * next stage's params, and finally emitting the load result as a message
 * (surfaced to the caller via outputMode "last_message").
 */
const agentDefinition = {
  id: "etl-manager",
  displayName: "ETL Pipeline Manager",
  publisher: "brandon",
  version: "0.0.2",
  model: "openai/gpt-5",
  toolNames: [
    "spawn_agents",
    "think_deeply",
    "add_message",
  ],
  // NOTE(review): these ids are fully qualified, but handleSteps spawns by
  // short name (e.g. "extract-agent") — confirm the runtime resolves short
  // names against this list.
  spawnableAgents: [
    "brandon/extract-agent@0.0.2",
    "brandon/transform-agent@0.0.2",
    "brandon/load-agent@0.0.2",
  ],
  inputSchema: {
    params: {
      type: "object",
      properties: {
        domain: {
          type: "string",
          description: "Data domain for ETL processing, e.g. places, events, projects",
        },
        loadParams: {
          type: "object",
          description: "Any special parameters for load agent",
        },
        extractParams: {
          type: "object",
          description: "Any special parameters for extract agent",
        },
        transformParams: {
          type: "object",
          description: "Any special parameters for transform agent",
        },
      },
    },
    prompt: {
      type: "string",
      description: "The data processing request to execute through ETL pipeline",
    },
  },
  includeMessageHistory: true,
  outputMode: "last_message",
  spawnerPrompt: `Use this agent to execute a complete ETL pipeline for data processing requests`,
  systemPrompt: `You are an ETL pipeline manager that coordinates sequential data processing through extract, transform, and load stages.`,
  instructionsPrompt: ``,
  stepPrompt: ``,

  /**
   * Generator driving the ETL pipeline step by step.
   *
   * Yields tool invocations; the runtime resumes the generator with
   * `{ toolResult }` after each one. Stops early (after reporting failure
   * via add_message) when any stage returns no results.
   *
   * @param {{ prompt: string, params?: object }} input - User request plus
   *   optional per-stage parameter overrides (see inputSchema).
   */
  handleSteps: function* ({ prompt, params }) {
    // Sentinel distinguishing "stage failed" from a legitimately falsy
    // stage result (a Symbol cannot collide with any tool result).
    const STAGE_FAILED = Symbol("stage-failed");

    /**
     * Run one pipeline stage: spawn a single agent, report failure via
     * add_message when no results come back, otherwise unwrap the first
     * result (JSON-typed results are unwrapped to their `.value`).
     */
    function* runStage(agentType, stagePrompt, stageParams, stageName) {
      const { toolResult: results } = yield {
        toolName: "spawn_agents",
        input: {
          agents: [
            {
              agent_type: agentType,
              prompt: stagePrompt,
              params: stageParams,
            },
          ],
        },
      };
      if (!results || results.length === 0) {
        yield {
          toolName: "add_message",
          input: {
            role: "user",
            content: `${stageName} step failed.`,
          },
        };
        return STAGE_FAILED;
      }
      const first = results[0];
      return first?.type === "json" ? first.value : first;
    }

    // Stage 1: extract raw data relevant to the request.
    const extractPrompt = `Analyzing user request "${prompt}" to generate optimal extraction strategy. Consider: data domain (${params?.domain || "unknown"}), specific search terms needed, target sources, and query refinement for maximum relevance.`;
    const extractResult = yield* runStage(
      "extract-agent",
      extractPrompt,
      params?.extractParams || {},
      "Extract",
    );
    if (extractResult === STAGE_FAILED) return;

    // Stage 2: normalize the extracted data into the canonical schema.
    const transformPrompt = "Processing extracted data from previous step. Need to transform raw data into canonical schema. Consider: data quality, normalization needs, deduplication strategy, and enrichment opportunities based on extracted content.";
    const transformResult = yield* runStage(
      "transform-agent",
      transformPrompt,
      { ...params?.transformParams, extractResult },
      "Transform",
    );
    if (transformResult === STAGE_FAILED) return;

    // Stage 3: filter, score, and rank against the user's constraints.
    const loadPrompt = `Final filtering and ranking phase for user request "${prompt}". Need to apply user constraints, score relevance, and rank results. Consider: user preferences, contextual relevance, quality metrics, and practical constraints.`;
    const loadResult = yield* runStage(
      "load-agent",
      loadPrompt,
      { ...params?.loadParams, transformResult },
      "Load",
    );
    if (loadResult === STAGE_FAILED) return;

    // Surface the final result; outputMode "last_message" returns this.
    yield {
      toolName: "add_message",
      input: {
        role: "user",
        content: typeof loadResult === "string" ? loadResult : JSON.stringify(loadResult),
      },
    };
  },
};