/**
 * Definition for the "context-pruner" agent.
 *
 * Spawned between steps to shrink the conversation so it fits a token budget.
 * `handleSteps` applies progressively more aggressive pruning passes and, as
 * soon as the estimated size fits the budget, emits a single `set_messages`
 * tool call with the pruned history:
 *   1. drop a trailing assistant message that merely re-spawns this pruner;
 *   2. blank out terminal-command outputs, keeping the 5 most recent;
 *   3. omit every large tool result wholesale;
 *   4. replace the oldest messages with a placeholder until roughly half of
 *      the budget (minus messages flagged `keepDuringTruncation`) is freed.
 */
const agentDefinition = {
  id: "context-pruner",
  displayName: "Context Pruner",
  publisher: "codebuff",
  version: "0.0.2",
  model: "openai/gpt-5-mini",
  toolNames: ["set_messages"],
  spawnableAgents: [],
  inputSchema: {
    params: {
      type: "object",
      required: [],
      properties: {
        maxContextLength: {
          type: "number",
        },
      },
    },
  },
  includeMessageHistory: true,
  outputMode: "last_message",
  spawnerPrompt: `Spawn this agent between steps to prune context, starting with old tool results and then old messages.`,
  systemPrompt: ``,
  instructionsPrompt: ``,
  stepPrompt: ``,

  /**
   * Generator driving the pruning passes described on the object above.
   *
   * @param {{agentState: {messageHistory: Array}, params?: {maxContextLength?: number}}} ctx
   * @yields exactly one `set_messages` tool call carrying the pruned history.
   */
  handleSteps: function* ({ agentState, params }) {
    // Crude token estimate: ~3 characters of serialized JSON per token.
    const estimateTokens = (value) => Math.ceil(JSON.stringify(value).length / 3);
    // Normalize a message body to text so substring/regex checks work uniformly.
    const asText = (message) =>
      typeof message.content === "string" ? message.content : JSON.stringify(message.content);

    const tokenBudget = params?.maxContextLength ?? 200000;
    let working = [...agentState.messageHistory];

    // Pass 0: a trailing assistant message that only re-spawns this pruner
    // carries no information — drop it before measuring anything.
    const tail = working[working.length - 1];
    if (
      tail !== undefined &&
      tail.role === "assistant" &&
      typeof tail.content === "string" &&
      tail.content.includes("spawn_agent_inline") &&
      tail.content.includes("context-pruner")
    ) {
      working = working.slice(0, -1);
    }

    if (estimateTokens(working) < tokenBudget) {
      yield { toolName: "set_messages", input: { messages: working } };
      return;
    }

    // Pass 1: walk newest → oldest so the 5 most recent terminal-command
    // results survive; older ones get their output stubbed out.
    const afterTerminalPass = [];
    let terminalResultsKept = 0;
    for (let index = working.length - 1; index >= 0; index--) {
      const message = working[index];
      let text = asText(message);
      if (text.includes("<tool>run_terminal_command</tool>")) {
        if (terminalResultsKept < 5) {
          terminalResultsKept++;
        } else {
          text = text.replace(
            /<tool_result>\s*<tool>run_terminal_command<\/tool>\s*<result>[\s\S]*?<\/result>\s*<\/tool_result>/g,
            "<tool_result><tool>run_terminal_command</tool><result>[Output omitted]</result></tool_result>",
          );
        }
      }
      afterTerminalPass.unshift({ ...message, content: text });
    }

    if (estimateTokens(afterTerminalPass) < tokenBudget) {
      yield { toolName: "set_messages", input: { messages: afterTerminalPass } };
      return;
    }

    // Pass 2: omit the payload of every tool result inside large messages.
    const afterToolResultPass = afterTerminalPass.map((message) => {
      let text = asText(message);
      if (text.includes("<tool_result>") && text.length > 1000) {
        text = text.replace(
          /<result>[\s\S]*?<\/result>/g,
          "<result>[Large tool result omitted]</result>",
        );
      }
      return { ...message, content: text };
    });

    if (estimateTokens(afterToolResultPass) < tokenBudget) {
      yield { toolName: "set_messages", input: { messages: afterToolResultPass } };
      return;
    }

    // Pass 3: still over budget — drop messages oldest-first (skipping any
    // flagged `keepDuringTruncation`) until roughly half the budget not
    // claimed by must-keep messages has been freed. Consecutive drops
    // collapse into a single placeholder message.
    const KEEP_FRACTION = 0.5;
    const PLACEHOLDER = "deleted";
    const replacementMessage = {
      role: "user",
      content: "<system>Previous message(s) omitted due to length</system>",
    };
    const mustKeep = afterToolResultPass.filter((m) => m.keepDuringTruncation);
    const requiredTokens = estimateTokens(mustKeep);
    const tokensToRemove = (tokenBudget - requiredTokens) * (1 - KEEP_FRACTION);

    let removedTokens = 0;
    const pruned = [];
    for (const message of afterToolResultPass) {
      if (removedTokens >= tokensToRemove || message.keepDuringTruncation) {
        pruned.push(message);
        continue;
      }
      removedTokens += estimateTokens(message);
      // Only start a new placeholder when the previous entry isn't one, and
      // charge its own size against the tokens we just reclaimed.
      if (pruned[pruned.length - 1] !== PLACEHOLDER) {
        pruned.push(PLACEHOLDER);
        removedTokens -= estimateTokens(replacementMessage);
      }
    }

    yield {
      toolName: "set_messages",
      input: {
        messages: pruned.map((entry) => (entry === PLACEHOLDER ? replacementMessage : entry)),
      },
    };
  },
};