service

package
v1.15.27 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Mar 30, 2026 License: Apache-2.0 Imports: 58 Imported by: 0

Documentation

Overview

File: service/updater.go. Provides core self-update logic using go-selfupdate.

Index

Constants

View Source
const (
	StartThinking = "Thinking ↓"
	InTheThinking = "Thinking..."
	EndThinking   = "✓"
)
View Source
const (
	CapabilityMCPServers      = "mcp_servers"
	CapabilityAgentSkills     = "agent_skills"
	CapabilityAgentMemory     = "agent_memory"
	CapabilityTokenUsage      = "token_usage"
	CapabilityMarkdown        = "markdown_output"
	CapabilitySubAgents       = "sub_agents"
	CapabilityAgentDelegation = "agent_delegation"
	CapabilityWebSearch       = "web_search"
	CapabilityAutoCompression = "auto_compression"
	CapabilityPlanMode        = "plan_mode"
)
View Source
const (
	CapabilityMCPTitle          = "MCP (Model Context Protocol)"
	CapabilitySkillsTitle       = "Agent Skills"
	CapabilityMemoryTitle       = "Agent Memory"
	CapabilitySubAgentsTitle    = "Sub Agents"
	CapabilityDelegationTitle   = "Agent Delegation"
	CapabilityWebSearchTitle    = "Web Search"
	CapabilityTokenUsageTitle   = "Token Usage"
	CapabilityMarkdownTitle     = "Markdown Output"
	CapabilityAutoCompressTitle = "Auto Compression"
	CapabilityPlanModeTitle     = "Plan Mode"

	CapabilityMCPTitleHighlight          = "[MCP (Model Context Protocol)]()"
	CapabilitySkillsTitleHighlight       = "[Agent Skills]()"
	CapabilityMemoryTitleHighlight       = "[Agent Memory]()"
	CapabilitySubAgentsTitleHighlight    = "[Sub Agents]()"
	CapabilityDelegationTitleHighlight   = "[Agent Delegation]()"
	CapabilityWebSearchTitleHighlight    = "[Web Search]()"
	CapabilityTokenUsageTitleHighlight   = "[Token Usage]()"
	CapabilityMarkdownTitleHighlight     = "[Markdown Output]()"
	CapabilityAutoCompressTitleHighlight = "[Auto Compression]()"
	CapabilityPlanModeTitleHighlight     = "[Plan Mode]()"

	CapabilityMCPBody          = "" /* 188-byte string literal not displayed */
	CapabilitySkillsBody       = "" /* 176-byte string literal not displayed */
	CapabilityMemoryBody       = "allows agents to remember important facts about you across sessions.\nFacts are used to personalize responses."
	CapabilitySubAgentsBody    = "" /* 132-byte string literal not displayed */
	CapabilityDelegationBody   = "" /* 169-byte string literal not displayed */
	CapabilityWebSearchBody    = "" /* 142-byte string literal not displayed */
	CapabilityTokenUsageBody   = "allows agents to track their token usage.\nThis helps you to control the cost of using the agent."
	CapabilityMarkdownBody     = "allows agents to generate final response in Markdown format.\nThis helps you to format the response in a more readable way."
	CapabilityAutoCompressBody = "" /* 175-byte string literal not displayed */
	CapabilityPlanModeBody     = "allows agents to plan their work before executing tasks.\nUse for deepresearch, complex tasks, or collaborative work"

	CapabilityMCPDescription          = CapabilityMCPTitle + " " + CapabilityMCPBody
	CapabilitySkillsDescription       = CapabilitySkillsTitle + " " + CapabilitySkillsBody
	CapabilityMemoryDescription       = CapabilityMemoryTitle + " " + CapabilityMemoryBody
	CapabilitySubAgentsDescription    = CapabilitySubAgentsTitle + " " + CapabilitySubAgentsBody
	CapabilityDelegationDescription   = CapabilityDelegationTitle + " " + CapabilityDelegationBody
	CapabilityWebSearchDescription    = CapabilityWebSearchTitle + " " + CapabilityWebSearchBody
	CapabilityTokenUsageDescription   = CapabilityTokenUsageTitle + " " + CapabilityTokenUsageBody
	CapabilityMarkdownDescription     = CapabilityMarkdownTitle + " " + CapabilityMarkdownBody
	CapabilityAutoCompressDescription = CapabilityAutoCompressTitle + " " + CapabilityAutoCompressBody
	CapabilityPlanModeDescription     = CapabilityPlanModeTitle + " " + CapabilityPlanModeBody

	// Agent Features Description Highlight
	CapabilityMCPDescriptionHighlight          = CapabilityMCPTitleHighlight + CapabilityMCPBody
	CapabilitySkillsDescriptionHighlight       = CapabilitySkillsTitleHighlight + CapabilitySkillsBody
	CapabilityMemoryDescriptionHighlight       = CapabilityMemoryTitleHighlight + CapabilityMemoryBody
	CapabilitySubAgentsDescriptionHighlight    = CapabilitySubAgentsTitleHighlight + CapabilitySubAgentsBody
	CapabilityDelegationDescriptionHighlight   = CapabilityDelegationTitleHighlight + CapabilityDelegationBody
	CapabilityWebSearchDescriptionHighlight    = CapabilityWebSearchTitleHighlight + CapabilityWebSearchBody
	CapabilityTokenUsageDescriptionHighlight   = CapabilityTokenUsageTitleHighlight + CapabilityTokenUsageBody
	CapabilityMarkdownDescriptionHighlight     = CapabilityMarkdownTitleHighlight + CapabilityMarkdownBody
	CapabilityAutoCompressDescriptionHighlight = CapabilityAutoCompressTitleHighlight + CapabilityAutoCompressBody
	CapabilityPlanModeDescriptionHighlight     = CapabilityPlanModeTitleHighlight + CapabilityPlanModeBody
)
View Source
const (
	UserCancelCommon        = "[Operation Cancelled]"
	UserCancelReasonUnknown = "Unknown"
	UserCancelReasonTimeout = "Timeout"
	UserCancelReasonDeny    = "User denied execution."
	UserCancelReasonCancel  = "User canceled execution."
)
View Source
const (
	RemoteModelsIndexURL = "https://raw.githubusercontent.com/activebook/models/main/list.json"
	RemoteModelsBaseURL  = "https://raw.githubusercontent.com/activebook/models/main/"
)
View Source
const (
	PluginVSCodeCompanion      = "vscode-companion"
	PluginVSCodeCompanionTitle = "VS Code Companion"
	PluginVSCodeCompanionDesc  = "View and accept file changes suggested by gllm directly within VSCode — with native inline diffs."
)

PluginVSCodeCompanion is the canonical plugin ID for the VSCode Companion integration.

View Source
const (
	ActionPreview companionAction = "preview"
	ActionSaved   companionAction = "saved"
	ActionDiscard companionAction = "discard"
	ActionContext companionAction = "context"
)
View Source
const (
	// Model types
	ModelProviderGemini           string = "gemini" // for google gemini models
	ModelProviderOpenAI           string = "openai"
	ModelProviderOpenAICompatible string = "openai-compatible"
	ModelProviderAnthropic        string = "anthropic" // for anthropic models (official sdk)
	ModelProviderUnknown          string = "unknown"
)
View Source
const (
	TavilyUrl          = "https://api.tavily.com/search"
	GoogleSearchEngine = "google"
	BingSearchEngine   = "bing"
	TavilySearchEngine = "tavily"
	NoneSearchEngine   = "none"
)
View Source
const (
	MainSessionName      = "main"
	SessionFileExtension = ".jsonl"
)
View Source
const (
	CharsPerTokenEnglish  = 4.0 // Average for English text
	CharsPerTokenChinese  = 2.5 // Tuned: 3 bytes/char / 2.5 = 1.2 tokens/char (balanced)
	CharsPerTokenJapanese = 2.0 // 3 bytes / 2.0 = 1.5 tokens/char
	CharsPerTokenKorean   = 2.0 // 3 bytes / 2.0 = 1.5 tokens/char
	CharsPerTokenCode     = 3.5 // Tuned: Code is dense. 3.5 chars/token.
	CharsPerTokenJSON     = 3.7 // JSON: Typically 3.5-4 characters per token. Tuned: 3.7 chars/token.
	CharsPerTokenDefault  = 4.0 // Default fallback
	MessageOverheadTokens = 3   // Standard overhead per message (<|start|>role and <|end|>)
	ToolCallOverhead      = 24  // Reduced from 100 to 24 (closer to reality for JSON overhead)

	// Media Token Costs (Heuristics)
	// 1MB = 1000 tokens
	TokenCostImageDefault = 1000 // Safe upper bound average for high-res images (OpenAI high detail is ~1105, low is 85)
	TokenCostImageGemini  = 1000 // Fixed cost for Gemini images <= 384px (often tiled, but 258 is the base unit)

	// Video/Audio Heuristics (Tokens per MB - heavily estimated as we don't have duration)
	// Assumptions:
	// - Video: 2Mbps (.25MB/s). 1MB = 4s. Gemini Video: 263 tokens/s. 4s * 263 = 1052 tokens.
	// - Audio: 128kbps (16KB/s). 1MB = 64s. Gemini Audio: 32 tokens/s. 64s * 32 = 2048 tokens.
	TokenCostVideoPerMBGemini   = 1000
	TokenCostVideoPerMBOpenChat = 1000 // For base64 encoded video
	TokenCostAudioPerMBGemini   = 2000
)

Token estimation constants. These are refined based on modern tokenizer behavior (cl100k_base, qwen, etc.):

  • English: ~4 chars/token (ASCII)
  • Chinese: ~0.6-2.0 tokens/char (Qwen is efficient, OpenAI is 2.0). due to the different tokenization methods used by different models, the conversion ratios can vary We use 2.5 bytes/token => ~1.2 tokens/char as a balanced estimate.
  • Japanese/Korean: ~1.5 tokens/char. 3 bytes/char / 2.0 => 1.5 tokens/char.
  • Tool Calls: JSON structure overhead is small (~20 tokens), not 100.
View Source
const (
	ToolTypeFunction ToolType = "function"

	// Tool Names
	ToolShell             = "shell"
	ToolReadFile          = "read_file"
	ToolWriteFile         = "write_file"
	ToolEditFile          = "edit_file"
	ToolDeleteFile        = "delete_file"
	ToolCreateDirectory   = "create_directory"
	ToolListDirectory     = "list_directory"
	ToolDeleteDirectory   = "delete_directory"
	ToolMove              = "move"
	ToolCopy              = "copy"
	ToolSearchFiles       = "search_files"
	ToolSearchTextInFile  = "search_text_in_file"
	ToolReadMultipleFiles = "read_multiple_files"
	ToolWebFetch          = "web_fetch"
	ToolSwitchAgent       = "switch_agent"
	ToolBuildAgent        = "build_agent"
	ToolAskUser           = "ask_user"
	ToolWebSearch         = "web_search"
	ToolActivateSkill     = "activate_skill"
	ToolListMemory        = "list_memory"
	ToolSaveMemory        = "save_memory"
	ToolListAgent         = "list_agent"
	ToolSpawnSubAgents    = "spawn_subagents"
	ToolGetState          = "get_state"
	ToolSetState          = "set_state"
	ToolListState         = "list_state"
	ToolExitPlanMode      = "exit_plan_mode"
	ToolEnterPlanMode     = "enter_plan_mode"
)
View Source
const (
	// ToolRespConfirmShell is the template for the response to the user before executing a command.
	ToolRespConfirmShell = "```\n%s\n```\n%s"

	// ToolRespShellOutput is the template for the response to the user after executing a command.
	ToolRespShellOutput = `shell executed: %s
Status:
%s
%s`

	ToolUserConfirmPrompt = "Do you want to proceed?"

	// ToolRespDiscardEditFile is the template for the response to the user when proposed file edits are discarded.
	ToolRespDiscardEditFile = "Based on your request, the OPERATION is CANCELLED: " +
		"Cancel edit file: %s\n" +
		"The user has explicitly declined to apply these file edits. The file will remain unchanged. Do not proceed with any file modifications or ask for further confirmation without explicit new user instruction."
)
View Source
const (
	DefaultShellTimeout = 60 * time.Second
	MaxFileSize         = 20 * 1024 * 1024 // 20MB
)

Tool robustness constants

View Source
const (
	CachedTokensInPrompt    = true
	CachedTokensNotInPrompt = false
)
View Source
const CompressedContextAck = "Context compressed successfully. I have read the summary and am ready to continue."
View Source
const CompressedContextPrefix = "Here is the compressed context of our session:\n\n"
View Source
const CompressionPromptFormat = `Please compress the session history above according to your system instructions.`
View Source
const CompressionSystemPrompt = `` /* 804-byte string literal not displayed */
View Source
const DefaultMaxCacheSize = 10000

DefaultMaxCacheSize is the default maximum number of entries in the cache

View Source
const (

	// UpdateCheckTimeout is the maximum time budget for a remote version check.
	UpdateCheckTimeout = 10 * time.Second
)

Variables

View Source
var (
	// RoleColors for message roles (initialized in init)
	RoleColors map[string]string

	// ContentTypeColors for special content (initialized in init)
	ContentTypeColors map[string]string
)
View Source
var DefaultModelLimits = ModelLimits{

	ContextWindow:   128000,
	MaxOutputTokens: 8192,
}
View Source
var ExecutorPath string

ExecutorPath is the path to the executable to run for filtering. Defaults to os.Executable(). Can be overridden for testing.

Functions

func AppendAgentDelegationTools added in v1.15.18

func AppendAgentDelegationTools(tools []string) []string

AppendAgentDelegationTools appends agent delegation tools to the given tools slice if they are not already present.

func AppendMemoryTools added in v1.13.18

func AppendMemoryTools(tools []string) []string

AppendMemoryTools appends memory tools to the given tools slice if they are not already present.

func AppendPlanTools added in v1.14.26

func AppendPlanTools(tools []string) []string

AppendPlanTools appends plan tools to the given tools slice if they are not already present.

func AppendSearchTools added in v1.14.1

func AppendSearchTools(tools []string) []string

AppendSearchTools appends search tools to the given tools slice if they are not already present.

func AppendSkillTools added in v1.13.18

func AppendSkillTools(tools []string) []string

AppendSkillTools appends skill tools to the given tools slice if they are not already present.

func AppendSubagentTools added in v1.13.18

func AppendSubagentTools(tools []string) []string

AppendSubagentTools appends subagent tools to the given tools slice if they are not already present.

func ApplyUpdate added in v1.14.24

func ApplyUpdate(release *ReleaseInfo) error

ApplyUpdate downloads and replaces the running binary in place. The process must be restarted for the new version to take effect.

func AvailableAgentDelegationTool added in v1.15.18

func AvailableAgentDelegationTool(toolName string) bool

AvailableAgentDelegationTool checks if a tool is available in the agent delegation tools.

func AvailableEmbeddingTool added in v1.9.2

func AvailableEmbeddingTool(toolName string) bool

AvailableEmbeddingTool checks if a tool is available in the embedding tools.

func AvailableMemoryTool added in v1.13.18

func AvailableMemoryTool(toolName string) bool

AvailableMemoryTool checks if a tool is available in the memory tools.

func AvailablePlanTool added in v1.14.26

func AvailablePlanTool(toolName string) bool

AvailablePlanTool checks if a tool is available in the plan tools.

func AvailableSearchTool added in v1.9.12

func AvailableSearchTool(toolName string) bool

AvailableSearchTool checks if a tool is available in the search tools.

func AvailableSkillTool added in v1.13.18

func AvailableSkillTool(toolName string) bool

AvailableSkillTool checks if a tool is available in the skill tools.

func AvailableSubagentTool added in v1.13.18

func AvailableSubagentTool(toolName string) bool

AvailableSubagentTool checks if a tool is available in the subagent tools.

func BuildAnthropicMessages added in v1.13.10

func BuildAnthropicMessages(messages []UniversalMessage) []anthropic.MessageParam

BuildAnthropicMessages converts universal messages to Anthropic format. Handles: System role is inlined into the first user message. Preserves: OfText, OfThinking blocks

func BuildCompressedSession added in v1.14.24

func BuildCompressedSession(summary string, provider string) ([]byte, error)

BuildCompressedSession constructs a new 2-message JSONL session from the summary, formatted for the specified provider. User provides the summary, assistant acknowledges.

func BuildGeminiMessages added in v1.13.10

func BuildGeminiMessages(messages []UniversalMessage) []*gemini.Content

BuildGeminiMessages converts universal messages to Gemini format. Handles: System role is inlined into the first user message. Preserves: Parts with Text, Thought Maps: "assistant" → "model"

func BuildOpenAIMessages added in v1.13.10

func BuildOpenAIMessages(messages []UniversalMessage) []openai.ChatCompletionMessageParamUnion

BuildOpenAIMessages converts universal messages to OpenAI format. Preserves: system role, Content, ReasoningContent

func BuildOpenChatMessages added in v1.13.10

func BuildOpenChatMessages(messages []UniversalMessage) []*model.ChatCompletionMessage

BuildOpenChatMessages converts universal messages to OpenChat (Volcengine) format. Preserves: system role, Content, ReasoningContent

func CallAgent added in v1.9.4

func CallAgent(op *AgentOptions) error

func CheckIfImageFromBytes

func CheckIfImageFromBytes(data []byte) (bool, string, error)

func CheckIfImageFromPath

func CheckIfImageFromPath(filePath string) (bool, string, error)

CheckIfImageFromPath attempts to decode a file as an image.

func CheckSessionFormat added in v1.15.21

func CheckSessionFormat(agent *data.AgentConfig, sessionData []byte) (isCompatible bool, provider string, modelProvider string)

CheckSessionFormat verifies if the session data is compatible with the agent's provider.

func CheckToolPermission added in v1.14.26

func CheckToolPermission(toolName string, args *map[string]interface{}) error

CheckToolPermission checks if the tool is allowed to be executed in the current mode

func ClearEmptySessionsAsync added in v1.14.24

func ClearEmptySessionsAsync()

ClearEmptySessionsAsync clears all empty sessions in background An empty session is a folder whose main.jsonl file is empty or missing.

func ClearSession added in v1.15.21

func ClearSession(name string) error

ClearSession deletes the specific session or subagent file. Passing only the top-level name "my_session" will only delete "main.jsonl".

func ClearTokenCache added in v1.12.14

func ClearTokenCache()

ClearTokenCache clears the global token cache (useful for testing)

func CompressSession added in v1.14.24

func CompressSession(modelConfig *data.AgentConfig, sessionData []byte) (string, error)

CompressSession takes the raw session JSONL bytes and the active agent, and returns a compressed summary string using the active provider's non-streaming API. No need to preserve the latest user message, because it's coming from /compress command.

func ConstructSystemPrompt added in v1.14.27

func ConstructSystemPrompt(prompt string, capabilities []string) string

ConstructSystemPrompt constructs the system prompt by injecting memory and skills into the prompt

func ConvertMessages added in v1.13.10

func ConvertMessages(data []byte, sourceProvider, targetProvider string) ([]byte, error)

ConvertMessages parses source provider data and builds target provider messages. Returns the converted data encoded as JSON.

Supported source/target providers: - ModelProviderOpenAI - ModelProviderOpenAICompatible (OpenChat) - ModelProviderAnthropic - ModelProviderGemini

func DetectAnthropicKeyMessage added in v1.13.10

func DetectAnthropicKeyMessage(msg *anthropic.MessageParam) bool

Detects if a message is definitely an Anthropic message

func DetectGeminiKeyMessage added in v1.13.10

func DetectGeminiKeyMessage(msg *gemini.Content) bool

Detects if a message is definitely a Gemini message

func DetectMessageProvider added in v1.5.1

func DetectMessageProvider(path string) string

Detects the session provider based on message format using a scanner This is more efficient for large files as it doesn't read the entire file into memory

func DetectMessageProviderByContent added in v1.14.14

func DetectMessageProviderByContent(input []byte) string

* Detects the session provider based on message format. * Supports both JSONL (preferred) and legacy JSON array formats.

func DetectMessageProviderFromLine added in v1.14.14

func DetectMessageProviderFromLine(line []byte) string

DetectMessageProviderFromLine detects the provider of a message from its JSON representation.

func DetectMessageProviderFromRaw added in v1.15.12

func DetectMessageProviderFromRaw(raw map[string]interface{}) string

DetectMessageProviderFromRaw performs a fast-path check using a raw JSON map. This is used for performance (avoiding multi-type reflection unmarshals) and for detecting vendor-specific keys that aren't in standard SDK schemas.

func DetectModelProvider added in v1.6.0

func DetectModelProvider(endPoint string, modelName string) string

DetectModelProvider detects the model provider based on endpoint and model name. It first checks the endpoint domain, then falls back to model name patterns. This dual detection handles Chinese models hosted on US platforms (AWS, CoreWeave, etc.)

func DetectOpenAIKeyMessage added in v1.13.10

func DetectOpenAIKeyMessage(msg *openai.ChatCompletionMessageParamUnion) bool

Detects if a message is definitely an OpenAI message

func DisableAgentDelegation added in v1.15.18

func DisableAgentDelegation(capabilities []string) []string

func DisableAgentMemory added in v1.13.18

func DisableAgentMemory(capabilities []string) []string

func DisableAgentSkills added in v1.13.18

func DisableAgentSkills(capabilities []string) []string

func DisableAutoCompression added in v1.14.22

func DisableAutoCompression(capabilities []string) []string

func DisableCodeExecution added in v1.7.1

func DisableCodeExecution()

func DisableMCPServers added in v1.13.18

func DisableMCPServers(capabilities []string) []string

func DisableMarkdown added in v1.13.18

func DisableMarkdown(capabilities []string) []string

func DisablePlanMode added in v1.14.26

func DisablePlanMode(capabilities []string) []string

func DisableSubAgents added in v1.13.18

func DisableSubAgents(capabilities []string) []string

func DisableTokenUsage added in v1.13.18

func DisableTokenUsage(capabilities []string) []string

func DisableWebSearch added in v1.14.1

func DisableWebSearch(capabilities []string) []string

func EnableAgentDelegation added in v1.15.18

func EnableAgentDelegation(capabilities []string) []string

func EnableAgentMemory added in v1.13.18

func EnableAgentMemory(capabilities []string) []string

func EnableAgentSkills added in v1.13.18

func EnableAgentSkills(capabilities []string) []string

func EnableAutoCompression added in v1.14.22

func EnableAutoCompression(capabilities []string) []string

func EnableCodeExecution added in v1.7.1

func EnableCodeExecution()

func EnableMCPServers added in v1.13.18

func EnableMCPServers(capabilities []string) []string

func EnableMarkdown added in v1.13.18

func EnableMarkdown(capabilities []string) []string

func EnablePlanMode added in v1.14.26

func EnablePlanMode(capabilities []string) []string

func EnableSubAgents added in v1.13.18

func EnableSubAgents(capabilities []string) []string

func EnableTokenUsage added in v1.13.18

func EnableTokenUsage(capabilities []string) []string

func EnableWebSearch added in v1.14.1

func EnableWebSearch(capabilities []string) []string

func EnsureSessionCompatibility added in v1.15.21

func EnsureSessionCompatibility(agent *data.AgentConfig, sessionName string) error

EnsureSessionCompatibility checks if the existing session is compatible with the current agent's provider. If not, it attempts to convert the session history.

func EstimateAnthropicMessageTokens added in v1.13.5

func EstimateAnthropicMessageTokens(msg anthropic.MessageParam) int

EstimateAnthropicMessageTokens estimates tokens for an Anthropic message.

func EstimateAnthropicMessagesTokens added in v1.13.5

func EstimateAnthropicMessagesTokens(messages []anthropic.MessageParam) int

EstimateAnthropicMessagesTokens estimates total tokens for a slice of Anthropic messages.

func EstimateAnthropicToolTokens added in v1.13.5

func EstimateAnthropicToolTokens(tools []anthropic.ToolUnionParam) int

EstimateAnthropicToolTokens estimates tokens for a slice of Anthropic tools.

func EstimateGeminiMessageTokens added in v1.12.14

func EstimateGeminiMessageTokens(msg *genai.Content) int

EstimateGeminiMessageTokens estimates tokens for a Gemini content message.

func EstimateGeminiMessagesTokens added in v1.12.14

func EstimateGeminiMessagesTokens(messages []*genai.Content) int

EstimateGeminiMessagesTokens estimates total tokens for a slice of Gemini messages.

func EstimateGeminiToolTokens added in v1.12.14

func EstimateGeminiToolTokens(tools []*genai.Tool) int

EstimateGeminiToolTokens estimates tokens for a slice of Gemini tools

func EstimateJSONTokens added in v1.12.14

func EstimateJSONTokens(data interface{}) int

EstimateJSONTokens estimates tokens for arbitrary JSON data. Useful for estimating tool results or complex structured content.

func EstimateOpenAIMessageTokens added in v1.12.14

func EstimateOpenAIMessageTokens(msg openai.ChatCompletionMessageParamUnion) int

EstimateOpenAIMessageTokens estimates tokens for an OpenAI chat message. This accounts for role tokens, content, and tool calls.

func EstimateOpenAIMessagesTokens added in v1.12.14

func EstimateOpenAIMessagesTokens(messages []openai.ChatCompletionMessageParamUnion) int

EstimateOpenAIMessagesTokens estimates total tokens for a slice of OpenAI messages.

func EstimateOpenAIToolTokens added in v1.12.14

func EstimateOpenAIToolTokens(tools []openai.ChatCompletionToolUnionParam) int

EstimateOpenAIToolTokens estimates tokens for a slice of OpenAI tools

func EstimateOpenChatMessageTokens added in v1.12.14

func EstimateOpenChatMessageTokens(msg *openchat.ChatCompletionMessage) int

EstimateOpenChatMessageTokens estimates tokens for an OpenChat (Volcengine) message.

func EstimateOpenChatMessagesTokens added in v1.12.14

func EstimateOpenChatMessagesTokens(messages []*openchat.ChatCompletionMessage) int

EstimateOpenChatMessagesTokens estimates total tokens for a slice of OpenChat messages.

func EstimateOpenChatToolTokens added in v1.12.14

func EstimateOpenChatToolTokens(tools []*openchat.Tool) int

EstimateOpenChatToolTokens estimates tokens for a slice of OpenChat tools

func EstimateTokens added in v1.12.14

func EstimateTokens(text string) int

EstimateTokens provides fast character-based estimation for text. This is approximately 90% accurate compared to tiktoken.

func ExportSession added in v1.15.21

func ExportSession(name, destPath string) error

ExportSession exports a session's main.jsonl to a destination path

func ExtractTextFromURL added in v1.6.14

func ExtractTextFromURL(ctx context.Context, url string, config *ExtractorConfig) ([]string, error)

ExtractTextFromURL fetches a URL and extracts the main text content Automatically detects content type and routes to appropriate handler: - text/plain, text/markdown: returns content directly - application/pdf: extracts text using PDF reader - text/html: parses and extracts text with boilerplate removal

func FilterOpenToolArguments added in v1.14.2

func FilterOpenToolArguments(argsMap map[string]interface{}, ignoreKeys []string) map[string]interface{}

func FindSessionByIndex added in v1.14.24

func FindSessionByIndex(idx string) (string, error)

FindSessionByIndex finds a session by index If the index is out of range, it returns an error If the index is valid, it returns the session name

func FindSessionsByPattern added in v1.15.21

func FindSessionsByPattern(pattern string) ([]string, error)

FindSessionsByPattern finds all sessions matching a given pattern (including index, exact name, or wildcard)

func GetAgentDelegationTools added in v1.15.18

func GetAgentDelegationTools() []string

func GetAllCapabilitiesDescription added in v1.15.23

func GetAllCapabilitiesDescription() string

GetAllCapabilitiesDescription returns all capabilities description.

func GetAllEmbeddingCapabilities added in v1.15.18

func GetAllEmbeddingCapabilities() []string

GetAllEmbeddingCapabilities returns all capabilities that are enabled by default.

func GetAllFeatureInjectedTools added in v1.15.18

func GetAllFeatureInjectedTools() []string

func GetAllOpenTools added in v1.14.2

func GetAllOpenTools() []string

func GetAnthropicMessageKey added in v1.13.5

func GetAnthropicMessageKey(msg anthropic.MessageParam) string

GetAnthropicMessageKey generates a cache key for an Anthropic message.

func GetCapabilityDescHighlight added in v1.15.18

func GetCapabilityDescHighlight(cap string) string

GetCapabilityDescHighlight returns the description of a capability with highlight. This is used for the dynamic note in the capabilities switch.

func GetCapabilityDescription added in v1.15.18

func GetCapabilityDescription(cap string) string

GetCapabilityDescription returns the description of a capability.

func GetCapabilityTitle added in v1.15.18

func GetCapabilityTitle(cap string) string

GetCapabilityTitle returns the title of a capability.

func GetDefaultSearchEngineName added in v1.6.0

func GetDefaultSearchEngineName() string

func GetEmbeddingTools added in v1.15.18

func GetEmbeddingTools() []string

func GetGeminiMessageKey added in v1.12.14

func GetGeminiMessageKey(msg *genai.Content) string

GetGeminiMessageKey generates a cache key for a Gemini message.

func GetMIMEType added in v1.4.0

func GetMIMEType(filePath string) string

func GetMIMETypeByContent added in v1.4.0

func GetMIMETypeByContent(data []byte) string

func GetMemoryTools added in v1.15.18

func GetMemoryTools() []string

func GetNoneSearchEngineName added in v1.6.2

func GetNoneSearchEngineName() string

func GetOpenAIMessageKey added in v1.12.14

func GetOpenAIMessageKey(msg openai.ChatCompletionMessageParamUnion) string

GetOpenAIMessageKey generates a cache key for an OpenAI message by JSON marshaling. This captures ALL fields (Content, ReasoningContent, ToolCalls, MultiContent, etc.) ensuring different messages never produce the same key.

func GetOpenChatMessageKey added in v1.12.14

func GetOpenChatMessageKey(msg *model.ChatCompletionMessage) string

GetOpenChatMessageKey generates a cache key for an OpenChat (Volcengine) message.

func GetPlanModeTools added in v1.15.18

func GetPlanModeTools() []string

func GetSearchTools added in v1.15.18

func GetSearchTools() []string

func GetSessionFilePath added in v1.15.22

func GetSessionFilePath(name string) string

GetSessionFilePath returns the absolute file path for a session's specific jsonl file. If name is "sessionA::taskB", it returns "sessions/sessionA/taskB.jsonl" If name is "sessionA", it returns "sessions/sessionA/main.jsonl"

func GetSessionMainFilePath added in v1.15.21

func GetSessionMainFilePath(name string) string

GetSessionMainFilePath returns the absolute file path for a session's main.jsonl

func GetSessionPath added in v1.15.21

func GetSessionPath(name string) string

GetSessionPath returns the absolute directory path for a session

func GetSessionsDir added in v1.14.26

func GetSessionsDir() string

func GetSkillTools added in v1.15.18

func GetSkillTools() []string

func GetSubagentTools added in v1.15.18

func GetSubagentTools() []string

func GetVSCodeContext added in v1.15.27

func GetVSCodeContext() string

GetVSCodeContext formats the current VSCode state into a JSON block suitable for LLM injection.

func IsAgentDelegationEnabled added in v1.15.18

func IsAgentDelegationEnabled(capabilities []string) bool

* Agent Delegation

func IsAgentMemoryEnabled added in v1.13.18

func IsAgentMemoryEnabled(capabilities []string) bool

* Agent Memory

func IsAgentSkillsEnabled added in v1.13.18

func IsAgentSkillsEnabled(capabilities []string) bool

* Agent Skills

func IsAudioMIMEType added in v1.7.1

func IsAudioMIMEType(mimeType string) bool

func IsAutoCompressionEnabled added in v1.14.22

func IsAutoCompressionEnabled(capabilities []string) bool

* Auto Compression

func IsAvailableMCPTool added in v1.14.2

func IsAvailableMCPTool(toolName string, client *MCPClient) bool

IsAvailableMCPTool checks if a tool is available in the MCP tools.

func IsAvailableOpenTool added in v1.14.2

func IsAvailableOpenTool(toolName string) bool

IsAvailableOpenTool checks if a tool is available for the current agent. It checks if the tool is available in the embedding tools, search tools, skill tools, memory tools, subagent tools, agent delegation tools, or MCP tools.

func IsCodeExecutionEnabled added in v1.7.1

func IsCodeExecutionEnabled() bool

func IsExcelMIMEType added in v1.4.0

func IsExcelMIMEType(mimeType string) bool

func IsImageMIMEType added in v1.4.0

func IsImageMIMEType(mimeType string) bool

func IsMCPServersEnabled added in v1.13.18

func IsMCPServersEnabled(capabilities []string) bool

* MCP Servers

func IsMarkdownEnabled added in v1.13.18

func IsMarkdownEnabled(capabilities []string) bool

* Markdown

func IsModelGemini3 added in v1.13.5

func IsModelGemini3(modelName string) bool

IsModelGemini3 checks if the model name is a Gemini 3 model

func IsPDFMIMEType added in v1.4.0

func IsPDFMIMEType(mimeType string) bool

func IsPlanModeEnabled added in v1.14.26

func IsPlanModeEnabled(capabilities []string) bool

* Plan Mode

func IsStdinPipe added in v1.4.0

func IsStdinPipe(source string) bool

func IsSubAgentsEnabled added in v1.13.18

func IsSubAgentsEnabled(capabilities []string) bool

* Sub Agents

func IsSwitchAgentError added in v1.13.10

func IsSwitchAgentError(err error) bool

func IsTextMIMEType added in v1.4.0

func IsTextMIMEType(mimeType string) bool

func IsTokenUsageEnabled added in v1.13.18

func IsTokenUsageEnabled(capabilities []string) bool

* Token Usage

func IsUnknownMIMEType added in v1.4.0

func IsUnknownMIMEType(mimeType string) bool

func IsUserCancelError added in v1.14.13

func IsUserCancelError(err error) bool

func IsVideoMIMEType added in v1.13.1

func IsVideoMIMEType(mimeType string) bool

func IsWebSearchEnabled added in v1.14.1

func IsWebSearchEnabled(capabilities []string) bool

* Web Search

func NormalizeModelName added in v1.15.1

func NormalizeModelName(configModelName string) string

NormalizeModelName extracts the actual model name from a config string that might include a vendor prefix e.g. "xiaomi/mimo-v2-flash:free" -> "mimo-v2-flash:free"

func Ptr added in v1.7.4

func Ptr[T any](t T) *T

func ReadSessionContent added in v1.15.21

func ReadSessionContent(name string) ([]byte, error)

ReadSessionContent reads the contents of a session or subagent jsonl file

func RemoveAgentDelegationTools added in v1.15.18

func RemoveAgentDelegationTools(tools []string) []string

RemoveAgentDelegationTools removes agent delegation tools from the given tools slice.

func RemoveMemoryTools added in v1.13.18

func RemoveMemoryTools(tools []string) []string

RemoveMemoryTools removes memory tools from the given tools slice.

func RemovePlanTools added in v1.14.26

func RemovePlanTools(tools []string) []string

RemovePlanTools removes plan tools from the given tools slice.

func RemoveSearchTools added in v1.14.1

func RemoveSearchTools(tools []string) []string

RemoveSearchTools removes search tools from the given tools slice.

func RemoveSession added in v1.15.21

func RemoveSession(name string) error

RemoveSession deletes an entire session directory or a specific subagent file

func RemoveSkillTools added in v1.13.18

func RemoveSkillTools(tools []string) []string

RemoveSkillTools removes skill tools from the given tools slice.

func RemoveSubagentTools added in v1.13.18

func RemoveSubagentTools(tools []string) []string

RemoveSubagentTools removes subagent tools from the given tools slice.

func RenameSession added in v1.15.21

func RenameSession(oldName, newName string) error

RenameSession renames an existing session directory or subagent file

func RenderAnthropicSessionLog added in v1.14.24

func RenderAnthropicSessionLog(input []byte) string

RenderAnthropicSessionLog returns a string summary of an Anthropic session (JSONL or JSON array format)

func RenderGeminiSessionLog added in v1.14.24

func RenderGeminiSessionLog(input []byte) string

RenderGeminiSessionLog returns a string summary of a Gemini session (JSONL or JSON array format)

func RenderOpenAISessionLog added in v1.14.24

func RenderOpenAISessionLog(input []byte) string

RenderOpenAISessionLog returns a string summary of an OpenAI session (JSONL or JSON array format)

func SendVSCodeDiscard added in v1.15.25

func SendVSCodeDiscard(filePath string)

SendVSCodeDiscard notifies VSCode that the change was cancelled, reverting any dirty buffer.

func SendVSCodePreview added in v1.15.25

func SendVSCodePreview(filePath, newContent string)

SendVSCodePreview sends the proposed file changes to VSCode for inline diffing before confirmation.

func SendVSCodeSaved added in v1.15.25

func SendVSCodeSaved(filePath string)

SendVSCodeSaved notifies VSCode that the file was successfully written to disk, permitting a clean reload.

func SessionExists added in v1.15.21

func SessionExists(name string, checkSubAgent bool) bool

SessionExists checks if a top-level session folder exists, or if a subagent file exists

func SyncModelLimits added in v1.15.1

func SyncModelLimits(modelKey, configModelName string)

SyncModelLimits fetches the latest model constraints from the remote repository and updates the local config entry if valid info is found. This function operates asynchronously and debounces multiple calls for the same key.

func WriteSessionContent added in v1.15.21

func WriteSessionContent(name string, data []byte) error

WriteSessionContent writes the data into a session or subagent jsonl file

Types

type ActiveAgent added in v1.15.23

type ActiveAgent struct {
	Name     string
	Config   *data.AgentConfig
	TaskChan chan AgentMessage // event loop inbox
	// contains filtered or unexported fields
}

ActiveAgent is a persistent, resident subagent actor.

type ActiveEditor added in v1.15.27

type ActiveEditor struct {
	FilePath   string `json:"filePath"`
	LanguageId string `json:"languageId"`
	IsDirty    bool   `json:"isDirty"`
	// Content        string            `json:"content"`
	Selections     []EditorSelection `json:"selections"`
	CursorPosition EditorPosition    `json:"cursorPosition"`
}

ActiveEditor represents the active editor in VSCode. We don't need Content and VisibleRanges for now: for Content, the model can use filePath to read the file; for VisibleRanges, the model can use cursorPosition to infer the visible range.

type Agent added in v1.9.4

type Agent struct {
	Model           *ModelInfo
	SystemPrompt    string
	UserPrompt      string
	Files           []*FileData         // Attachment files
	NotifyChan      chan<- StreamNotify // Sub Channel to send notifications
	DataChan        chan<- StreamData   // Sub Channel to receive streamed text data
	ProceedChan     <-chan bool         // Sub Channel to receive proceed signal
	SearchEngine    *SearchEngine       // Search engine name
	ToolsUse        data.ToolsUse       // Use tools
	EnabledTools    []string            // List of enabled embedding tools
	UseCodeTool     bool                // Use code tool
	ThinkingLevel   ThinkingLevel       // Thinking level: off, low, medium, high
	MCPClient       *MCPClient          // MCP client for MCP tools
	MaxRecursions   int                 // Maximum number of recursions for model calls
	Markdown        *Markdown           // Markdown renderer
	TokenUsage      *TokenUsage         // Token usage metainfo
	StdOutput       io.Output           // Standard I/O
	FileOutput      io.Output           // File I/O
	Status          StatusStack         // Stack to manage streaming status
	Session         Session             // Session
	Context         ContextManager      // Context manager
	LastWrittenData string              // Last written data

	// Sub-agent orchestration
	SharedState *data.SharedState // Shared state for inter-agent communication
	AgentName   string            // Current agent name for metadata tracking
	ModelName   string            // Current model name of current agent (agent model key)
	Verbose     bool              // Whether verbose output mode is enabled
}

func (*Agent) CompleteReasoning added in v1.9.7

func (ag *Agent) CompleteReasoning()

func (*Agent) Error added in v1.9.7

func (ag *Agent) Error(text string)

func (*Agent) GenerateAnthropicStream added in v1.13.5

func (ag *Agent) GenerateAnthropicStream() error

GenerateAnthropicStream generates a streaming response using Anthropic API

func (*Agent) GenerateAnthropicSync added in v1.14.22

func (ag *Agent) GenerateAnthropicSync(messages []anthropic.MessageParam, systemPrompt string) (string, error)

GenerateAnthropicSync generates a single, non-streaming completion using Anthropic API. This is used for background tasks like context compression where streaming is unnecessary. systemPrompt is the system prompt to be used for the sync generation, it's majorly a role. the last message is the user prompt to do the task.

func (*Agent) GenerateGeminiStream added in v1.13.14

func (ag *Agent) GenerateGeminiStream() error

func (*Agent) GenerateGeminiSync added in v1.14.22

func (ag *Agent) GenerateGeminiSync(messages []*genai.Content, systemPrompt string) (string, error)

GenerateGeminiSync generates a single, non-streaming completion using the Gemini API. This is used for background tasks like context compression where streaming is unnecessary. systemPrompt is the system prompt to be used for the sync generation, it's majorly a role. the last message is the user prompt to do the task.

func (*Agent) GenerateOpenAIStream added in v1.10.4

func (ag *Agent) GenerateOpenAIStream() error

GenerateOpenAIStream generates a streaming response using OpenAI API

func (*Agent) GenerateOpenAISync added in v1.14.22

func (ag *Agent) GenerateOpenAISync(messages []openai.ChatCompletionMessageParamUnion, systemPrompt string) (string, error)

GenerateOpenAISync generates a single, non-streaming completion using OpenAI API. This is used for background tasks like context compression where streaming is unnecessary. systemPrompt is the system prompt to be used for the sync generation, it's majorly a role. the last message is the user prompt to do the task.

func (*Agent) GenerateOpenChatStream added in v1.9.4

func (ag *Agent) GenerateOpenChatStream() error

In the current OpenChat API, we can't use cached tokens. The context API and response API are not available in the current Golang library.

func (*Agent) GenerateOpenChatSync added in v1.14.22

func (ag *Agent) GenerateOpenChatSync(messages []*model.ChatCompletionMessage, systemPrompt string) (string, error)

GenerateOpenChatSync generates a single, non-streaming completion using the Volcengine API. This is used for background tasks like context compression where streaming is unnecessary. systemPrompt is the system prompt to be used for the sync generation, it's majorly a role. the last message is the user prompt to do the task.

func (*Agent) SortAnthropicMessagesByOrder added in v1.13.5

func (ag *Agent) SortAnthropicMessagesByOrder() error

func (*Agent) SortGeminiMessagesByOrder added in v1.14.13

func (ag *Agent) SortGeminiMessagesByOrder() error

func (*Agent) SortOpenAIMessagesByOrder added in v1.12.20

func (ag *Agent) SortOpenAIMessagesByOrder() error

Sort the messages by order: history only (user/assistant/tool turns — no system message). The system prompt is never persisted; it is injected fresh in process().

func (*Agent) SortOpenChatMessagesByOrder added in v1.12.20

func (ag *Agent) SortOpenChatMessagesByOrder() error

Sort the messages by order: history only (user/assistant/tool turns — no system message). The system prompt is never persisted; it is injected fresh in process().

func (*Agent) StartIndicator added in v1.9.7

func (ag *Agent) StartIndicator(text string)

func (*Agent) StartReasoning added in v1.9.7

func (ag *Agent) StartReasoning()

StartReasoning notifies the user and logs to file that the agent has started thinking. It writes a status message to both StdOutput and FileOutput if they are available.

func (*Agent) StopIndicator added in v1.9.7

func (ag *Agent) StopIndicator()

func (*Agent) Warn added in v1.9.7

func (ag *Agent) Warn(text string)

func (*Agent) WriteDiffConfirm added in v1.11.10

func (ag *Agent) WriteDiffConfirm(text string)

func (*Agent) WriteEnd added in v1.9.7

func (ag *Agent) WriteEnd()

func (*Agent) WriteFunctionCall added in v1.9.7

func (ag *Agent) WriteFunctionCall(text string)

func (*Agent) WriteFunctionCallOver added in v1.14.8

func (ag *Agent) WriteFunctionCallOver()

func (*Agent) WriteMarkdown added in v1.9.7

func (ag *Agent) WriteMarkdown()

func (*Agent) WriteReasoning added in v1.9.7

func (ag *Agent) WriteReasoning(text string)

WriteReasoning writes the provided reasoning text to both the standard output and an output file, applying specific formatting to each if they are available.

func (*Agent) WriteText added in v1.9.7

func (ag *Agent) WriteText(text string)

WriteText writes the given text to the Agent's StdOutput, Markdown, and FileOutput writers if they are set.

func (*Agent) WriteUsage added in v1.9.7

func (ag *Agent) WriteUsage()

type AgentMessage added in v1.15.23

type AgentMessage struct {
	Task     *SubAgentTask
	RespChan chan<- AgentResponse // caller-owned, per-request
}

AgentMessage is a task delivery envelope sent on an agent's TaskChan.

type AgentOptions added in v1.9.7

type AgentOptions struct {
	Prompt        string
	SysPrompt     string
	Files         []*FileData
	ModelInfo     *data.Model
	MaxRecursions int
	ThinkingLevel string
	EnabledTools  []string // List of enabled embedding tools
	Capabilities  []string // List of enabled capabilities
	YoloMode      bool     // Whether to automatically approve tools
	QuietMode     bool     // If Quiet mode then don't print to console
	OutputFile    string   // If OutputFile is set then write to file
	SessionName   string
	MCPConfig     map[string]*data.MCPServer

	// Sub-agent orchestration fields
	SharedState *data.SharedState // Shared state for inter-agent communication
	AgentName   string            // Name of the agent running this task
	ModelName   string            // Current model name of current agent (agent model key)
}

type AgentResponse added in v1.15.23

type AgentResponse struct {
	TaskKey string
	Result  *SubAgentResult
	Err     error
}

AgentResponse is the signal sent back to the caller when a task finishes.

type AgentRunner added in v1.13.14

type AgentRunner func(*AgentOptions) error

AgentRunner defines the function signature for executing an agent

type Anthropic added in v1.13.5

type Anthropic struct {
	// contains filtered or unexported fields
}

type AnthropicSession added in v1.14.24

type AnthropicSession struct {
	BaseSession
	Messages []anthropic.MessageParam
}

AnthropicSession represents a session using Anthropic format

func (*AnthropicSession) Clear added in v1.14.24

func (s *AnthropicSession) Clear() error

Clear removes all messages from the session

func (*AnthropicSession) GetMessages added in v1.14.24

func (s *AnthropicSession) GetMessages() interface{}

func (*AnthropicSession) Load added in v1.14.24

func (s *AnthropicSession) Load() error

Load retrieves the session from disk (JSONL format).

func (*AnthropicSession) MarshalMessages added in v1.14.24

func (s *AnthropicSession) MarshalMessages(messages []anthropic.MessageParam, dropToolContent bool) []byte

func (*AnthropicSession) Push added in v1.14.24

func (s *AnthropicSession) Push(messages ...interface{}) error

Push adds multiple messages to the session (high performance). Uses append-mode for incremental saves using JSONL format (one message per line).

func (*AnthropicSession) Save added in v1.14.24

func (s *AnthropicSession) Save() error

Save persists the session to disk using JSONL format (one message per line).

func (*AnthropicSession) SetMessages added in v1.14.24

func (s *AnthropicSession) SetMessages(messages interface{})

type AtRefProcessor added in v1.12.9

type AtRefProcessor struct {
	// contains filtered or unexported fields
}

AtRefProcessor handles @ reference processing

func NewAtRefProcessor added in v1.12.9

func NewAtRefProcessor() *AtRefProcessor

NewAtRefProcessor creates a new @ reference processor

func (*AtRefProcessor) AddExcludePattern added in v1.12.9

func (p *AtRefProcessor) AddExcludePattern(pattern string)

AddExcludePattern adds a pattern to exclude from directory listings

func (*AtRefProcessor) ParseAtReferences added in v1.12.9

func (p *AtRefProcessor) ParseAtReferences(text string) []AtReference

ParseAtReferences finds all @ references in the given text

func (*AtRefProcessor) ProcessReferences added in v1.12.9

func (p *AtRefProcessor) ProcessReferences(text string, references []AtReference) (string, error)

ProcessReferences processes all @ references and returns augmented text

func (*AtRefProcessor) ProcessText added in v1.12.9

func (p *AtRefProcessor) ProcessText(text string) (string, error)

ProcessText processes text containing @ references and returns augmented text

func (*AtRefProcessor) SetMaxDirItems added in v1.12.9

func (p *AtRefProcessor) SetMaxDirItems(count int)

SetMaxDirItems sets the maximum number of directory items to list

func (*AtRefProcessor) SetMaxFileSize added in v1.12.9

func (p *AtRefProcessor) SetMaxFileSize(size int64)

SetMaxFileSize sets the maximum file size to include

type AtReference added in v1.12.9

type AtReference struct {
	Original string // Original @ reference text (e.g., "@main.go")
	Path     string // Resolved file/directory path
}

AtReference represents a single @ reference found in text

type BaseSession added in v1.14.24

type BaseSession struct {
	Name string
	Path string
}

BaseSession holds common fields and methods for all session types

func (*BaseSession) Clear added in v1.14.24

func (s *BaseSession) Clear() error

func (*BaseSession) GetMessages added in v1.14.24

func (s *BaseSession) GetMessages() interface{}

func (*BaseSession) GetName added in v1.15.21

func (s *BaseSession) GetName() string

func (*BaseSession) GetPath added in v1.14.24

func (s *BaseSession) GetPath() string

func (*BaseSession) GetTopSessionName added in v1.15.23

func (s *BaseSession) GetTopSessionName() string

GetTopSessionName returns the top session name, which is the first part of the session name. The session name is in the format "SessionName::TaskKey". For example, if the session name is "Main::Task1", the top session name is "Main"; if the session name is "Main", the top session name is "Main".

func (*BaseSession) Load added in v1.14.24

func (s *BaseSession) Load() error

func (*BaseSession) Open added in v1.14.24

func (s *BaseSession) Open(title string) error

Open initializes a session with the provided title, resolving an index to the actual session name if necessary. It sanitizes the session name for the path, and sets the internal path accordingly. Returns an error if the title cannot be resolved.

func (*BaseSession) Push added in v1.14.24

func (s *BaseSession) Push(messages ...interface{})

func (*BaseSession) Save added in v1.14.24

func (s *BaseSession) Save() error

func (*BaseSession) SetMessages added in v1.14.24

func (s *BaseSession) SetMessages(messages interface{})

func (*BaseSession) SetPath added in v1.14.24

func (s *BaseSession) SetPath(title string)

SetPath sets the file path for saving the session

type ContextHooks added in v1.15.27

type ContextHooks struct {
	// contains filtered or unexported fields
}

ContextHooks holds registered providers that contribute additional context to the LLM prompt at the start of every user turn. Each provider is a func() string that returns a formatted context block, or an empty string if it has nothing to contribute (e.g. plugin disabled).

func NewContextHooks added in v1.15.27

func NewContextHooks() ContextHooks

NewContextHooks builds a ContextHooks populated from all currently-enabled context providers.

func (ContextHooks) Collect added in v1.15.27

func (h ContextHooks) Collect() string

Collect calls all registered providers synchronously and returns the joined non-empty contributions, ready for prepending to the LLM prompt.

type ContextManager added in v1.12.14

type ContextManager interface {
	// PruneMessages checks whether the message history exceeds the context limit and
	// applies the configured strategy (truncation or summarisation) if needed.
	// • messages — the typed provider slice (e.g. []openai.ChatCompletionMessage)
	// • extra    — optional additional args (tools, systemPrompt) required by the provider
	// Returns the (possibly pruned) slice, a truncated flag, and any error.
	PruneMessages(messages any, extra ...any) (any, bool, error)

	// GetStrategy returns the active truncation strategy.
	GetStrategy() TruncationStrategy

	// GetMaxOutputTokens returns the model's maximum output token budget.
	GetMaxOutputTokens() int
}

ContextManager is the public interface implemented by each provider-specific context manager. Callers type-assert the returned messages slice to the concrete provider type.

func NewContextManager added in v1.12.14

func NewContextManager(ag *Agent, strategy TruncationStrategy) ContextManager

NewContextManager constructs the correct provider-specific ContextManager for the agent.

type EditorContext added in v1.15.27

type EditorContext struct {
	ActiveEditor     *ActiveEditor    `json:"activeEditor"`
	OpenFiles        []EditorOpenFile `json:"otherOpenFiles"`
	WorkspaceFolders []string         `json:"workspaceFolders"`
}

EditorContext describes the state of the VSCode environment

type EditorOpenFile added in v1.15.27

type EditorOpenFile struct {
	FilePath string `json:"filePath"`
	IsDirty  bool   `json:"isDirty"`
}

type EditorPosition added in v1.15.27

type EditorPosition struct {
	Line      int `json:"line"`
	Character int `json:"character"`
}

type EditorSelection added in v1.15.27

type EditorSelection struct {
	Start EditorPosition `json:"start"`
	End   EditorPosition `json:"end"`
	Text  string         `json:"text"`
}

type ExtractorConfig added in v1.6.14

type ExtractorConfig struct {
	UserAgent          string
	HeaderAccept       string
	Timeout            time.Duration
	MinTextLength      int
	BoilerplateIDs     []string
	BoilerplateClasses []string
}

Configuration options for the text extractor

type FetchResult added in v1.15.3

type FetchResult struct {
	Content string
	Error   error
}

func FetchProcess added in v1.6.14

func FetchProcess(ctx context.Context, urls []string) []FetchResult

type FileData added in v1.4.0

type FileData struct {
	// contains filtered or unexported fields
}

func NewFileData added in v1.4.0

func NewFileData(format string, data []byte, path string) *FileData

func (*FileData) Data added in v1.4.0

func (i *FileData) Data() []byte

func (*FileData) Format added in v1.4.0

func (i *FileData) Format() string

func (*FileData) Path added in v1.6.0

func (i *FileData) Path() string

type FileHooks added in v1.15.25

type FileHooks struct {
	OnPreview []func(path, content string)
	OnSaved   []func(path string)
	OnDiscard []func(path string)
}

FileHooks holds registered callbacks for file lifecycle events. All hook funcs are invoked asynchronously by their caller.

func NewFileHooks added in v1.15.25

func NewFileHooks() FileHooks

NewFileHooks builds a FileHooks populated from all currently-enabled plugins.

func (*FileHooks) Discard added in v1.15.25

func (h *FileHooks) Discard(path string)

Discard dispatches the discard event to all registered hooks.

func (*FileHooks) Preview added in v1.15.25

func (h *FileHooks) Preview(path, content string)

Preview dispatches the preview event to all registered hooks.

func (*FileHooks) Saved added in v1.15.25

func (h *FileHooks) Saved(path string)

Saved dispatches the saved event to all registered hooks.

type Gemini added in v1.15.25

type Gemini struct {
	// contains filtered or unexported fields
}

type GeminiSession added in v1.14.24

type GeminiSession struct {
	BaseSession
	Messages []*genai.Content
}

GeminiSession manages sessions for Google's Gemini model (a Google Gemini session)

func (*GeminiSession) Clear added in v1.14.24

func (s *GeminiSession) Clear() error

Clear removes all messages from the session

func (*GeminiSession) GetMessages added in v1.14.24

func (s *GeminiSession) GetMessages() interface{}

func (*GeminiSession) Load added in v1.14.24

func (s *GeminiSession) Load() error

Load retrieves the Gemini session from disk (JSONL format).

func (*GeminiSession) MarshalMessages added in v1.14.24

func (s *GeminiSession) MarshalMessages(messages []*genai.Content, dropToolContent bool) []byte

func (*GeminiSession) Push added in v1.14.24

func (s *GeminiSession) Push(messages ...interface{}) error

Push adds multiple content items to the history (high performance). Uses append-mode for incremental saves using JSONL format (one message per line).

func (*GeminiSession) Save added in v1.14.24

func (s *GeminiSession) Save() error

Save persists the Gemini session to disk using JSONL format (one message per line).

func (*GeminiSession) SetMessages added in v1.14.24

func (s *GeminiSession) SetMessages(messages interface{})

type MCPClient added in v1.11.4

type MCPClient struct {
	// contains filtered or unexported fields
}

func GetMCPClient added in v1.11.4

func GetMCPClient() *MCPClient

func (*MCPClient) AddHttpServer added in v1.11.4

func (mc *MCPClient) AddHttpServer(name string, url string, headers map[string]string) error

func (*MCPClient) AddSseServer added in v1.11.4

func (mc *MCPClient) AddSseServer(name string, url string, headers map[string]string) error

func (*MCPClient) AddStdServer added in v1.11.4

func (mc *MCPClient) AddStdServer(name string, cmd string, env map[string]string, cwd string, args ...string) error

func (*MCPClient) CallTool added in v1.11.4

func (mc *MCPClient) CallTool(toolName string, args map[string]any) (*MCPToolResponse, error)

func (*MCPClient) Close added in v1.11.4

func (mc *MCPClient) Close()

func (*MCPClient) FindTool added in v1.11.4

func (mc *MCPClient) FindTool(toolName string) *MCPSession

func (*MCPClient) GetAllServers added in v1.11.4

func (mc *MCPClient) GetAllServers() []*MCPServer

GetAllServers returns all MCP servers (grouped by server session name), each containing a slice of its available tools.

func (*MCPClient) GetPrompts added in v1.11.8

func (mc *MCPClient) GetPrompts(session *MCPSession) (*[]MCPPrompt, error)

func (*MCPClient) GetResources added in v1.11.8

func (mc *MCPClient) GetResources(session *MCPSession) (*[]MCPResource, error)

func (*MCPClient) GetTools added in v1.11.4

func (mc *MCPClient) GetTools(session *MCPSession) (*[]MCPTool, error)

func (*MCPClient) Init added in v1.11.4

func (mc *MCPClient) Init(servers map[string]*data.MCPServer, option MCPLoadOption) error

Init supports three types of transports: httpUrl → StreamableHTTPClientTransport, url → SSEClientTransport, command → StdioClientTransport. If LoadAll is true, all servers are listed; otherwise only allowed servers are loaded.

func (*MCPClient) IsReady added in v1.15.4

func (mc *MCPClient) IsReady() bool

IsReady returns true if the client is initialized and has at least one tool loaded. It is safe to call without locking.

func (*MCPClient) PreloadAsync added in v1.15.4

func (mc *MCPClient) PreloadAsync(servers map[string]*data.MCPServer, option MCPLoadOption)

PreloadAsync initializes the MCP client in the background.

type MCPLoadOption added in v1.11.8

type MCPLoadOption struct {
	LoadAll       bool // load all tools(allowed|blocked)
	LoadTools     bool // load tools (tools/list)
	LoadResources bool // load resources (resources/list)
	LoadPrompts   bool // load prompts (prompts/list)
}

type MCPPrompt added in v1.11.8

type MCPPrompt struct {
	Name        string
	Description string
	Parameters  map[string]string
}

type MCPResource added in v1.11.8

type MCPResource struct {
	Name        string
	Description string
	URI         string
	MIMEType    string
}

type MCPServer added in v1.11.4

type MCPServer struct {
	Name      string
	Allowed   bool
	Tools     *[]MCPTool
	Resources *[]MCPResource
	Prompts   *[]MCPPrompt
}

type MCPSession added in v1.11.4

type MCPSession struct {
	// contains filtered or unexported fields
}

type MCPTool added in v1.11.8

type MCPTool struct {
	Name        string
	Description string
	Parameters  map[string]string
	Properties  map[string]*jsonschema.Schema // Keep origin JSON Schema
}

type MCPToolResponse added in v1.11.6

type MCPToolResponse struct {
	Types    []MCPToolResponseType
	Contents []string
}

type MCPToolResponseType added in v1.11.6

type MCPToolResponseType string
const (
	MCPResponseText  MCPToolResponseType = "text"
	MCPResponseImage MCPToolResponseType = "image"
	MCPResponseAudio MCPToolResponseType = "audio"
)

type Markdown added in v1.9.7

type Markdown struct {
	// contains filtered or unexported fields
}

func NewMarkdown added in v1.9.7

func NewMarkdown() *Markdown

NewMarkdown creates a new instance of Markdown

func (*Markdown) Render added in v1.9.7

func (mr *Markdown) Render(r io.Output)

Render clears the streaming output and re-renders the entire Markdown

func (*Markdown) Write added in v1.9.7

func (mr *Markdown) Write(args ...interface{})

func (*Markdown) Writef added in v1.9.7

func (mr *Markdown) Writef(format string, args ...interface{})

Writef streams formatted output incrementally and tracks the number of lines

type ModelInfo added in v1.13.2

type ModelInfo struct {
	ApiKey          string
	EndPoint        string
	Model           string
	Provider        string
	Temperature     float32
	TopP            float32 // Top-p sampling parameter
	Seed            *int32  // Seed for deterministic generation
	ContextLength   int32   // Model context length limit
	MaxOutputTokens int32   // Model max output tokens
}

type ModelLimits added in v1.12.14

type ModelLimits struct {
	ContextWindow   int // Total context window in tokens
	MaxOutputTokens int // Maximum output tokens allowed
}

ModelLimits contains context window configuration for a model

func (ModelLimits) MaxInputTokens added in v1.12.14

func (ml ModelLimits) MaxInputTokens(bufferPercent float64) int

MaxInputTokens calculates the maximum input tokens with a safety buffer. The buffer ensures there's always room for the model's response.

type OpenAI added in v1.10.4

type OpenAI struct {
	// contains filtered or unexported fields
}

OpenAI manages the state of an ongoing session with an AI assistant

type OpenAISession added in v1.14.24

type OpenAISession struct {
	BaseSession
	Messages []openai.ChatCompletionMessageParamUnion
}

OpenAISession represents a session using OpenAI format

func (*OpenAISession) Clear added in v1.14.24

func (s *OpenAISession) Clear() error

Clear removes all messages from the session

func (*OpenAISession) GetMessages added in v1.14.24

func (s *OpenAISession) GetMessages() interface{}

func (*OpenAISession) Load added in v1.14.24

func (s *OpenAISession) Load() error

Load retrieves the session from disk (JSONL format).

func (*OpenAISession) MarshalMessages added in v1.14.24

func (s *OpenAISession) MarshalMessages(messages []openai.ChatCompletionMessageParamUnion, dropToolContent bool) []byte

func (*OpenAISession) Push added in v1.14.24

func (s *OpenAISession) Push(messages ...interface{}) error

Push adds multiple messages to the session (high performance). Uses append-mode for incremental saves using JSONL format (one message per line).

func (*OpenAISession) Save added in v1.14.24

func (s *OpenAISession) Save() error

Save persists the session to disk using JSONL format (one message per line).

func (*OpenAISession) SetMessages added in v1.14.24

func (s *OpenAISession) SetMessages(messages interface{})

type OpenChat added in v1.5.1

type OpenChat struct {
	// contains filtered or unexported fields
}

OpenChat manages the state of an ongoing session with an AI assistant

type OpenChatSession added in v1.14.24

type OpenChatSession struct {
	BaseSession
	Messages []*model.ChatCompletionMessage
}

OpenChatSession manages sessions for Volcengine model

func (*OpenChatSession) Clear added in v1.14.24

func (s *OpenChatSession) Clear() error

Clear removes all messages from the session

func (*OpenChatSession) GetMessages added in v1.14.24

func (s *OpenChatSession) GetMessages() interface{}

func (*OpenChatSession) Load added in v1.14.24

func (s *OpenChatSession) Load() error

Load retrieves the session from disk (JSONL format).

func (*OpenChatSession) MarshalMessages added in v1.14.24

func (s *OpenChatSession) MarshalMessages(messages []*model.ChatCompletionMessage, dropToolContent bool) []byte

func (*OpenChatSession) Push added in v1.14.24

func (s *OpenChatSession) Push(messages ...interface{}) error

Push adds multiple messages to the session (high performance). Uses append-mode for incremental saves using JSONL format (one message per line).

func (*OpenChatSession) Save added in v1.14.24

func (s *OpenChatSession) Save() error

Save persists the session to disk using JSONL format (one message per line).

func (*OpenChatSession) SetMessages added in v1.14.24

func (s *OpenChatSession) SetMessages(messages interface{})

type OpenFunctionDefinition added in v1.10.4

type OpenFunctionDefinition struct {
	Name        string
	Description string
	Parameters  map[string]interface{}
}

OpenFunctionDefinition is a generic function definition that is not tied to any specific model.

type OpenProcessor added in v1.10.4

type OpenProcessor struct {
	// contains filtered or unexported fields
}

OpenProcessor is the main processor for OpenAI-like models For tools implementation - It manages the context, notifications, data streaming, and tool usage - It handles queries and references, and maintains the status stack

type OpenTool added in v1.10.4

type OpenTool struct {
	Type     ToolType
	Function *OpenFunctionDefinition
}

OpenTool is a generic tool definition that is not tied to any specific model.

func GetOpenToolsFiltered added in v1.14.2

func GetOpenToolsFiltered(allowedTools []string) []*OpenTool

GetOpenToolsFiltered returns tools filtered by the allowed list. If allowedTools is nil or empty, returns all tools. Unknown tool names are gracefully ignored.

func MCPToolsToOpenTool added in v1.11.4

func MCPToolsToOpenTool(mcpTool MCPTool) *OpenTool

MCPToolsToOpenTool converts an MCPTool struct to an OpenTool with proper JSON schema

func (*OpenTool) ToAnthropicTool added in v1.13.5

func (ot *OpenTool) ToAnthropicTool() anthropic.ToolUnionParam

ToAnthropicTool converts an OpenTool to an anthropic.ToolUnionParam

func (*OpenTool) ToGeminiFunctions added in v1.10.4

func (ot *OpenTool) ToGeminiFunctions() *genai.FunctionDeclaration

ToGeminiFunctions converts an OpenTool to a *genai.FunctionDeclaration

func (*OpenTool) ToOpenAITool added in v1.10.4

func (ot *OpenTool) ToOpenAITool() openai.ChatCompletionToolUnionParam

ToOpenAITool converts an OpenTool to an openai.ChatCompletionToolUnionParam

func (*OpenTool) ToOpenChatTool added in v1.10.4

func (ot *OpenTool) ToOpenChatTool() *model.Tool

ToOpenChatTool converts an OpenTool to a *model.Tool

type ReleaseInfo added in v1.14.24

type ReleaseInfo struct {
	Version   string
	AssetURL  string
	AssetName string
	Newer     bool
}

ReleaseInfo carries the result of a version check.

func CheckLatest added in v1.14.24

func CheckLatest(currentVersion string) (*ReleaseInfo, error)

CheckLatest queries GitHub Releases for the latest version. Returns (release, error). ReleaseInfo.Newer is true when the remote version is greater than currentVersion.

type SearchEngine added in v1.9.4

type SearchEngine struct {
	UseSearch     bool
	Name          string
	ApiKey        string
	CxKey         string
	MaxReferences int

	// DeepDive indicates how many links to fetch content from
	// If 0, it defaults to a small number (e.g. 3) for efficiency.
	DeepDive int
}

func (*SearchEngine) BingSearch added in v1.9.4

func (s *SearchEngine) BingSearch(query string) (map[string]any, error)

BingSearch simulates a Bing search for the given query and returns the results as a generic map.

func (*SearchEngine) GoogleSearch added in v1.9.4

func (s *SearchEngine) GoogleSearch(query string) (map[string]any, error)

GoogleSearch performs a Google search for the given query, using explicit conversions for protocol buffer compatibility.

func (*SearchEngine) NoneSearch added in v1.9.4

func (s *SearchEngine) NoneSearch(query string) (map[string]any, error)

func (*SearchEngine) RetrieveQueries added in v1.9.4

func (s *SearchEngine) RetrieveQueries(queries []string) string

func (*SearchEngine) RetrieveReferences added in v1.9.4

func (s *SearchEngine) RetrieveReferences(references []map[string]any) string

func (*SearchEngine) SerpAPISearch added in v1.9.4

func (s *SearchEngine) SerpAPISearch(query string, engine string) (map[string]any, error)

func (*SearchEngine) TavilySearch added in v1.9.4

func (s *SearchEngine) TavilySearch(query string) (map[string]any, error)

type Session added in v1.15.21

type Session interface {
	SetPath(title string)
	GetPath() string
	GetName() string
	GetTopSessionName() string
	Load() error
	Save() error
	Open(title string) error
	Clear() error
	Push(messages ...interface{}) error
	GetMessages() interface{}
	SetMessages(messages interface{})
}

Session is an interface for handling session history

func ConstructSession added in v1.15.21

func ConstructSession(sessionName string, provider string) (Session, error)

ConstructSession constructs a new session based on the provider

type SessionMeta added in v1.14.24

type SessionMeta struct {
	Name     string
	Provider string
	ModTime  int64
	Empty    bool
}

SessionMeta holds metadata for a session.

func ListSortedSessions added in v1.14.24

func ListSortedSessions(detectProvider bool, includeSubAgents bool) ([]SessionMeta, error)

ListSortedSessions returns a slice of SessionMeta sorted by ModTime descending. detectProvider: whether to detect the provider of the session. includeSubAgents: whether to include subagent sessions.

type SkillManager added in v1.13.18

type SkillManager struct {
	// contains filtered or unexported fields
}

SkillManager handles skill operations

func GetSkillManager added in v1.13.18

func GetSkillManager() *SkillManager

GetSkillManager returns the singleton instance of SkillManager

func NewSkillManager added in v1.13.18

func NewSkillManager() *SkillManager

NewSkillManager creates a new SkillManager

func (*SkillManager) ActivateSkill added in v1.13.18

func (sm *SkillManager) ActivateSkill(name string) (string, string, string, error)

ActivateSkill activates a skill by name and returns its instructions, description, and available resources.

func (*SkillManager) CreateTestSkill added in v1.13.18

func (sm *SkillManager) CreateTestSkill(rootPath string) (string, error)

CreateTestSkill creates a temporary test skill for verification

func (*SkillManager) GenerateFileTree added in v1.13.18

func (sm *SkillManager) GenerateFileTree(dir string) (string, error)

GenerateFileTree generates a professional tree representation of the skill directory utilizing Unicode box-drawing characters for enhanced structural clarity.

func (*SkillManager) GetAvailableSkills added in v1.13.18

func (sm *SkillManager) GetAvailableSkills() string

GetAvailableSkills returns the XML string for system prompt injection Skills that are disabled in settings.json are excluded from the output.

func (*SkillManager) GetAvailableSkillsMetadata added in v1.14.21

func (sm *SkillManager) GetAvailableSkillsMetadata() []data.SkillMetadata

GetAvailableSkillsMetadata returns the list of available skills

func (*SkillManager) LoadMetadata added in v1.13.18

func (sm *SkillManager) LoadMetadata() error

LoadMetadata scans and loads skill metadata

type StatusStack added in v1.9.2

type StatusStack struct {
	// contains filtered or unexported fields
}

StatusStack is a stack data structure for managing states.

func (*StatusStack) ChangeTo added in v1.9.2

func (s *StatusStack) ChangeTo(
	proc chan<- StreamNotify,
	notify StreamNotify,
	proceed <-chan bool)

func (*StatusStack) Clear added in v1.9.2

func (s *StatusStack) Clear()

func (*StatusStack) Debug added in v1.9.2

func (s *StatusStack) Debug()

func (*StatusStack) IsEmpty added in v1.9.2

func (s *StatusStack) IsEmpty() bool

func (*StatusStack) IsTop added in v1.9.2

func (s *StatusStack) IsTop(status StreamStatus) bool

func (*StatusStack) Peek added in v1.9.2

func (s *StatusStack) Peek() StreamStatus

Peek returns the state from the top of the stack without removing it. If the stack is empty, it returns StateNormal.

func (*StatusStack) Pop added in v1.9.2

func (s *StatusStack) Pop() StreamStatus

Pop removes and returns the state from the top of the stack. If the stack is empty, it returns StateNormal.

func (*StatusStack) Push added in v1.9.2

func (s *StatusStack) Push(status StreamStatus)

Push adds a state to the top of the stack.

func (*StatusStack) Size added in v1.9.2

func (s *StatusStack) Size() int

type StreamData added in v1.9.2

type StreamData struct {
	Text string
	Type StreamDataType
}

type StreamDataType added in v1.9.2

type StreamDataType int
const (
	DataTypeUnknown   StreamDataType = iota
	DataTypeNormal                   // 1
	DataTypeReasoning                // 2
	DataTypeFinished                 // 3
)

type StreamNotify

type StreamNotify struct {
	Status StreamStatus
	Data   string      // For text content or error messages
	Extra  interface{} // For additional metadata (e.g., switch instruction)
}

type StreamStatus

type StreamStatus int
const (
	StatusUnknown StreamStatus = iota
	StatusProcessing
	StatusStarted
	StatusFinished
	StatusWarn
	StatusError
	StatusReasoning
	StatusReasoningOver
	StatusFunctionCalling
	StatusFunctionCallingOver
	StatusDiffConfirm
	StatusDiffConfirmOver
	StatusSwitchAgent
	StatusUserCancel
)

type SubAgentExecutor added in v1.13.14

type SubAgentExecutor struct {
	// contains filtered or unexported fields
}

SubAgentExecutor manages sub-agent lifecycle and message routing

func NewSubAgentExecutor added in v1.13.14

func NewSubAgentExecutor(state *data.SharedState, mainSessionName string) *SubAgentExecutor

NewSubAgentExecutor creates a new SubAgentExecutor

func (*SubAgentExecutor) Dispatch added in v1.15.23

func (e *SubAgentExecutor) Dispatch(tasks []*SubAgentTask) ([]AgentResponse, error)

Dispatch fans out tasks asynchronously to subagents and waits for all responses.

func (*SubAgentExecutor) FormatSummary added in v1.13.14

func (e *SubAgentExecutor) FormatSummary(responses []AgentResponse) string

FormatSummary returns a brief summary of task execution

func (*SubAgentExecutor) Shutdown added in v1.15.23

func (e *SubAgentExecutor) Shutdown()

Shutdown closes all agent task channels, allowing their event loops to exit.

type SubAgentResult added in v1.13.14

type SubAgentResult struct {
	AgentName  string         // Agent that executed
	Status     SubAgentStatus // Execution status
	Progress   string         // Human-readable progress description
	OutputFile string         // Path to detailed output
	TaskKey    string         // Original task key
	StateKey   string         // Key where result was stored in SharedState (agentName_taskKey)
	Error      error          // Error if failed
	Duration   time.Duration  // Execution duration
	StartTime  time.Time      // When execution started
	EndTime    time.Time      // When execution ended
}

SubAgentResult represents the outcome of a sub-agent execution

type SubAgentStatus added in v1.13.14

type SubAgentStatus int

SubAgentStatus represents the execution status of a sub-agent task

const (
	StatusPending SubAgentStatus = iota
	StatusRunning
	StatusCompleted
	StatusFailed
	StatusCancelled
)

func (SubAgentStatus) String added in v1.13.14

func (s SubAgentStatus) String() string

type SubAgentTask added in v1.13.14

type SubAgentTask struct {
	CallerAgentName string   // Caller agent name
	AgentName       string   // Agent profile to use
	Instruction     string   // Task instruction/prompt
	TaskKey         string   // Key to store result in SharedState (becomes agentName_taskKey)
	InputKeys       []string // Keys to read as input context (virtual files), injected into instruction
}

SubAgentTask represents a single sub-agent invocation request

type SwitchAgentError added in v1.13.10

type SwitchAgentError struct {
	TargetAgent string
	Instruction string
}

func AsSwitchAgentError added in v1.14.21

func AsSwitchAgentError(err error) (SwitchAgentError, bool)

AsSwitchAgentError safely extracts a SwitchAgentError from an error, handling both value and pointer variants.

func (SwitchAgentError) Error added in v1.13.10

func (e SwitchAgentError) Error() string

type TavilyError added in v1.2.0

type TavilyError struct {
	Detail TavilyErrorDetail `json:"detail"`
}

type TavilyErrorDetail added in v1.2.0

type TavilyErrorDetail struct {
	Error string `json:"error"`
}

type TavilyResponse added in v1.2.0

type TavilyResponse struct {
	Query        string         `json:"query"`
	Answer       string         `json:"answer"`
	Images       []string       `json:"images"`
	Results      []TavilyResult `json:"results"`
	ResponseTime float32        `json:"response_time"` // e.g., "1.67"
}

TavilyResponse represents the overall Tavily API response.

type TavilyResult added in v1.2.0

type TavilyResult struct {
	Title      string  `json:"title"`
	URL        string  `json:"url"`
	Content    string  `json:"content"`
	Score      float64 `json:"score"`
	RawContent *string `json:"raw_content"`
}

TavilyResult represents a single result in the Tavily API response.

type ThinkingLevel added in v1.13.7

type ThinkingLevel string

ThinkingLevel represents the unified thinking/reasoning level across providers. Maps to provider-specific configurations: - OpenAI: reasoning_effort ("off"/"minimal"/"low"/"medium"/"high") - OpenChat: model.Thinking + ReasoningEffort - Gemini 2.5: ThinkingBudget (token count, -1 for dynamic) - Gemini 3: ThinkingLevel ("OFF"/"MINIMAL"/"LOW"/"MEDIUM"/"HIGH") - Anthropic: thinking.budget_tokens (0/1024/4096/16384/31999)

const (
	ThinkingLevelOff     ThinkingLevel = "off"
	ThinkingLevelMinimal ThinkingLevel = "minimal"
	ThinkingLevelLow     ThinkingLevel = "low"
	ThinkingLevelMedium  ThinkingLevel = "medium"
	ThinkingLevelHigh    ThinkingLevel = "high"
)

func AllThinkingLevels added in v1.13.7

func AllThinkingLevels() []ThinkingLevel

AllThinkingLevels returns all valid thinking levels in order

func ParseThinkingLevel added in v1.13.7

func ParseThinkingLevel(s string) ThinkingLevel

ParseThinkingLevel normalizes user input to a valid ThinkingLevel. Supports backward compatibility with boolean values.

func (ThinkingLevel) Display added in v1.13.7

func (t ThinkingLevel) Display() string

Display returns a colorized display string for CLI output

func (ThinkingLevel) IsEnabled added in v1.13.7

func (t ThinkingLevel) IsEnabled() bool

IsEnabled returns true if thinking is enabled (not off)

func (ThinkingLevel) String added in v1.13.7

func (t ThinkingLevel) String() string

String returns the string representation

func (ThinkingLevel) ToAnthropicParams added in v1.13.7

func (t ThinkingLevel) ToAnthropicParams() anthropic.ThinkingConfigParamUnion

ToAnthropicParams returns the thinking budget tokens for Anthropic. Returns 0 for ThinkingLevelOff.

func (ThinkingLevel) ToGeminiConfig added in v1.13.7

func (t ThinkingLevel) ToGeminiConfig(modelName string) *genai.ThinkingConfig

ToGeminiConfig returns the Gemini ThinkingConfig based on model version. Gemini 3 uses ThinkingLevel, Gemini 2.5 uses ThinkingBudget.

func (ThinkingLevel) ToOpenAIReasoningEffort added in v1.13.7

func (t ThinkingLevel) ToOpenAIReasoningEffort() string

ToOpenAIReasoningEffort returns the OpenAI reasoning_effort parameter value. Returns empty string for ThinkingLevelOff (no param should be set).

func (ThinkingLevel) ToOpenChatParams added in v1.13.7

func (t ThinkingLevel) ToOpenChatParams() (*model.Thinking, *model.ReasoningEffort)

ToOpenChatParams returns the OpenChat model.Thinking and ReasoningEffort params.

type TokenCache added in v1.12.14

type TokenCache struct {
	// contains filtered or unexported fields
}

TokenCache provides a thread-safe cache for storing token counts of LLM messages. It uses JSON-marshaled message content as keys to ensure correct uniqueness.

func GetGlobalTokenCache added in v1.12.14

func GetGlobalTokenCache() *TokenCache

GetGlobalTokenCache returns the global token cache instance

func NewTokenCache added in v1.12.14

func NewTokenCache(maxSize int) *TokenCache

NewTokenCache creates a new TokenCache with the specified maximum size

func (*TokenCache) Clear added in v1.12.14

func (tc *TokenCache) Clear()

Clear removes all entries from the cache

func (*TokenCache) Get added in v1.12.14

func (tc *TokenCache) Get(key string) (int, bool)

Get retrieves a cached token count for the given key. Returns the count and true if found, or 0 and false if not found.

func (*TokenCache) GetOrComputeAnthropicTokens added in v1.13.5

func (tc *TokenCache) GetOrComputeAnthropicTokens(msg anthropic.MessageParam) int

GetOrComputeAnthropicTokens retrieves cached tokens or computes and caches them.

func (*TokenCache) GetOrComputeGeminiTokens added in v1.12.14

func (tc *TokenCache) GetOrComputeGeminiTokens(msg *genai.Content) int

GetOrComputeGeminiTokens retrieves cached tokens or computes and caches them for Gemini.

func (*TokenCache) GetOrComputeOpenAITokens added in v1.12.14

func (tc *TokenCache) GetOrComputeOpenAITokens(msg openai.ChatCompletionMessageParamUnion) int

GetOrComputeOpenAITokens retrieves cached tokens or computes and caches them.

func (*TokenCache) GetOrComputeOpenChatTokens added in v1.12.14

func (tc *TokenCache) GetOrComputeOpenChatTokens(msg *model.ChatCompletionMessage) int

GetOrComputeOpenChatTokens retrieves cached tokens or computes and caches them.

func (*TokenCache) Set added in v1.12.14

func (tc *TokenCache) Set(key string, count int)

Set stores a token count for the given key. If the cache is full, it evicts approximately half of the entries.

func (*TokenCache) Size added in v1.12.14

func (tc *TokenCache) Size() int

Size returns the current number of entries in the cache

func (*TokenCache) Stats added in v1.12.14

func (tc *TokenCache) Stats() (hits, misses int64, size int)

Stats returns cache statistics (hits, misses, size)

type TokenUsage added in v1.9.5

type TokenUsage struct {
	InputTokens   int
	OutputTokens  int
	CachedTokens  int
	ThoughtTokens int
	TotalTokens   int
	// For providers like Anthropic, cached tokens are not included in the prompt tokens
	// OpenAI, OpenChat and Gemini all include cached tokens in the prompt tokens
	CachedTokensInPrompt bool
}

func NewTokenUsage added in v1.9.7

func NewTokenUsage() *TokenUsage

func (*TokenUsage) RecordTokenUsage added in v1.9.5

func (tu *TokenUsage) RecordTokenUsage(input, output, cached, thought, total int)

func (*TokenUsage) Render added in v1.9.7

func (tu *TokenUsage) Render(output io.Output)

type ToolFunc added in v1.15.0

type ToolFunc func() (string, error)

type ToolType added in v1.10.4

type ToolType string

type TruncationStrategy added in v1.12.14

type TruncationStrategy string

TruncationStrategy defines how to handle context overflow

const (
	// StrategyTruncateOldest removes oldest messages first, preserving system prompt
	StrategyTruncateOldest TruncationStrategy = "truncate_oldest"

	// StrategySummarize replaces old context with a summary
	StrategySummarize TruncationStrategy = "summarize"

	// StrategyNone disables truncation - will fail if context exceeds limit
	StrategyNone TruncationStrategy = "none"

	// DefaultBufferPercent is the default safety buffer (80% of available space)
	/*
	 * Before context window fills up, you may run into "context rot,"
	 * where model performance degrades as input length increases even when there's technically room left
	 * — LLMs don't process all tokens equally, with attention concentrating on the beginning and end,
	 * so information in the middle gets less reliable processing.
	 *
	 * 80% leaves room for the model to "breathe" and maintain high-quality reasoning.
	 */
	DefaultBufferPercent = 0.80
)

type UniversalMessage added in v1.13.10

type UniversalMessage struct {
	Role      UniversalRole // "system", "user", "assistant"
	Content   string        // Main text content
	Reasoning string        // Thinking/reasoning content (if any)
}

UniversalMessage is a provider-agnostic representation of a chat message. It extracts only the essential semantic content for cross-provider conversion. Key design decisions: 1. Only text content and reasoning are preserved. 2. Tool calls, tool responses, images, and other multimodal content are discarded. 3. Role normalization: "model" (Gemini) → "assistant".

func ParseAnthropicMessages added in v1.13.10

func ParseAnthropicMessages(messages []anthropic.MessageParam) []UniversalMessage

ParseAnthropicMessages converts Anthropic messages to universal format. Extracts: OfText blocks, OfThinking/OfRedactedThinking blocks, OfToolResult as text Ignores: OfToolUse, OfImage, OfDocument

func ParseGeminiMessages added in v1.13.10

func ParseGeminiMessages(messages []*gemini.Content) []UniversalMessage

ParseGeminiMessages converts Gemini messages to universal format. Extracts: Parts.Text, Parts.Thought, FunctionResponse as text Ignores: FunctionCall, InlineData Maps: "model" → "assistant"

func ParseOpenAIMessages added in v1.13.10

func ParseOpenAIMessages(messages []openai.ChatCompletionMessageParamUnion) []UniversalMessage

ParseOpenAIMessages converts OpenAI messages to universal format. Extracts: Content, MultiContent[].Text, ReasoningContent, Tool Responses as text Ignores: ToolCalls, FunctionCall, ImageURL

func ParseOpenChatMessages added in v1.13.10

func ParseOpenChatMessages(messages []model.ChatCompletionMessage) []UniversalMessage

ParseOpenChatMessages converts OpenChat (Volcengine) messages to universal format.

type UniversalRole added in v1.13.10

type UniversalRole string
const (
	UniversalRoleSystem    UniversalRole = "system"
	UniversalRoleUser      UniversalRole = "user"
	UniversalRoleAssistant UniversalRole = "assistant"
)

func ConvertToUniversalRole added in v1.13.10

func ConvertToUniversalRole(role string) UniversalRole

func (UniversalRole) ConvertToAnthropic added in v1.13.10

func (r UniversalRole) ConvertToAnthropic() anthropic.MessageParamRole

func (UniversalRole) ConvertToGemini added in v1.13.10

func (r UniversalRole) ConvertToGemini() string

func (UniversalRole) ConvertToOpenAI added in v1.13.10

func (r UniversalRole) ConvertToOpenAI() string

func (UniversalRole) ConvertToOpenChat added in v1.13.10

func (r UniversalRole) ConvertToOpenChat() string

func (UniversalRole) String added in v1.13.10

func (r UniversalRole) String() string

type UserCancelError added in v1.14.13

type UserCancelError struct {
	Reason string
}

func AsUserCancelError added in v1.14.21

func AsUserCancelError(err error) (UserCancelError, bool)

AsUserCancelError safely extracts a UserCancelError from an error, handling both value and pointer variants.

func (UserCancelError) Error added in v1.14.13

func (e UserCancelError) Error() string

Error implements [error].

type WorkflowManager added in v1.14.11

type WorkflowManager struct {
	// contains filtered or unexported fields
}

WorkflowManager handles workflow operations

func GetWorkflowManager added in v1.14.11

func GetWorkflowManager() *WorkflowManager

GetWorkflowManager returns the singleton instance of WorkflowManager

func (*WorkflowManager) CreateWorkflow added in v1.14.11

func (wm *WorkflowManager) CreateWorkflow(name, description, content string) error

CreateWorkflow creates a new workflow file

func (*WorkflowManager) GetCommands added in v1.14.11

func (wm *WorkflowManager) GetCommands() map[string]string

GetCommands returns a map of command->description for chat suggestions

func (*WorkflowManager) GetWorkflowByName added in v1.14.11

func (wm *WorkflowManager) GetWorkflowByName(name string) (string, string, error)

GetWorkflowByName retrieves a workflow by its name (case-insensitive) Returns content, description, and error

func (*WorkflowManager) GetWorkflowNames added in v1.14.11

func (wm *WorkflowManager) GetWorkflowNames() []string

GetWorkflowNames returns a sorted list of all available workflow names

func (*WorkflowManager) IsReservedCommand added in v1.14.11

func (wm *WorkflowManager) IsReservedCommand(name string) bool

IsReservedCommand checks if a command is reserved

func (*WorkflowManager) LoadMetadata added in v1.14.11

func (wm *WorkflowManager) LoadMetadata(reservedCommands map[string]string) error

LoadMetadata scans and loads workflow metadata

func (*WorkflowManager) RemoveWorkflow added in v1.14.11

func (wm *WorkflowManager) RemoveWorkflow(name string) error

RemoveWorkflow removes a workflow

func (*WorkflowManager) RenameWorkflow added in v1.14.11

func (wm *WorkflowManager) RenameWorkflow(oldName, newName string) error

RenameWorkflow renames a workflow

func (*WorkflowManager) UpdateWorkflow added in v1.14.11

func (wm *WorkflowManager) UpdateWorkflow(name, description, content string) error

UpdateWorkflow updates an existing workflow file

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL