import type { Promisable } from 'type-fest';
import type { AgentModelRequirements } from '../../book-2.0/agent-source/AgentModelRequirements';
import type { string_book } from '../../book-2.0/agent-source/string_book';
import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
import type { AvailableModel } from '../../execution/AvailableModel';
import type { CallChatModelStreamOptions, LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { ChatPromptResult } from '../../execution/PromptResult';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown, string_markdown_text, string_model_name, string_title } from '../../types/typeAliases';
import type { CreateAgentLlmExecutionToolsOptions } from './CreateAgentLlmExecutionToolsOptions';
/**
 * Execution Tools for calling LLM models with a predefined agent "soul"
 * It wraps an underlying `LlmExecutionTools` implementation and applies agent-specific system prompts and requirements.
 *
 * Note: [🦖] There are several different things in Promptbook:
 * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. `Agent` is a higher-level abstraction that internally uses:
 * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
 * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
 * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
 * - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
 * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
 *
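 * @example
 * A minimal usage sketch (assumptions: `llmTools` is any existing `LlmExecutionTools`
 * implementation and `agentSource` is a `string_book` defining the agent's behavior):
 *
 * const agentTools = new AgentLlmExecutionTools({
 *     llmTools, // <- assumption: the underlying LLM execution tools to wrap
 *     agentSource, // <- assumption: the agent source defining the agent's "soul"
 * });
 * console.info(agentTools.title); // <- Title parsed from the agent source
 *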
 * @public exported from `@promptbook/core`
 */
export declare class AgentLlmExecutionTools implements LlmExecutionTools {
    protected readonly options: CreateAgentLlmExecutionToolsOptions;
    /**
     * Cached AgentKit agents to avoid rebuilding identical instances.
     */
    private static agentKitAgentCache;
    /**
     * Cache of OpenAI assistants to avoid creating duplicates
     */
    private static assistantCache;
    /**
     * Cache of OpenAI vector stores to avoid creating duplicates
     */
    private static vectorStoreCache;
    /**
     * Cached model requirements to avoid re-parsing the agent source
     */
    private _cachedModelRequirements;
    /**
     * Cached parsed agent information
     */
    private _cachedAgentInfo;
    /**
     * Creates new AgentLlmExecutionTools
     *
     * @param options Options carrying the underlying LLM execution tools to wrap and the agent source that defines the agent's behavior
     */
    constructor(options: CreateAgentLlmExecutionToolsOptions);
    /**
     * Updates the agent source and clears the cached data derived from it
     *
     * @param agentSource The new agent source string
     */
    protected updateAgentSource(agentSource: string_book): void;
    /**
     * Get cached or parse agent information
     */
    private getAgentInfo;
    /**
     * Get cached or create agent model requirements
     *
     * Note: [🐤] This is named `getModelRequirements` *(not `getAgentModelRequirements`)* because in the future these two will be united
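     *
     * @example
     * // Sketch: requirements are parsed from the agent source once, then served from the cache
     * const requirements = await agentTools.getModelRequirements();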
     */
    getModelRequirements(): Promise<AgentModelRequirements>;
    get title(): string_title & string_markdown_text;
    get description(): string_markdown;
    get profile(): ChatParticipant | undefined;
    checkConfiguration(): Promisable<void>;
    /**
     * Returns a virtual model name representing the agent behavior
     */
    get modelName(): string_model_name;
    listModels(): Promisable<ReadonlyArray<AvailableModel>>;
    /**
     * Calls the chat model with agent-specific system prompt and requirements
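     *
     * @example
     * // Sketch, assuming a chat `prompt` object is in scope;
     * // the agent's system prompt and requirements are applied on top of it
     * const result = await agentTools.callChatModel(prompt);
     * console.info(result.content); // <- assumption: the result exposes the generated `content`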
     */
    callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
    /**
     * Calls the chat model with agent-specific system prompt and requirements with streaming
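     *
     * @example
     * // Sketch: partial results arrive via the `onProgress` callback, the full result is returned at the end
     * const result = await agentTools.callChatModelStream(
     *     prompt,
     *     (chunk) => console.info(chunk.content), // <- assumption: chunks expose partial `content`
     * );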
     */
    callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void, options?: CallChatModelStreamOptions): Promise<ChatPromptResult>;
}
/**
 * TODO: [🍚] Implement Destroyable pattern to free resources
 * TODO: [🧠] Add parameter substitution support (here, or should it be the responsibility of the underlying LLM Tools?)
 */
