import type { CallChatModelStreamOptions, LlmExecutionTools } from '../../execution/LlmExecutionTools';
import type { ChatPromptResult } from '../../execution/PromptResult';
import type { ModelRequirements } from '../../types/ModelRequirements';
import type { Prompt } from '../../types/Prompt';
import type { string_markdown, string_markdown_text, string_title, string_token } from '../../types/typeAliases';
import type { OpenAiAssistantExecutionToolsOptions } from './OpenAiAssistantExecutionToolsOptions';
import { OpenAiVectorStoreHandler } from './OpenAiVectorStoreHandler';
/**
 * Execution Tools for calling OpenAI API Assistants
 *
 * This is useful for calling OpenAI API with a single assistant, for wider usage use `OpenAiExecutionTools`.
 *
 * Note: [🦖] There are several different things in Promptbook:
 * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
 * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
 * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
 * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
 * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
 *
 * @deprecated Use `OpenAiAgentKitExecutionTools` instead.
 * @public exported from `@promptbook/openai`
 */
export declare class OpenAiAssistantExecutionTools extends OpenAiVectorStoreHandler implements LlmExecutionTools {
    /**
     * ID of the OpenAI assistant this instance is bound to
     */
    readonly assistantId: string_token;
    /**
     * Whether this instance is permitted to create new assistants
     *
     * NOTE(review): presumably gates `createNewAssistant` (see TODO [🙎] at the end of this file) — confirm against the implementation.
     */
    private readonly isCreatingNewAssistantsAllowed;
    /**
     * Creates OpenAI Execution Tools.
     *
     * @param options which are relevant are directly passed to the OpenAI client
     */
    constructor(options: OpenAiAssistantExecutionToolsOptions);
    /**
     * Human-readable title of these execution tools (markdown-formatted text)
     */
    get title(): string_title & string_markdown_text;
    /**
     * Human-readable description of these execution tools (markdown)
     */
    get description(): string_markdown;
    /**
     * Calls OpenAI API to use a chat model.
     *
     * @param prompt the chat prompt to execute via the bound assistant
     * @returns result of the chat completion
     */
    callChatModel(prompt: Prompt): Promise<ChatPromptResult>;
    /**
     * Calls OpenAI API to use a chat model with streaming.
     *
     * @param prompt the chat prompt to execute via the bound assistant
     * @param onProgress callback invoked with each partial result chunk as it arrives
     * @param options additional streaming options
     * @returns the final, complete result after the stream finishes
     */
    callChatModelStream(prompt: Prompt, onProgress: (chunk: ChatPromptResult) => void, options?: CallChatModelStreamOptions): Promise<ChatPromptResult>;
    /**
     * Get an existing assistant tool wrapper
     *
     * @param assistantId ID of the existing OpenAI assistant to wrap
     * @returns execution tools bound to the given assistant
     */
    getAssistant(assistantId: string_token): OpenAiAssistantExecutionTools;
    /**
     * Creates a new OpenAI assistant and resolves with execution tools bound to it
     *
     * @param options name, instructions and optional knowledge/tools for the new assistant
     * @returns execution tools wrapping the newly created assistant
     */
    createNewAssistant(options: {
        /**
         * Name of the new assistant
         */
        readonly name: string_title;
        /**
         * Instructions for the new assistant
         */
        readonly instructions: string_markdown;
        /**
         * Optional list of knowledge source links (URLs or file paths) to attach to the assistant via vector store
         */
        readonly knowledgeSources?: ReadonlyArray<string>;
        /**
         * Optional list of tools to attach to the assistant
         */
        readonly tools?: ModelRequirements['tools'];
    }): Promise<OpenAiAssistantExecutionTools>;
    /**
     * Updates an existing OpenAI assistant identified by `options.assistantId`
     *
     * All fields except `assistantId` are optional; only the provided ones are updated.
     *
     * @param options ID of the assistant plus the fields to change
     * @returns execution tools wrapping the updated assistant
     */
    updateAssistant(options: {
        /**
         * ID of the assistant to update
         */
        readonly assistantId: string_token;
        /**
         * Name of the assistant
         */
        readonly name?: string_title;
        /**
         * Instructions for the assistant
         */
        readonly instructions?: string_markdown;
        /**
         * Optional list of knowledge source links (URLs or file paths) to attach to the assistant via vector store
         */
        readonly knowledgeSources?: ReadonlyArray<string>;
        /**
         * Optional list of tools to attach to the assistant
         */
        readonly tools?: ModelRequirements['tools'];
    }): Promise<OpenAiAssistantExecutionTools>;
    /**
     * Discriminant for type guards
     */
    protected get discriminant(): string;
    /**
     * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAssistantExecutionTools`
     *
     * Note: This is useful when you can possibly have multiple versions of `@promptbook/openai` installed
     */
    static isOpenAiAssistantExecutionTools(llmExecutionTools: LlmExecutionTools): llmExecutionTools is OpenAiAssistantExecutionTools;
}
/**
 * TODO: !!!!! [✨🥚] Knowledge should work both with and without scrapers
 * TODO: [🙎] In `OpenAiAssistantExecutionTools` Allow to create abstract assistants with `isCreatingNewAssistantsAllowed`
 * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
 * TODO: Maybe make custom OpenAiError
 * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
 * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
 */
