{"version":3,"file":"metadata.d.ts","names":["MessageOutputVersion","ResponseMetadata","mergeResponseMetadata","ModalitiesTokenDetails","InputTokenDetails","OutputTokenDetails","UsageMetadata","mergeUsageMetadata"],"sources":["../../src/messages/metadata.d.ts"],"sourcesContent":["import type { MessageOutputVersion } from \"./message.js\";\nexport type ResponseMetadata = {\n    model_provider?: string;\n    model_name?: string;\n    output_version?: MessageOutputVersion;\n    [key: string]: unknown;\n};\nexport declare function mergeResponseMetadata(a?: ResponseMetadata, b?: ResponseMetadata): ResponseMetadata;\nexport type ModalitiesTokenDetails = {\n    /**\n     * Text tokens.\n     * Does not need to be reported, but some models will do so.\n     */\n    text?: number;\n    /**\n     * Image (non-video) tokens.\n     */\n    image?: number;\n    /**\n     * Audio tokens.\n     */\n    audio?: number;\n    /**\n     * Video tokens.\n     */\n    video?: number;\n    /**\n     * Document tokens.\n     * e.g. PDF\n     */\n    document?: number;\n};\n/**\n * Breakdown of input token counts.\n *\n * Does not *need* to sum to full input token count. Does *not* need to have all keys.\n */\nexport type InputTokenDetails = ModalitiesTokenDetails & {\n    /**\n     * Input tokens that were cached and there was a cache hit.\n     *\n     * Since there was a cache hit, the tokens were read from the cache.\n     * More precisely, the model state given these tokens was read from the cache.\n     */\n    cache_read?: number;\n    /**\n     * Input tokens that were cached and there was a cache miss.\n     *\n     * Since there was a cache miss, the cache was created from these tokens.\n     */\n    cache_creation?: number;\n};\n/**\n * Breakdown of output token counts.\n *\n * Does *not* need to sum to full output token count. Does *not* need to have all keys.\n */\nexport type OutputTokenDetails = ModalitiesTokenDetails & {\n    /**\n     * Reasoning output tokens.\n     *\n     * Tokens generated by the model in a chain of thought process (i.e. by\n     * OpenAI's o1 models) that are not returned as part of model output.\n     */\n    reasoning?: number;\n};\n/**\n * Usage metadata for a message, such as token counts.\n */\nexport type UsageMetadata = {\n    /**\n     * Count of input (or prompt) tokens. Sum of all input token types.\n     */\n    input_tokens: number;\n    /**\n     * Count of output (or completion) tokens. Sum of all output token types.\n     */\n    output_tokens: number;\n    /**\n     * Total token count. Sum of input_tokens + output_tokens.\n     */\n    total_tokens: number;\n    /**\n     * Breakdown of input token counts.\n     *\n     * Does *not* need to sum to full input token count. Does *not* need to have all keys.\n     */\n    input_token_details?: InputTokenDetails;\n    /**\n     * Breakdown of output token counts.\n     *\n     * Does *not* need to sum to full output token count. 
Does *not* need to have all keys.\n     */\n    output_token_details?: OutputTokenDetails;\n};\nexport declare function mergeUsageMetadata(a?: UsageMetadata, b?: UsageMetadata): UsageMetadata;\n"],"mappings":";;;KACYC,gBAAAA;;EAAAA,UAAAA,CAAAA,EAAAA,MAAgB;EAMJC,cAAAA,CAAAA,EAHHF,oBAGwB;EAAA,CAAA,GAAA,EAAA,MAAA,CAAA,EAAA,OAAA;CAAA;AAA2BC,iBAAhDC,qBAAAA,CAAgDD,CAAAA,CAAAA,EAAtBA,gBAAsBA,EAAAA,CAAAA,CAAAA,EAAAA,gBAAAA,CAAAA,EAAmBA,gBAAnBA;AAAmBA,KAC/EE,sBAAAA,GAD+EF;EAAgB;AAC3G;AA6BA;AAoBA;EAYYK,IAAAA,CAAAA,EAAAA,MAAAA;EAAa;;;EAwBoB,KAAA,CAAA,EAAA,MAAA;EAErBC;;;EAAoC,KAAMD,CAAAA,EAAAA,MAAAA;EAAa;AAAgB;;;;;;;;;;;;;;KA1DnFF,iBAAAA,GAAoBD;;;;;;;;;;;;;;;;;;;;KAoBpBE,kBAAAA,GAAqBF;;;;;;;;;;;;KAYrBG,aAAAA;;;;;;;;;;;;;;;;;;wBAkBcF;;;;;;yBAMCC;;iBAEHE,kBAAAA,KAAuBD,mBAAmBA,gBAAgBA"}
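// --- Usage sketch (illustrative, not part of the declarations above) ---
// A minimal example of how a consumer might call mergeUsageMetadata, e.g. to
// accumulate usage across streamed chunks. The additive merge behavior shown
// in the comment is an assumption consistent with the count semantics
// documented above (sums of token types), not something this file guarantees.
// The import path is hypothetical and stands in for the compiled module.
//
// import { mergeUsageMetadata, type UsageMetadata } from "./metadata.js";
//
// const chunkA: UsageMetadata = {
//     input_tokens: 10,
//     output_tokens: 5,
//     total_tokens: 15,
//     input_token_details: { cache_read: 4 },
// };
// const chunkB: UsageMetadata = {
//     input_tokens: 0,
//     output_tokens: 7,
//     total_tokens: 7,
//     output_token_details: { reasoning: 3 },
// };
//
// // Assuming counts merge additively, the result would be:
// // { input_tokens: 10, output_tokens: 12, total_tokens: 22,
// //   input_token_details: { cache_read: 4 },
// //   output_token_details: { reasoning: 3 } }
// const merged = mergeUsageMetadata(chunkA, chunkB);
//
// mergeResponseMetadata follows the same pairwise shape for ResponseMetadata.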