// -----------------------------------------------------------------------------
// ChatLuna "openllm" adapter plugin (esbuild CommonJS bundle, reformatted).
// Registers an OpenAI-compatible client ("openllm" platform) against a
// self-hosted API for Open LLMs backend, exposing chat + embeddings models.
//
// Fixes applied relative to the original bundle:
//   * OpenLLMClient.getModels recursed into itself forever on the first call
//     (stack overflow); it now calls refreshModels() to actually fetch models.
//   * en-US schema strings: removed duplicated ").)" typo in presencePenalty,
//     and corrected the temperature range to 0-1 to match the schema
//     (Schema.percent().min(0).max(1)).
//   * The requester's embeddings error path now throws an Error with a message
//     instead of a bare `new Error()` (it is wrapped immediately, so the
//     outward behavior is unchanged).
// -----------------------------------------------------------------------------

// ---- esbuild CommonJS interop helpers ---------------------------------------
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Assigns a debug-friendly `name` to a function/class expression.
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
// Lazily evaluates a CommonJS module body exactly once and caches its exports.
var __commonJS = (cb, mod) => function __require() {
    return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
};
// Defines live getter re-exports on `target` for every key in `all`.
var __export = (target, all) => {
    for (var name2 in all)
        __defProp(target, name2, { get: all[name2], enumerable: true });
};
// Copies own properties from `from` to `to` as getters (skipping `except`).
var __copyProps = (to, from, except, desc) => {
    if (from && typeof from === "object" || typeof from === "function") {
        for (let key of __getOwnPropNames(from))
            if (!__hasOwnProp.call(to, key) && key !== except)
                __defProp(to, key, {
                    get: () => from[key],
                    enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
                });
    }
    return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// ---- src/locales/zh-CN.schema.yml (inlined i18n schema descriptions) --------
var require_zh_CN_schema = __commonJS({
    "src/locales/zh-CN.schema.yml"(exports2, module2) {
        module2.exports = {
            $inner: [
                {},
                {
                    $desc: "请求设置",
                    apiKeys: {
                        $inner: [
                            "API for Open LLMs 的 API Key",
                            "API for Open LLMs 自搭建后端的请求地址"
                        ],
                        $desc: "API for Open LLMs 服务的 API Key 和请求地址列表。"
                    },
                    embeddings: {
                        $desc: "Embeddings 模型的名称。"
                    }
                },
                {
                    $desc: "模型设置",
                    maxTokens: "回复的最大 Token 数(16~36000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 512 token。",
                    temperature: "回复的随机性程度,数值越高,回复越随机。",
                    presencePenalty: "重复惩罚系数,数值越高,越不易重复出现已出现过至少一次的 Token(范围:-2~2,步长:0.1)。",
                    frequencyPenalty: "频率惩罚系数,数值越高,越不易重复出现次数较多的 Token(范围:-2~2,步长:0.1)。"
                }
            ]
        };
    }
});

// ---- src/locales/en-US.schema.yml -------------------------------------------
var require_en_US_schema = __commonJS({
    "src/locales/en-US.schema.yml"(exports2, module2) {
        module2.exports = {
            $inner: [
                {},
                {
                    $desc: "API Configuration",
                    apiKeys: {
                        $inner: [
                            "Open LLMs API Key",
                            "Self-hosted Open LLMs API URL (optional)"
                        ],
                        $desc: "Credentials for Open LLMs API access."
                    },
                    embeddings: {
                        $desc: "Specify the embeddings model name for vector operations."
                    }
                },
                {
                    $desc: "Model Parameters",
                    maxTokens: "Maximum number of tokens in response (range: 16-36000, must be multiple of 16).",
                    // FIX: range corrected from "0-2" to "0-1" to match
                    // Schema.percent().min(0).max(1) below.
                    temperature: "Sampling temperature for response generation (range: 0-1, higher values increase randomness).",
                    // FIX: removed duplicated ").)" terminator typo.
                    presencePenalty: "Penalty for token presence in text (range: -2 to 2, step 0.1, positive values encourage new topics).",
                    frequencyPenalty: "Penalty for token frequency in text (range: -2 to 2, step 0.1, positive values reduce repetition)."
                }
            ]
        };
    }
});

// ---- src/index.ts (module exports + top-level imports) ----------------------
var src_exports = {};
__export(src_exports, {
    Config: () => Config,
    apply: () => apply,
    inject: () => inject,
    name: () => name
});
module.exports = __toCommonJS(src_exports);
var import_chat = require("koishi-plugin-chatluna/services/chat");
var import_koishi = require("koishi");

// ---- src/client.ts imports --------------------------------------------------
var import_client = require("koishi-plugin-chatluna/llm-core/platform/client");
var import_model = require("koishi-plugin-chatluna/llm-core/platform/model");
var import_types = require("koishi-plugin-chatluna/llm-core/platform/types");
var import_error2 = require("koishi-plugin-chatluna/utils/error");

// ---- src/requester.ts imports -----------------------------------------------
var import_outputs = require("@langchain/core/outputs");
var import_api = require("koishi-plugin-chatluna/llm-core/platform/api");
var import_error = require("koishi-plugin-chatluna/utils/error");
var import_sse = require("koishi-plugin-chatluna/utils/sse");

// ---- src/utils.ts -----------------------------------------------------------
var import_messages = require("@langchain/core/messages");
var import_zod_to_json_schema = require("zod-to-json-schema");

/**
 * Converts LangChain messages to OpenAI chat-completion message objects.
 * `name` is only forwarded for function-role messages, per the OpenAI wire
 * format.
 */
function langchainMessageToOpenAIMessage(messages) {
    return messages.map((it) => {
        const role = messageTypeToOpenAIRole(it._getType());
        return {
            role,
            content: it.content,
            name: role === "function" ? it.name : void 0
        };
    });
}
__name(langchainMessageToOpenAIMessage, "langchainMessageToOpenAIMessage");

/** Maps a LangChain message type to the corresponding OpenAI role string. */
function messageTypeToOpenAIRole(type) {
    switch (type) {
        case "system":
            return "system";
        case "ai":
            return "assistant";
        case "human":
            return "user";
        case "function":
            return "function";
        default:
            throw new Error(`Unknown message type: ${type}`);
    }
}
__name(messageTypeToOpenAIRole, "messageTypeToOpenAIRole");

/** Converts a list of LangChain tools to OpenAI function-calling specs. */
function formatToolsToOpenAIFunctions(tools) {
    return tools.map(formatToolToOpenAIFunction);
}
__name(formatToolsToOpenAIFunctions, "formatToolsToOpenAIFunctions");

/** Converts one LangChain tool (with a zod schema) to an OpenAI function spec. */
function formatToolToOpenAIFunction(tool) {
    return {
        name: tool.name,
        description: tool.description,
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        parameters: (0, import_zod_to_json_schema.zodToJsonSchema)(tool.schema)
    };
}
__name(formatToolToOpenAIFunction, "formatToolToOpenAIFunction");

/**
 * Turns one streamed OpenAI `delta` payload into the matching LangChain
 * message chunk class. `defaultRole` is used when the delta omits a role
 * (OpenAI only sends the role on the first chunk of a stream).
 */
function convertDeltaToMessageChunk(delta, defaultRole) {
    const role = delta.role ?? defaultRole;
    const content = delta.content ?? "";
    let additional_kwargs;
    if (delta.function_call) {
        additional_kwargs = { function_call: delta.function_call };
    } else {
        additional_kwargs = {};
    }
    if (role === "user") {
        return new import_messages.HumanMessageChunk({ content });
    } else if (role === "assistant") {
        return new import_messages.AIMessageChunk({ content, additional_kwargs });
    } else if (role === "system") {
        return new import_messages.SystemMessageChunk({ content });
    } else if (role === "function") {
        return new import_messages.FunctionMessageChunk({
            content,
            additional_kwargs,
            name: delta.name
        });
    } else {
        return new import_messages.ChatMessageChunk({ content, role });
    }
}
__name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");

// ---- src/requester.ts -------------------------------------------------------

/**
 * HTTP requester for an OpenAI-compatible "API for Open LLMs" backend.
 * Handles streamed chat completions, embeddings, and model listing.
 */
var OpenLLMRequester = class extends import_api.ModelRequester {
    constructor(_config, _plugin) {
        super();
        this._config = _config;
        this._plugin = _plugin;
    }
    static {
        __name(this, "OpenLLMRequester");
    }

    /**
     * Streams chat completions as ChatGenerationChunk objects.
     * When no tools are requested, chunk contents are accumulated so each
     * yielded chunk carries the full text so far (ChatLuna's expected shape).
     */
    async *completionStream(params) {
        try {
            const response = await this._post(
                "chat/completions",
                {
                    model: params.model,
                    messages: langchainMessageToOpenAIMessage(params.input),
                    functions: params.tools != null
                        ? formatToolsToOpenAIFunctions(params.tools)
                        : void 0,
                    stop: params.stop,
                    max_tokens: params.maxTokens,
                    temperature: params.temperature,
                    presence_penalty: params.presencePenalty,
                    frequency_penalty: params.frequencyPenalty,
                    n: params.n,
                    top_p: params.topP,
                    user: params.user ?? "user",
                    stream: true,
                    logit_bias: params.logitBias
                },
                { signal: params.signal }
            );
            const iterator = (0, import_sse.sseIterable)(response);
            let content = "";
            let defaultRole = "assistant";
            let errorCount = 0;
            const findTools = params.tools != null;
            for await (const event of iterator) {
                const chunk = event.data;
                if (chunk === "[DONE]") {
                    return;
                }
                try {
                    const data = JSON.parse(chunk);
                    if (data.error) {
                        // NOTE: this throw is caught by the inner catch below
                        // and only escapes after errorCount exceeds 5 — the
                        // stream tolerates a few bad/mid-error chunks before
                        // giving up (behavior preserved from the original).
                        throw new import_error.ChatLunaError(
                            import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
                            new Error(
                                "error when calling openai completion, Result: " + chunk
                            )
                        );
                    }
                    const choice = data.choices?.[0];
                    if (!choice) {
                        continue;
                    }
                    const { delta } = choice;
                    const messageChunk = convertDeltaToMessageChunk(
                        delta,
                        defaultRole
                    );
                    if (!findTools) {
                        content = content + messageChunk.content;
                        messageChunk.content = content;
                    }
                    defaultRole = delta.role ?? defaultRole;
                    const generationChunk = new import_outputs.ChatGenerationChunk({
                        message: messageChunk,
                        text: messageChunk.content
                    });
                    yield generationChunk;
                } catch (e) {
                    if (errorCount > 5) {
                        throw new import_error.ChatLunaError(
                            import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
                            new Error(
                                "error when calling openai completion, Result: " + chunk
                            )
                        );
                    } else {
                        errorCount++;
                        continue;
                    }
                }
            }
        } catch (e) {
            if (e instanceof import_error.ChatLunaError) {
                throw e;
            } else {
                throw new import_error.ChatLunaError(
                    import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
                    e
                );
            }
        }
    }

    /**
     * Requests embeddings for `params.input` and returns an array of vectors.
     * Any failure (HTTP, parse, empty payload) is wrapped in a ChatLunaError
     * carrying the raw response for diagnosis.
     */
    async embeddings(params) {
        let data;
        try {
            const response = await this._post("embeddings", {
                input: params.input,
                model: params.model
            });
            data = await response.text();
            data = JSON.parse(data);
            if (data.data && data.data.length > 0) {
                return data.data.map((it) => it.embedding);
            }
            // No embedding vectors in the response; caught and wrapped below.
            throw new Error("empty embeddings data in response");
        } catch (e) {
            const error = new Error(
                "error when calling embeddings, Result: " + JSON.stringify(data)
            );
            error.stack = e.stack;
            error.cause = e.cause;
            throw new import_error.ChatLunaError(
                import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
                error
            );
        }
    }

    /** Lists model ids available on the backend (GET /v1/models). */
    async getModels() {
        let data;
        try {
            const response = await this._get("models");
            data = await response.text();
            data = JSON.parse(data);
            return data.data.map((model) => model.id);
        } catch (e) {
            const error = new Error(
                "error when listing models, Result: " + JSON.stringify(data)
            );
            error.stack = e.stack;
            error.cause = e.cause;
            throw error;
        }
    }

    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    _post(url, data, params = {}) {
        const requestUrl = this._concatUrl(url);
        const body = JSON.stringify(data);
        return this._plugin.fetch(requestUrl, {
            body,
            headers: this._buildHeaders(),
            method: "POST",
            ...params
        });
    }

    _get(url) {
        const requestUrl = this._concatUrl(url);
        return this._plugin.fetch(requestUrl, {
            method: "GET",
            headers: this._buildHeaders()
        });
    }

    _buildHeaders() {
        return {
            Authorization: `Bearer ${this._config.apiKey}`,
            "Content-Type": "application/json"
        };
    }

    /**
     * Joins the configured endpoint with `url`, inserting a "/v1/" segment
     * unless the endpoint already ends with one.
     */
    _concatUrl(url) {
        const apiEndPoint = this._config.apiEndpoint;
        if (!apiEndPoint.match(/\/v1\/?$/)) {
            if (apiEndPoint.endsWith("/")) {
                return apiEndPoint + "v1/" + url;
            }
            return apiEndPoint + "/v1/" + url;
        }
        if (apiEndPoint.endsWith("/")) {
            return apiEndPoint + url;
        }
        return apiEndPoint + "/" + url;
    }

    async init() {}

    async dispose() {}
};

// ---- src/client.ts ----------------------------------------------------------

/**
 * ChatLuna platform client exposing the backend's LLMs and the configured
 * embeddings model. Model info is fetched once and cached in `_models`.
 */
var OpenLLMClient = class extends import_client.PlatformModelAndEmbeddingsClient {
    constructor(ctx, _config, clientConfig, plugin) {
        super(ctx, clientConfig);
        this._config = _config;
        this._requester = new OpenLLMRequester(clientConfig, plugin);
    }
    static {
        __name(this, "OpenLLMClient");
    }

    platform = "chatglm";
    _requester;
    _models;

    async init() {
        await this.getModels();
    }

    /**
     * Fetches the live model list from the backend and appends the configured
     * embeddings model. Function-calling is assumed supported for chatglm3 /
     * qwen model names.
     */
    async refreshModels() {
        try {
            const rawModels = await this._requester.getModels();
            return rawModels
                .map((model) => {
                    return {
                        name: model,
                        type: import_types.ModelType.llm,
                        functionCall:
                            model.includes("chatglm3") || model.includes("qwen"),
                        supportMode: ["all"]
                    };
                })
                .concat([
                    {
                        name: this._config.embeddings,
                        type: import_types.ModelType.embeddings,
                        supportMode: []
                    }
                ]);
        } catch (e) {
            throw new import_error2.ChatLunaError(
                import_error2.ChatLunaErrorCode.MODEL_INIT_ERROR,
                e
            );
        }
    }

    /** Returns cached models, populating the cache via refreshModels() once. */
    async getModels() {
        if (this._models) {
            return Object.values(this._models);
        }
        // FIX: the original called `this.getModels()` here, recursing forever
        // (the cache is never populated before the recursive call). The intent
        // is clearly to fetch via refreshModels() and then cache the result.
        const models = await this.refreshModels();
        this._models = {};
        for (const model of models) {
            this._models[model.name] = model;
        }
        return models;
    }

    /** Instantiates a chat model or embeddings model for a cached model name. */
    _createModel(model) {
        const info = this._models[model];
        if (info == null) {
            throw new import_error2.ChatLunaError(
                import_error2.ChatLunaErrorCode.MODEL_NOT_FOUND
            );
        }
        if (info.type === import_types.ModelType.llm) {
            return new import_model.ChatLunaChatModel({
                modelInfo: info,
                requester: this._requester,
                model,
                maxTokenLimit: this._config.maxTokens,
                frequencyPenalty: this._config.frequencyPenalty,
                presencePenalty: this._config.presencePenalty,
                timeout: this._config.timeout,
                temperature: this._config.temperature,
                maxRetries: this._config.maxRetries,
                llmType: "chatglm",
                modelMaxContextSize: getModelContextSize(model)
            });
        }
        return new import_model.ChatLunaEmbeddings({
            client: this._requester,
            maxRetries: this._config.maxRetries
        });
    }
};

/**
 * Heuristic context window size by model name: 8192 for chatglm2/qwen,
 * 4096 otherwise.
 */
function getModelContextSize(model) {
    model = model.toLowerCase();
    if (model.includes("chatglm2")) {
        return 8192;
    }
    if (model.includes("qwen")) {
        return 8192;
    }
    return 4096;
}
__name(getModelContextSize, "getModelContextSize");

// ---- src/index.ts (plugin entry point) --------------------------------------

/**
 * Koishi plugin entry: registers the "openllm" platform with ChatLuna,
 * expanding each (apiKey, apiEndpoint) pair into a client config.
 */
function apply(ctx, config) {
    const plugin = new import_chat.ChatLunaPlugin(ctx, config, "openllm");
    ctx.on("ready", async () => {
        plugin.registerToService();
        await plugin.parseConfig((config2) => {
            return config2.apiKeys.map(([apiKey, apiEndpoint]) => {
                return {
                    apiKey,
                    apiEndpoint,
                    platform: "openllm",
                    chatLimit: config2.chatTimeLimit,
                    timeout: config2.timeout,
                    maxRetries: config2.maxRetries,
                    concurrentMaxSize: config2.chatConcurrentMaxSize
                };
            });
        });
        plugin.registerClient(
            (_, clientConfig) => new OpenLLMClient(ctx, config, clientConfig, plugin)
        );
        await plugin.initClients();
    });
}
__name(apply, "apply");

// Plugin configuration schema: shared ChatLuna options + request settings
// (apiKeys tuples, embeddings model name) + sampling parameters.
var Config = import_koishi.Schema.intersect([
    import_chat.ChatLunaPlugin.Config,
    import_koishi.Schema.object({
        apiKeys: import_koishi.Schema.array(
            import_koishi.Schema.tuple([
                import_koishi.Schema.string().role("secret").default(""),
                import_koishi.Schema.string().default("http://127.0.0.1:8000")
            ])
        ).default([["", "http://127.0.0.1:8000"]]),
        embeddings: import_koishi.Schema.string().default("moka-ai/m3e-base")
    }),
    import_koishi.Schema.object({
        maxTokens: import_koishi.Schema.number().min(16).max(36e3).step(16).default(1024),
        temperature: import_koishi.Schema.percent().min(0).max(1).step(0.1).default(0.8),
        presencePenalty: import_koishi.Schema.number().min(-2).max(2).step(0.1).default(0.2),
        frequencyPenalty: import_koishi.Schema.number().min(-2).max(2).step(0.1).default(0.2)
    })
]).i18n({
    "zh-CN": require_zh_CN_schema(),
    "en-US": require_en_US_schema()
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
});
var inject = ["chatluna"];
var name = "chatluna-chatglm-adapter";
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = { Config, apply, inject, name });