{
  "knxUltimateAI": {
    "title": "KNX AI (Traffic Analyzer)",
    "sections": {
      "capture": "Erfassung",
      "storage": "Speicher & Zusammenfassung",
      "detection": "Erkennung & Warnungen",
      "llmConnection": "KI-Assistent-Verbindung",
      "llmContext": "KI-Assistent-Kontext",
      "advanced": "Erweiterte Einstellungen"
    },
    "properties": {
      "server": "Gateway",
      "name": "Name",
      "topic": "Topic",
      "notifywrite": "GroupValue_Write erfassen",
      "notifyresponse": "GroupValue_Response erfassen",
      "notifyreadrequest": "GroupValue_Read erfassen",
      "analysisWindowSec": "Analysefenster (Sekunden)",
      "historyWindowSec": "Verlaufsfenster (Sekunden)",
      "historyStoreToDisk": "Erfasste Telegramme zusätzlich auf Festplatte archivieren",
      "historyStoreRetentionDays": "Aufbewahrung des Festplattenarchivs (Tage)",
      "maxEvents": "Maximal gespeicherte Ereignisse",
      "emitIntervalSec": "Zusammenfassung automatisch senden (Sekunden, 0=aus)",
      "topN": "Größe der Top-Liste",
      "enablePattern": "Einfache Muster erkennen (A -> B)",
      "patternMaxLagMs": "Maximale Muster-Verzögerung (ms)",
      "patternMinCount": "Minimale Anzahl Muster-Vorkommen",
      "rateWindowSec": "Raten-Fenster (Sekunden)",
      "maxTelegramPerSecOverall": "Max. Telegramme/Sek. gesamt (0=aus)",
      "maxTelegramPerSecPerGA": "Max. Telegramme/Sek. pro GA (0=aus)",
      "flapWindowSec": "Flatter-Fenster (Sekunden)",
      "flapMaxChanges": "Max. Änderungen pro GA im Fenster (0=aus)",
      "llmEnabled": "LLM-Assistent aktivieren",
      "llmProvider": "Anbieter",
      "llmBaseUrl": "Endpunkt-URL",
      "llmApiKey": "API-Schlüssel",
      "llmModel": "Modell",
      "llmSystemPrompt": "System-Prompt",
      "llmIncludeRaw": "Roh-Payload (Hex) einbeziehen",
      "llmIncludeFlowContext": "Node-RED-Projektinventar einbeziehen",
      "llmIncludeDocsSnippets": "Dokumentationsausschnitte einbeziehen (Hilfe/README/Beispiele)",
      "llmDocsLanguage": "Sprache der Dokumentation"
    },
    "outputs": {
      "summary": "Zusammenfassung/Statistik",
      "anomalies": "Anomalien",
      "assistant": "KI-Assistent"
    },
    "selectlists": {
      "llmProvider": {
        "openai_compat": "OpenAI-kompatibel (chat/completions)",
        "ollama": "Ollama (lokal, Beta)"
      }
    },
    "placeholder": {
      "llmBaseUrl": "https://api.openai.com/v1/chat/completions (oder Ihr kompatibler Endpunkt)",
      "llmApiKey": "API-Schlüssel einfügen (beginnt mit sk-)",
      "llmModel": "z. B. gpt-4o-mini",
      "llmSystemPrompt": "Optional. Leer lassen für Standardwert."
    },
    "messages": {
      "ollamaNotSupported": "Ollama-Lokalmodus: API-Schlüssel nicht erforderlich. Standard-Endpunkt ist http://localhost:11434/api/chat.",
      "ollamaNoModels": "Kein lokales Ollama-Modell gefunden. Installieren Sie eines oder wählen Sie eines aus der Bibliothek.",
      "installingOllamaModel": "Ollama wird gestartet und das Modell installiert…",
      "installedOllamaModel": "Ollama-Modell installiert",
      "installOllamaModelFailed": "Installation des Ollama-Modells fehlgeschlagen",
      "ollamaInstallSteps": "1) Öffnen Sie die Modellbibliothek und kopieren Sie den Modellnamen (zum Beispiel llama3.1). 2) Tragen Sie den Namen in das Feld Modell ein und klicken Sie auf Installieren.",
      "ollamaStartedAuto": "Ollama-Server wurde automatisch gestartet."
    },
    "sidebar": {
      "ui": {
        "refreshNodeList": "Knotenliste aktualisieren",
        "refreshSummary": "Zusammenfassung aktualisieren",
        "auto": "Auto",
        "sections": {
          "summary": "Zusammenfassung",
          "anomalies": "Anomalien",
          "ask": "Fragen"
        },
        "empty": {
          "noNodes": "Keine KNX AI-Knoten gefunden.",
          "noAnomalies": "Keine Anomalien."
        },
        "chat": {
          "placeholder": "Stelle eine Frage zum KNX-Verkehr…",
          "send": "Senden",
          "pending": "Ich denke nach…",
          "llmDisabled": "LLM in der Knoten-Konfiguration deaktiviert",
          "emptyAnswer": "(leere Antwort)"
        },
        "status": {
          "ready": "Bereit",
          "loadingNodes": "Lade Knoten…",
          "loading": "Laden…",
          "asking": "Frage…"
        },
        "errors": {
          "loadNodes": "Knoten konnten nicht geladen werden",
          "loadState": "Status konnte nicht geladen werden",
          "askFailed": "Anfrage fehlgeschlagen"
        }
      },
      "summary": {
        "noData": "Keine Daten verfügbar.",
        "header": {
          "gateway": "Gateway: {{gatewayName}}",
          "updated": "Aktualisiert: {{at}}"
        },
        "analysisWindowLine": "Analysefenster: {{seconds}}s",
        "statsLine": "Telegramme: {{telegrams}} · Rate: {{rate}}/s · Echoed: {{echoed}} · Unbekannte DPT: {{unknownDpt}}",
        "topGAsTitle": "Top-Gruppenadressen:",
        "eventsTitle": "Ereignisse:",
        "patternsTitle": "Muster (wiederkehrende Sequenzen):",
        "patternItem": "{{from}} → {{to}}  ({{count}} mal innerhalb von {{withinMs}}ms)"
      }
    },
    "buttons": {
      "installOllamaModel": "2) Installieren",
      "ollamaLibrary": "Modellbibliothek",
      "downloadOllamaModel": "1) Modell herunterladen"
    }
  }
}
